Diffstat (limited to 'sys/arch/i386/i386/machdep.c')
-rw-r--r-- | sys/arch/i386/i386/machdep.c | 250
1 file changed, 187 insertions, 63 deletions
diff --git a/sys/arch/i386/i386/machdep.c b/sys/arch/i386/i386/machdep.c
index 61479795f5e..5291fb73dda 100644
--- a/sys/arch/i386/i386/machdep.c
+++ b/sys/arch/i386/i386/machdep.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: machdep.c,v 1.352 2006/04/18 17:39:15 kettenis Exp $	*/
+/*	$OpenBSD: machdep.c,v 1.353 2006/04/27 15:37:51 mickey Exp $	*/
 /*	$NetBSD: machdep.c,v 1.214 1996/11/10 03:16:17 thorpej Exp $	*/
 
 /*-
@@ -218,6 +218,9 @@ int bufcachepercent = BUFCACHEPERCENT;
 
 extern int boothowto;
 int physmem;
+#ifndef SMALL_KERNEL
+int pae_copy;
+#endif
 
 struct dumpmem {
 	paddr_t start;
@@ -243,7 +246,7 @@ int i386_has_sse2;
 int i386_has_xcrypt;
 
 bootarg_t *bootargp;
-paddr_t avail_end;
+paddr_t avail_end, avail_end2;
 
 struct vm_map *exec_map = NULL;
 struct vm_map *phys_map = NULL;
@@ -323,6 +326,12 @@ int allowaperture = 0;
 #endif
 #endif
 
+#ifdef I686_PAE
+int cpu_pae = 1;
+#else
+int cpu_pae = 0;
+#endif
+
 void	winchip_cpu_setup(struct cpu_info *);
 void	amd_family5_setup(struct cpu_info *);
 void	amd_family6_setup(struct cpu_info *);
@@ -416,7 +425,8 @@ cpu_startup()
 	curcpu()->ci_feature_flags = cpu_feature;
 	identifycpu(curcpu());
 
-	printf("real mem = %u (%uK)\n", ctob(physmem), ctob(physmem)/1024U);
+	printf("real mem = %llu (%uK)\n", ctob((paddr_t)physmem),
+	    ctob((paddr_t)physmem)/1024U);
 
 	/*
 	 * Find out how much space we need, allocate it,
@@ -447,8 +457,8 @@ cpu_startup()
 	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
 				   VM_PHYS_SIZE, 0, FALSE, NULL);
 
-	printf("avail mem = %lu (%uK)\n", ptoa(uvmexp.free),
-	    ptoa(uvmexp.free)/1024U);
+	printf("avail mem = %llu (%uK)\n", ptoa((paddr_t)uvmexp.free),
+	    ptoa((paddr_t)uvmexp.free) / 1024U);
 	printf("using %d buffers containing %u bytes (%uK) of memory\n", nbuf,
 	    bufpages * PAGE_SIZE, bufpages * PAGE_SIZE / 1024);
 
@@ -2708,7 +2718,6 @@ fix_f00f(void)
 {
 	struct region_descriptor region;
 	vaddr_t va;
-	pt_entry_t *pte;
 	void *p;
 
 	/* Allocate two new pages */
@@ -2724,8 +2733,7 @@ fix_f00f(void)
 	    GCODE_SEL);
 
 	/* Map first page RO */
-	pte = PTE_BASE + atop(va);
-	*pte &= ~PG_RW;
+	pmap_pte_setbits(va, 0, PG_RW);
 
 	/* Reload idtr */
 	setregion(&region, idt, sizeof(idt_region) - 1);
@@ -2880,11 +2888,11 @@ init386(paddr_t first_avail)
 		if (bootargc > NBPG)
 			panic("too many boot args");
 
-		if (extent_alloc_region(iomem_ex, (paddr_t)bootargv, bootargc,
+		if (extent_alloc_region(iomem_ex, (u_long)bootargv, bootargc,
 		    EX_NOWAIT))
 			panic("cannot reserve /boot args memory");
 
-		pmap_enter(pmap_kernel(), (vaddr_t)bootargp, (paddr_t)bootargv,
+		pmap_enter(pmap_kernel(), (vaddr_t)bootargp, (u_long)bootargv,
 		    VM_PROT_READ|VM_PROT_WRITE,
 		    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
 
@@ -2898,15 +2906,6 @@ init386(paddr_t first_avail)
 		panic("no BIOS memory map supplied");
 #endif
 
-#if defined(MULTIPROCESSOR)
-	/* install the page after boot args as PT page for first 4M */
-	pmap_enter(pmap_kernel(), (u_long)vtopte(0),
-	    round_page((vaddr_t)(bootargv + bootargc)),
-	    VM_PROT_READ|VM_PROT_WRITE,
-	    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
-	memset(vtopte(0), 0, NBPG);	/* make sure it is clean before using */
-#endif
-
 	/*
 	 * account all the memory passed in the map from /boot
 	 * calculate avail_end and count the physmem.
@@ -2919,27 +2918,12 @@ init386(paddr_t first_avail)
 	for(i = 0, im = bios_memmap; im->type != BIOS_MAP_END; im++)
 		if (im->type == BIOS_MAP_FREE) {
 			register paddr_t a, e;
-#ifdef DEBUG
-			printf(" %llx-%llx", im->addr, im->addr + im->size);
-#endif
-
-			if (im->addr >= 0x100000000ULL) {
-#ifdef DEBUG
-				printf("-H");
-#endif
-				continue;
-			}
 
 			a = round_page(im->addr);
-			if (im->addr + im->size <= 0xfffff000ULL)
-				e = trunc_page(im->addr + im->size);
-			else {
+			e = trunc_page(im->addr + im->size);
 #ifdef DEBUG
-				printf("-T");
+			printf(" %llx-%llx", a, e);
 #endif
-				e = 0xfffff000;
-			}
-
 			/* skip first eight pages */
 			if (a < 8 * NBPG)
 				a = 8 * NBPG;
@@ -2959,7 +2943,16 @@ init386(paddr_t first_avail)
 				continue;
 			}
 
-			if (extent_alloc_region(iomem_ex, a, e - a, EX_NOWAIT))
+			if (a >= 0x100000000ULL) {
+#ifdef DEBUG
+				printf("-H");
+#endif
+				if (!cpu_pae)
+					continue;
+			}
+
+			if (e <= 0x100000000ULL &&
+			    extent_alloc_region(iomem_ex, a, e - a, EX_NOWAIT))
 				/* XXX What should we do? */
 				printf("\nWARNING: CAN'T ALLOCATE RAM (%x-%x)"
 				    " FROM IOMEM EXTENT MAP!\n", a, e);
@@ -2968,11 +2961,15 @@ init386(paddr_t first_avail)
 			dumpmem[i].start = atop(a);
 			dumpmem[i].end = atop(e);
 			i++;
-			avail_end = max(avail_end, e);
+			avail_end2 = MAX(avail_end2, e);
+			if (avail_end2 < 0x100000000ULL)
+				avail_end = avail_end2;
 		}
 
 	ndumpmem = i;
 	avail_end -= round_page(MSGBUFSIZE);
+	if (avail_end2 < 0x100000000ULL)
+		avail_end2 = avail_end;
 
 #ifdef DEBUG
 	printf(": %lx\n", avail_end);
@@ -3003,30 +3000,34 @@ init386(paddr_t first_avail)
 		e = dumpmem[i].end;
 		if (a < atop(first_avail) && e > atop(first_avail))
 			a = atop(first_avail);
-		if (e > atop(avail_end))
+		if (a < atop(avail_end) && e > atop(avail_end))
 			e = atop(avail_end);
 
 		if (a < e) {
 			if (a < atop(16 * 1024 * 1024)) {
 				lim = MIN(atop(16 * 1024 * 1024), e);
 #ifdef DEBUG
-				printf(" %x-%x (<16M)", a, lim);
+				printf(" %llx-%llx (<16M)", a, lim);
 #endif
 				uvm_page_physload(a, lim, a, lim,
 				    VM_FREELIST_FIRST16);
 				if (e > lim) {
 #ifdef DEBUG
-					printf(" %x-%x", lim, e);
+					printf(" %llx-%llx", lim, e);
 #endif
 					uvm_page_physload(lim, e, lim, e,
 					    VM_FREELIST_DEFAULT);
 				}
 			} else {
 #ifdef DEBUG
-				printf(" %x-%x", a, e);
+				printf(" %llx-%llx", a, e);
 #endif
-				uvm_page_physload(a, e, a, e,
-				    VM_FREELIST_DEFAULT);
+				if (a >= atop(0x100000000ULL))
+					uvm_page_physload(a, e, a, a - 1,
+					    VM_FREELIST_ABOVE4G);
+				else
+					uvm_page_physload(a, e, a, e,
+					    VM_FREELIST_DEFAULT);
 			}
 		}
 	}
@@ -3464,8 +3465,8 @@ bus_mem_add_mapping(bpa, size, cacheable, bshp)
 	bus_space_handle_t *bshp;
 {
 	u_long pa, endpa;
+	u_int32_t bits;
 	vaddr_t va;
-	pt_entry_t *pte;
 	bus_size_t map_size;
 #ifdef MULTIPROCESSOR
 	u_int32_t cpumask = 0;
@@ -3497,13 +3498,12 @@ bus_mem_add_mapping(bpa, size, cacheable, bshp)
 	 * on those machines.
	 */
 	if (cpu_class != CPUCLASS_386) {
-		pte = kvtopte(va);
 		if (cacheable)
-			*pte &= ~PG_N;
+			bits = pmap_pte_setbits(va, 0, PG_N);
 		else
-			*pte |= PG_N;
+			bits = pmap_pte_setbits(va, PG_N, 0);
 #ifdef MULTIPROCESSOR
-		pmap_tlb_shootdown(pmap_kernel(), va, *pte,
+		pmap_tlb_shootdown(pmap_kernel(), va, bits,
 		    &cpumask);
 #else
 		pmap_update_pg(va);
@@ -3526,7 +3526,7 @@ bus_space_unmap(t, bsh, size)
 {
 	struct extent *ex;
 	u_long va, endva;
-	bus_addr_t bpa;
+	paddr_t bpa;
 
 	/*
 	 * Find the correct extent and bus physical address.
@@ -3536,7 +3536,7 @@ bus_space_unmap(t, bsh, size)
 		bpa = bsh;
 	} else if (t == I386_BUS_SPACE_MEM) {
 		ex = iomem_ex;
-		bpa = (bus_addr_t)ISA_PHYSADDR(bsh);
+		bpa = (u_long)ISA_PHYSADDR(bsh);
 		if (IOM_BEGIN <= bpa && bpa <= IOM_END)
 			goto ok;
 
@@ -3572,7 +3572,7 @@ _bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size,
     bus_addr_t *adrp)
 {
 	u_long va, endva;
-	bus_addr_t bpa;
+	paddr_t bpa;
 
 	/*
 	 * Find the correct bus physical address.
@@ -3580,7 +3580,7 @@ _bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size,
 	if (t == I386_BUS_SPACE_IO) {
 		bpa = bsh;
 	} else if (t == I386_BUS_SPACE_MEM) {
-		bpa = (bus_addr_t)ISA_PHYSADDR(bsh);
+		bpa = (u_long)ISA_PHYSADDR(bsh);
 		if (IOM_BEGIN <= bpa && bpa <= IOM_END)
 			goto ok;
 
@@ -3603,9 +3603,8 @@ _bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size,
 		panic("bus_space_unmap: bad bus space tag");
 
 ok:
-	if (adrp != NULL) {
+	if (adrp != NULL)
 		*adrp = bpa;
-	}
 }
 
 void
@@ -3647,6 +3646,7 @@ _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
 	struct i386_bus_dmamap *map;
 	void *mapstore;
 	size_t mapsize;
+	int npages;
 
 	/*
 	 * Allocate and initialize the DMA map.  The end of the map
@@ -3662,6 +3662,17 @@ _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
 	 */
 	mapsize = sizeof(struct i386_bus_dmamap) +
 	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
+	npages = 0;
+#ifndef SMALL_KERNEL
+	if (avail_end2 > avail_end &&
+	    (flags & (BUS_DMA_64BIT|BUS_DMA_24BIT)) == 0) {
+		/* this many pages plus one in case we get split */
+		npages = round_page(size) / PAGE_SIZE + 1;
+		if (npages < nsegments)	/* looks stupid, but possible */
+			npages = nsegments;
+		mapsize += sizeof(struct vm_page *) * npages;
+	}
+#endif	/* !SMALL_KERNEL */
 	if ((mapstore = malloc(mapsize, M_DEVBUF,
 	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
 		return (ENOMEM);
@@ -3672,10 +3683,55 @@ _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
 	map->_dm_segcnt = nsegments;
 	map->_dm_maxsegsz = maxsegsz;
 	map->_dm_boundary = boundary;
+	map->_dm_pages = npages? (void *)&map->dm_segs[nsegments] : NULL;
+	map->_dm_npages = npages;
 	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
 	map->dm_mapsize = 0;		/* no valid mappings */
 	map->dm_nsegs = 0;
 
+#ifndef SMALL_KERNEL
+	if (npages) {
+		struct pglist mlist;
+		vaddr_t va;
+		int error;
+
+		size = npages << PGSHIFT;
+		va = uvm_km_valloc(kernel_map, size);
+		if (va == 0) {
+			map->_dm_npages = 0;
+			free(map, M_DEVBUF);
+			return (ENOMEM);
+		}
+
+		TAILQ_INIT(&mlist);
+		/* if not a 64bit map -- allocate some bouncy-bouncy */
+		error = uvm_pglistalloc(size,
+		    round_page(ISA_DMA_BOUNCE_THRESHOLD), 0xfffff000,
+		    PAGE_SIZE, boundary, &mlist, nsegments,
+		    (flags & BUS_DMA_NOWAIT) == 0);
+		if (error) {
+			map->_dm_npages = 0;
+			uvm_km_free(kernel_map, (vaddr_t)va, size);
+			free(map, M_DEVBUF);
+			return (ENOMEM);
+		} else {
+			struct vm_page **pg = map->_dm_pages;
+
+			npages--;
+			*pg = TAILQ_FIRST(&mlist);
+			pmap_kenter_pa(va, VM_PAGE_TO_PHYS(*pg),
+			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
+			for (pg++, va += PAGE_SIZE; npages--;
+			    pg++, va += PAGE_SIZE) {
+				*pg = TAILQ_NEXT(pg[-1], pageq);
+				pmap_kenter_pa(va, VM_PAGE_TO_PHYS(*pg),
+				    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
+			}
+		}
+		map->_dm_pgva = va;
+	}
+#endif	/* !SMALL_KERNEL */
+
 	*dmamp = map;
 	return (0);
 }
@@ -3706,7 +3762,7 @@ _bus_dmamap_load(t, map, buf, buflen, p, flags)
 	struct proc *p;
 	int flags;
 {
-	bus_addr_t lastaddr;
+	paddr_t lastaddr;
 	int seg, error;
 
 	/*
@@ -3887,6 +3943,7 @@ _bus_dmamap_unload(t, map)
 	 */
 	map->dm_mapsize = 0;
 	map->dm_nsegs = 0;
+	map->_dm_nused = 0;
 }
 
 /*
@@ -3894,15 +3951,47 @@ _bus_dmamap_unload(t, map)
  * by bus-specific DMA map synchronization functions.
 */
 void
-_bus_dmamap_sync(t, map, addr, size, op)
+_bus_dmamap_sync(t, map, offset, size, op)
 	bus_dma_tag_t t;
 	bus_dmamap_t map;
-	bus_addr_t addr;
+	bus_addr_t offset;
 	bus_size_t size;
 	int op;
 {
+#ifndef SMALL_KERNEL
+	bus_dma_segment_t *sg;
+	int i, off = offset;
+	bus_size_t l;
+
+	/* scan the segment list performing necessary copies */
+	if (!(map->_dm_flags & BUS_DMA_64BIT) && map->_dm_nused) {
+		for (i = map->_dm_segcnt, sg = map->dm_segs;
+		    size && i--; sg++) {
+			if (off >= sg->ds_len) {
+				off -= sg->ds_len;
+				continue;
+			}
 
-	/* Nothing to do here. */
+			l = sg->ds_len - off;
+			if (l > size)
+				l = size;
+			size -= l;
+			if (sg->ds_addr2) {
+				if (op & BUS_DMASYNC_POSTREAD) {
+					bcopy((void *)(sg->ds_va2 + off),
+					    (void *)(sg->ds_va + off), l);
+					pae_copy++;
+				}
+				if (op & BUS_DMASYNC_PREWRITE) {
+					bcopy((void *)(sg->ds_va + off),
+					    (void *)(sg->ds_va2 + off), l);
+					pae_copy++;
+				}
+			}
+			off = 0;
+		}
+	}
+#endif	/* !SMALL_KERNEL */
 }
 
 /*
@@ -4072,8 +4161,8 @@ _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags, lastaddrp, segp, first)
 	int first;
 {
 	bus_size_t sgsize;
-	bus_addr_t curaddr, lastaddr, baddr, bmask;
-	vaddr_t vaddr = (vaddr_t)buf;
+	paddr_t curaddr, lastaddr, oaddr, baddr, bmask;
+	vaddr_t pgva, vaddr = (vaddr_t)buf;
 	int seg;
 	pmap_t pmap;
 
@@ -4089,7 +4178,24 @@ _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags, lastaddrp, segp, first)
 		/*
 		 * Get the physical address for this segment.
 		 */
-		pmap_extract(pmap, vaddr, (paddr_t *)&curaddr);
+		pmap_extract(pmap, vaddr, &curaddr);
+		oaddr = 0;
+		pgva = 0;
+#ifndef SMALL_KERNEL
+		if (!(map->_dm_flags & BUS_DMA_64BIT) &&
+		    curaddr >= 0x100000000ULL) {
+			struct vm_page *pg;
+			int page, off;
+
+			if (map->_dm_nused + 1 >= map->_dm_npages)
+				return (ENOMEM);
+			off = vaddr & PAGE_MASK;
+			pg = map->_dm_pages[page = map->_dm_nused++];
+			oaddr = curaddr;
+			curaddr = VM_PAGE_TO_PHYS(pg) + off;
+			pgva = map->_dm_pgva + (page << PGSHIFT) + off;
+		}
+#endif	/* !SMALL_KERNEL */
 
 		/*
 		 * Compute the segment size, and adjust counts.
@@ -4113,7 +4219,10 @@ _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags, lastaddrp, segp, first)
 		 */
 		if (first) {
 			map->dm_segs[seg].ds_addr = curaddr;
+			map->dm_segs[seg].ds_addr2 = oaddr;
 			map->dm_segs[seg].ds_len = sgsize;
+			map->dm_segs[seg].ds_va = vaddr;
+			map->dm_segs[seg].ds_va2 = pgva;
 			first = 0;
 		} else {
 			if (curaddr == lastaddr &&
@@ -4127,7 +4236,10 @@ _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags, lastaddrp, segp, first)
 				if (++seg >= map->_dm_segcnt)
 					break;
 				map->dm_segs[seg].ds_addr = curaddr;
+				map->dm_segs[seg].ds_addr2 = oaddr;
 				map->dm_segs[seg].ds_len = sgsize;
+				map->dm_segs[seg].ds_va = vaddr;
+				map->dm_segs[seg].ds_va2 = pgva;
 			}
 		}
 
@@ -4170,6 +4282,19 @@ _bus_dmamem_alloc_range(t, size, alignment, boundary, segs, nsegs, rsegs,
 	/* Always round the size. */
 	size = round_page(size);
 
+	if (flags & BUS_DMA_64BIT) {
+		if (high > 0x100000000ULL && low < 0x100000000ULL)
+			low = 0x100000000ULL;
+	} else if (high > 0x100000000ULL) {
+		if (low >= 0x100000000ULL) {
+#ifdef DIAGNOSTIC
+			printf("_bus_dmamem_alloc_range: "
+			    "32bit request in above 4GB space\n");
+#endif
+			return (EINVAL);
+		} else
+			high = 0x100000000ULL;
+	}
 	TAILQ_INIT(&mlist);
 
 	/*
@@ -4215,7 +4340,6 @@ _bus_dmamem_alloc_range(t, size, alignment, boundary, segs, nsegs, rsegs,
 		}
 		lastaddr = curaddr;
 	}
-
 	*rsegs = curseg + 1;
 
 	return (0);
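The core idea of the change is worth spelling out: DMA maps created without BUS_DMA_64BIT get a set of bounce pages allocated below 4GB at bus_dmamap_create() time; when _bus_dmamap_load_buffer() meets a physical address above 4GB it substitutes one of those pages, and _bus_dmamap_sync() copies data between the real buffer and the bounce page around each transfer. What follows is a minimal, self-contained userland sketch of that copy step only, assuming nothing beyond libc; struct seg, sync_segs() and the DMASYNC_* constants are illustrative stand-ins that mirror the ds_va/ds_va2 fields added in this diff, not the kernel's bus_dma API.

/*
 * Minimal userland sketch (NOT the kernel code) of the bounce-copy step
 * that the new _bus_dmamap_sync() performs.  Each DMA segment remembers
 * the original buffer address (ds_va here) and, when the data really
 * lives above 4GB, a below-4GB bounce copy (ds_va2).  PREWRITE copies
 * into the bounce page before the device reads it; POSTREAD copies back
 * out after the device has written it.
 */
#include <stdio.h>
#include <string.h>

#define DMASYNC_PREWRITE	0x1	/* simplified stand-ins, not the   */
#define DMASYNC_POSTREAD	0x2	/* kernel's BUS_DMASYNC_* values   */

struct seg {				/* models ds_va/ds_va2/ds_len      */
	char	*ds_va;			/* original buffer                 */
	char	*ds_va2;		/* bounce copy, NULL if not needed */
	size_t	 ds_len;
};

static void
sync_segs(struct seg *sg, int nsegs, size_t offset, size_t size, int op)
{
	size_t off = offset, l;
	int i;

	for (i = 0; size && i < nsegs; i++, sg++) {
		if (off >= sg->ds_len) {	/* request starts past this seg */
			off -= sg->ds_len;
			continue;
		}
		l = sg->ds_len - off;
		if (l > size)
			l = size;
		size -= l;
		if (sg->ds_va2) {
			if (op & DMASYNC_POSTREAD)	/* device -> cpu */
				memcpy(sg->ds_va + off, sg->ds_va2 + off, l);
			if (op & DMASYNC_PREWRITE)	/* cpu -> device */
				memcpy(sg->ds_va2 + off, sg->ds_va + off, l);
		}
		off = 0;
	}
}

int
main(void)
{
	char real[16] = "hello, device!";
	char bounce[16] = "";
	struct seg sg = { real, bounce, sizeof(real) };

	sync_segs(&sg, 1, 0, sizeof(real), DMASYNC_PREWRITE);
	printf("bounce page now holds: %s\n", bounce);
	return (0);
}

The kernel version in the diff additionally counts each copy in pae_copy and skips the scan entirely for maps flagged BUS_DMA_64BIT or with no bounced segments in use.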