Diffstat (limited to 'sys/arch')
-rw-r--r-- | sys/arch/alpha/alpha/machdep.c | 73
-rw-r--r-- | sys/arch/alpha/alpha/pmap.c | 7
-rw-r--r-- | sys/arch/amd64/amd64/machdep.c | 164
-rw-r--r-- | sys/arch/arm/arm/arm32_machdep.c | 88
-rw-r--r-- | sys/arch/aviion/aviion/machdep.c | 88
-rw-r--r-- | sys/arch/hp300/hp300/machdep.c | 80
-rw-r--r-- | sys/arch/hppa/hppa/machdep.c | 77
-rw-r--r-- | sys/arch/hppa64/hppa64/machdep.c | 82
-rw-r--r-- | sys/arch/i386/i386/machdep.c | 159
-rw-r--r-- | sys/arch/luna88k/luna88k/machdep.c | 90
-rw-r--r-- | sys/arch/mac68k/mac68k/machdep.c | 78
-rw-r--r-- | sys/arch/macppc/macppc/machdep.c | 83
-rw-r--r-- | sys/arch/mvme68k/mvme68k/machdep.c | 86
-rw-r--r-- | sys/arch/mvme88k/mvme88k/machdep.c | 90
-rw-r--r-- | sys/arch/mvmeppc/mvmeppc/machdep.c | 84
-rw-r--r-- | sys/arch/sgi/sgi/machdep.c | 79
-rw-r--r-- | sys/arch/sh/sh/sh_machdep.c | 89
-rw-r--r-- | sys/arch/solbourne/solbourne/machdep.c | 94
-rw-r--r-- | sys/arch/sparc/sparc/machdep.c | 94
-rw-r--r-- | sys/arch/sparc64/sparc64/machdep.c | 91
-rw-r--r-- | sys/arch/vax/vax/machdep.c | 87
21 files changed, 225 insertions, 1638 deletions
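
Every hunk below applies the same replacement: the old nbuf-based sizing and the per-buffer virtual-memory setup at startup are deleted, and bufpages is computed directly from physical memory and then clamped against kernel virtual space. As a reading aid, here is a minimal stand-alone sketch of that rule in user-space C. The values of PAGE_SIZE, the kernel VA range and physmem are illustrative placeholders, and BUFCACHEPERCENT is 5 or 10 depending on the platform (10 is used here); this is not code from the commit itself.

/*
 * Sketch of the buffer cache sizing rule this commit introduces:
 * use bufcachepercent% of physical memory, but never more than 25%
 * of the kernel virtual address space.  All constants below are
 * placeholders, not values from any real platform.
 */
#include <stdio.h>

#define PAGE_SIZE		4096UL		/* assumed page size */
#define VM_MIN_KERNEL_ADDRESS	0xc0000000UL	/* hypothetical */
#define VM_MAX_KERNEL_ADDRESS	0xf0000000UL	/* hypothetical */
#define BUFCACHEPERCENT		10		/* 5 or 10 in the tree */

int
main(void)
{
	unsigned long physmem = 131072;	/* pages; 512MB at 4KB/page */
	unsigned long bufpages = 0;	/* 0 = not set by config/patch */
	unsigned long kvm_pages;

	/* Default: bufcachepercent% of physical memory, in pages. */
	if (bufpages == 0)
		bufpages = physmem * BUFCACHEPERCENT / 100;

	/* Restrict to at most 25% filled kvm. */
	kvm_pages = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) /
	    PAGE_SIZE;
	if (bufpages > kvm_pages / 4)
		bufpages = kvm_pages / 4;

	printf("bufpages = %lu (%lu KB)\n",
	    bufpages, bufpages * PAGE_SIZE / 1024);
	return 0;
}

Compared with the removed code, there is no nbuf count and no MAXBSIZE-sized virtual window mapped per buffer at startup; only the bufpages figure is computed in machine-dependent code. On i386 the commit additionally derives it from memory below 4GB (btoc(avail_end)) and raises the default BUFCACHEPERCENT from 5 to 10.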
diff --git a/sys/arch/alpha/alpha/machdep.c b/sys/arch/alpha/alpha/machdep.c index 3d600589dd7..b14871b78c0 100644 --- a/sys/arch/alpha/alpha/machdep.c +++ b/sys/arch/alpha/alpha/machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: machdep.c,v 1.107 2007/04/23 10:07:43 art Exp $ */ +/* $OpenBSD: machdep.c,v 1.108 2007/05/26 20:26:50 pedro Exp $ */ /* $NetBSD: machdep.c,v 1.210 2000/06/01 17:12:38 thorpej Exp $ */ /*- @@ -134,12 +134,6 @@ void printregs(struct reg *); /* * Declare these as initialized data so we can patch them. */ -#ifdef NBUF -int nbuf = NBUF; -#else -int nbuf = 0; -#endif - #ifndef BUFCACHEPERCENT #define BUFCACHEPERCENT 10 #endif @@ -173,12 +167,6 @@ int alpha_cpus; int bootdev_debug = 0; /* patchable, or from DDB */ -/* - * XXX We need an address to which we can assign things so that they - * won't be optimized away because we didn't use the value. - */ -u_int32_t no_optimize; - /* the following is used externally (sysctl_hw) */ char machine[] = MACHINE; /* from <machine/param.h> */ char cpu_model[128]; @@ -842,20 +830,6 @@ allocsys(v) valloc(msqids, struct msqid_ds, msginfo.msgmni); #endif - /* - * Determine how many buffers to allocate. - * We allocate 10% of memory for buffer space. Insure a - * minimum of 16 buffers. - */ - if (bufpages == 0) - bufpages = (physmem / (100/bufcachepercent)); - if (nbuf == 0) { - nbuf = bufpages; - if (nbuf < 16) - nbuf = 16; - } - valloc(buf, struct buf, nbuf); - #undef valloc return v; @@ -878,10 +852,7 @@ consinit() void cpu_startup() { - register unsigned i; - int base, residual; vaddr_t minaddr, maxaddr; - vsize_t size; #if defined(DEBUG) extern int pmapdebug; int opmapdebug = pmapdebug; @@ -908,44 +879,12 @@ cpu_startup() } /* - * Allocate virtual address space for file I/O buffers. - * Note they are different than the array of headers, 'buf', - * and usually occupy more virtual memory than physical. + * Determine how many buffers to allocate. + * We allocate bufcachepercent% of memory for buffer space. */ - size = MAXBSIZE * nbuf; - if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size), - NULL, UVM_UNKNOWN_OFFSET, 0, - UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE, - UVM_ADV_NORMAL, 0))) - panic("startup: cannot allocate VM for buffers"); - base = bufpages / nbuf; - residual = bufpages % nbuf; - for (i = 0; i < nbuf; i++) { - vsize_t curbufsize; - vaddr_t curbuf; - struct vm_page *pg; + if (bufpages == 0) + bufpages = physmem * bufcachepercent / 100; - /* - * Each buffer has MAXBSIZE bytes of VM space allocated. Of - * that MAXBSIZE space, we allocate and map (base+1) pages - * for the first "residual" buffers, and then we allocate - * "base" pages for the rest. - */ - curbuf = (vaddr_t) buffers + (i * MAXBSIZE); - curbufsize = PAGE_SIZE * ((i < residual) ? (base+1) : base); - - while (curbufsize) { - pg = uvm_pagealloc(NULL, 0, NULL, 0); - if (pg == NULL) - panic("cpu_startup: not enough memory for " - "buffer cache"); - pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg), - VM_PROT_READ|VM_PROT_WRITE); - curbuf += PAGE_SIZE; - curbufsize -= PAGE_SIZE; - } - pmap_update(pmap_kernel()); - } /* * Allocate a submap for exec arguments. This map effectively * limits the number of processes exec'ing at any time. 
@@ -972,8 +911,6 @@ cpu_startup() printf("stolen memory for VM structures = %d\n", pmap_pages_stolen * PAGE_SIZE); } #endif - printf("using %ld buffers containing %ld bytes (%ldK) of memory\n", - (long)nbuf, (long)bufpages * PAGE_SIZE, (long)bufpages * (PAGE_SIZE / 1024)); /* * Set up buffers, so they can be used to read disk labels. diff --git a/sys/arch/alpha/alpha/pmap.c b/sys/arch/alpha/alpha/pmap.c index e48e2c41aad..2aceed49674 100644 --- a/sys/arch/alpha/alpha/pmap.c +++ b/sys/arch/alpha/alpha/pmap.c @@ -1,4 +1,4 @@ -/* $OpenBSD: pmap.c,v 1.51 2007/05/04 22:51:12 miod Exp $ */ +/* $OpenBSD: pmap.c,v 1.52 2007/05/26 20:26:50 pedro Exp $ */ /* $NetBSD: pmap.c,v 1.154 2000/12/07 22:18:55 thorpej Exp $ */ /*- @@ -777,9 +777,8 @@ pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids) /* * Figure out how many PTE's are necessary to map the kernel. */ - lev3mapsize = (VM_PHYS_SIZE + - nbuf * MAXBSIZE + 16 * NCARGS + PAGER_MAP_SIZE) / PAGE_SIZE + - (maxproc * UPAGES) + nkmempages; + lev3mapsize = (VM_PHYS_SIZE + 16 * NCARGS + PAGER_MAP_SIZE) / + PAGE_SIZE + (maxproc * UPAGES) + nkmempages; #ifdef SYSVSHM lev3mapsize += shminfo.shmall; diff --git a/sys/arch/amd64/amd64/machdep.c b/sys/arch/amd64/amd64/machdep.c index a49db688c75..823f5d96da0 100644 --- a/sys/arch/amd64/amd64/machdep.c +++ b/sys/arch/amd64/amd64/machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: machdep.c,v 1.56 2007/05/23 20:33:46 pvalchev Exp $ */ +/* $OpenBSD: machdep.c,v 1.57 2007/05/26 20:26:50 pedro Exp $ */ /* $NetBSD: machdep.c,v 1.3 2003/05/07 22:58:18 fvdl Exp $ */ /*- @@ -178,12 +178,6 @@ int kbd_reset; struct vm_map *exec_map = NULL; struct vm_map *phys_map = NULL; -#ifdef NBUF -int nbuf = NBUF; -#else -int nbuf = 0; -#endif - #ifndef BUFCACHEPERCENT #define BUFCACHEPERCENT 10 #endif @@ -248,7 +242,7 @@ phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX]; int mem_cluster_cnt; vaddr_t allocsys(vaddr_t); -void setup_buffers(vaddr_t *); +void setup_buffers(void); int cpu_dump(void); int cpu_dumpsize(void); u_long cpu_dump_mempagecnt(void); @@ -319,11 +313,7 @@ cpu_startup(void) if (allocsys(v) - v != sz) panic("startup: table size inconsistency"); - /* - * Now allocate buffers proper. They are different than the above - * in that they usually occupy more virtual memory than physical. - */ - setup_buffers(&maxaddr); + setup_buffers(); /* * Allocate a submap for exec arguments. This map effectively @@ -342,8 +332,6 @@ cpu_startup(void) printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free), ptoa(uvmexp.free)/1024/1024); - printf("using %u buffers containing %u bytes (%uK) of memory\n", - nbuf, bufpages * PAGE_SIZE, bufpages * PAGE_SIZE / 1024); bufinit(); @@ -359,18 +347,6 @@ cpu_startup(void) x86_bus_space_mallocok(); } - -/* - * The following defines are for the code in setup_buffers that tries to - * ensure that enough ISA DMAable memory is still left after the buffercache - * has been allocated. - */ -#define CHUNKSZ (3 * 1024 * 1024) -#define ISADMA_LIMIT (16 * 1024 * 1024) /* XXX wrong place */ -#define ALLOC_PGS(sz, limit, pgs) \ - uvm_pglistalloc((sz), 0, (limit), PAGE_SIZE, 0, &(pgs), 1, 0) -#define FREE_PGS(pgs) uvm_pglistfree(&(pgs)) - /* * Allocate space for system data structures. We are given * a starting virtual address and we return a final virtual @@ -394,136 +370,24 @@ allocsys(vaddr_t v) valloc(msqids, struct msqid_ds, msginfo.msgmni); #endif - /* - * Determine how many buffers to allocate. We use 10% of the - * first 2MB of memory, and 10% of the rest, with a minimum of 16 - * buffers. 
We allocate 1/2 as many swap buffer headers as file - * i/o buffers. - */ - if (bufpages == 0) { - bufpages = (btoc(2 * 1024 * 1024) + physmem) * - bufcachepercent / 100; - } - if (nbuf == 0) { - nbuf = bufpages; - if (nbuf < 16) - nbuf = 16; - } - - /* Restrict to at most 35% filled kvm */ - /* XXX - This needs UBC... */ - if (nbuf > - (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / MAXBSIZE * 35 / 100) - nbuf = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / - MAXBSIZE * 35 / 100; - - /* More buffer pages than fits into the buffers is senseless. */ - if (bufpages > nbuf * MAXBSIZE / PAGE_SIZE) - bufpages = nbuf * MAXBSIZE / PAGE_SIZE; - - valloc(buf, struct buf, nbuf); return v; } void -setup_buffers(vaddr_t *maxaddr) +setup_buffers() { - vsize_t size; - vaddr_t addr; - int base, residual, left, chunk, i; - struct pglist pgs, saved_pgs; - struct vm_page *pg; - int rv; - - size = MAXBSIZE * nbuf; - addr = vm_map_min(kernel_map); - if ((rv = uvm_map(kernel_map, &addr, round_page(size), - NULL, UVM_UNKNOWN_OFFSET, 0, - UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE, - UVM_ADV_NORMAL, 0)))) - panic("cpu_startup: cannot allocate VM for buffers %d", rv); - buffers = (char *)addr; - - base = bufpages / nbuf; - residual = bufpages % nbuf; - if (base >= MAXBSIZE / PAGE_SIZE) { - /* don't want to alloc more physical mem than needed */ - base = MAXBSIZE / PAGE_SIZE; - residual = 0; - } - /* - * In case we might need DMA bouncing we have to make sure there - * is some memory below 16MB available. On machines with many - * pages reserved for the buffer cache we risk filling all of that - * area with buffer pages. We still want much of the buffers - * reside there as that lowers the probability of them needing to - * bounce, but we have to set aside some space for DMA buffers too. - * - * The current strategy is to grab hold of one 3MB chunk below 16MB - * first, which we are saving for DMA buffers, then try to get - * one chunk at a time for fs buffers, until that is not possible - * anymore, at which point we get the rest wherever we may find it. - * After that we give our saved area back. That will guarantee at - * least 3MB below 16MB left for drivers' attach routines, among - * them isadma. However we still have a potential problem of PCI - * devices attached earlier snatching that memory. This can be - * solved by making the PCI DMA memory allocation routines go for - * memory above 16MB first. + * Determine how many buffers to allocate. + * We allocate bufcachepercent% of memory for buffer space. */ - - left = bufpages; - - /* - * First, save ISA DMA bounce buffer area so we won't lose that - * capability. - */ - TAILQ_INIT(&saved_pgs); - TAILQ_INIT(&pgs); - if (!ALLOC_PGS(CHUNKSZ, ISADMA_LIMIT, saved_pgs)) { - /* - * Then, grab as much ISA DMAable memory as possible - * for the buffer cache as it is nice to not need to - * bounce all buffer I/O. - */ - for (left = bufpages; left > 0; left -= chunk) { - chunk = min(left, CHUNKSZ / PAGE_SIZE); - if (ALLOC_PGS(chunk * PAGE_SIZE, ISADMA_LIMIT, pgs)) - break; - } - } - - /* - * If we need more pages for the buffer cache, get them from anywhere. - */ - if (left > 0 && ALLOC_PGS(left * PAGE_SIZE, avail_end, pgs)) - panic("cannot get physical memory for buffer cache"); - - /* - * Finally, give back the ISA DMA bounce buffer area, so it can be - * allocated by the isadma driver later. 
- */ - if (!TAILQ_EMPTY(&saved_pgs)) - FREE_PGS(saved_pgs); - - pg = TAILQ_FIRST(&pgs); - for (i = 0; i < nbuf; i++) { - /* - * First <residual> buffers get (base+1) physical pages - * allocated for them. The rest get (base) physical pages. - * - * The rest of each buffer occupies virtual space, - * but has no physical memory allocated for it. - */ - addr = (vaddr_t)buffers + i * MAXBSIZE; - for (size = PAGE_SIZE * (i < residual ? base + 1 : base); - size > 0; size -= PAGE_SIZE, addr += PAGE_SIZE) { - pmap_kenter_pa(addr, VM_PAGE_TO_PHYS(pg), - VM_PROT_READ|VM_PROT_WRITE); - pg = TAILQ_NEXT(pg, pageq); - } - } - pmap_update(pmap_kernel()); + if (bufpages == 0) + bufpages = physmem * bufcachepercent / 100; + + /* Restrict to at most 25% filled kvm */ + if (bufpages > + (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE / 4) + bufpages = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / + PAGE_SIZE / 4; } /* diff --git a/sys/arch/arm/arm/arm32_machdep.c b/sys/arch/arm/arm/arm32_machdep.c index ce628cccc41..81b7fe52ca6 100644 --- a/sys/arch/arm/arm/arm32_machdep.c +++ b/sys/arch/arm/arm/arm32_machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: arm32_machdep.c,v 1.23 2007/05/23 20:33:46 pvalchev Exp $ */ +/* $OpenBSD: arm32_machdep.c,v 1.24 2007/05/26 20:26:50 pedro Exp $ */ /* $NetBSD: arm32_machdep.c,v 1.42 2003/12/30 12:33:15 pk Exp $ */ /* @@ -76,12 +76,6 @@ struct vm_map *phys_map = NULL; extern int physmem; caddr_t allocsys(caddr_t); -#ifdef NBUF -int nbuf = NBUF; -#else -int nbuf = 0; -#endif - #ifndef BUFCACHEPERCENT #define BUFCACHEPERCENT 5 #endif @@ -263,8 +257,6 @@ cpu_startup() paddr_t maxaddr; caddr_t sysbase; caddr_t size; - vsize_t bufsize; - int base, residual; proc0paddr = (struct user *)kernelstack.pv_va; proc0.p_addr = proc0paddr; @@ -328,49 +320,18 @@ cpu_startup() if ((caddr_t)((allocsys(sysbase) - sysbase)) != size) panic("cpu_startup: system table size inconsistency"); - /* - * Now allocate buffers proper. They are different than the above - * in that they usually occupy more virtual memory than physical. + /* + * Determine how many buffers to allocate. + * We allocate bufcachepercent% of memory for buffer space. */ - bufsize = MAXBSIZE * nbuf; - if (uvm_map(kernel_map, (vaddr_t *)&buffers, round_page(bufsize), - NULL, UVM_UNKNOWN_OFFSET, 0, - UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE, - UVM_ADV_NORMAL, 0)) != 0) - panic("cpu_startup: cannot allocate UVM space for buffers"); - minaddr = (vaddr_t)buffers; - if ((bufpages / nbuf) >= btoc(MAXBSIZE)) { - /* don't want to alloc more physical mem than needed */ - bufpages = btoc(MAXBSIZE) * nbuf; - } + if (bufpages == 0) + bufpages = physmem * bufcachepercent / 100; - base = bufpages / nbuf; - residual = bufpages % nbuf; - for (loop = 0; loop < nbuf; ++loop) { - vsize_t curbufsize; - vaddr_t curbuf; - struct vm_page *pg; - - /* - * Each buffer has MAXBSIZE bytes of VM space allocated. Of - * that MAXBSIZE space, we allocate and map (base+1) pages - * for the first "residual" buffers, and then we allocate - * "base" pages for the rest. - */ - curbuf = (vaddr_t) buffers + (loop * MAXBSIZE); - curbufsize = NBPG * ((loop < residual) ? 
(base+1) : base); - - while (curbufsize) { - pg = uvm_pagealloc(NULL, 0, NULL, 0); - if (pg == NULL) - panic("cpu_startup: not enough memory for buffer cache"); - pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg), - VM_PROT_READ|VM_PROT_WRITE); - curbuf += PAGE_SIZE; - curbufsize -= PAGE_SIZE; - } - } - pmap_update(pmap_kernel()); + /* Restrict to at most 25% filled kvm */ + if (bufpages > + (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE / 4) + bufpages = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / + PAGE_SIZE / 4; /* * Allocate a submap for exec arguments. This map effectively @@ -557,33 +518,6 @@ allocsys(caddr_t v) valloc(msghdrs, struct msg, msginfo.msgtql); valloc(msqids, struct msqid_ds, msginfo.msgmni); #endif - /* - * Determine how many buffers to allocate. We use 10% of the - * first 2MB of memory, and 5% of the rest, with a minimum of 16 - * buffers. We allocate 1/2 as many swap buffer headers as file - * i/o buffers. - */ - if (bufpages == 0) { - bufpages = (btoc(2 * 1024 * 1024) + physmem) * - bufcachepercent / 100; - } - if (nbuf == 0) { - nbuf = bufpages; - if (nbuf < 16) - nbuf = 16; - } - - /* Restrict to at most 35% filled kvm */ - /* XXX - This needs UBC... */ - if (nbuf > - (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / MAXBSIZE * 35 / 100) - nbuf = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / - MAXBSIZE * 35 / 100; - - /* More buffer pages than fits into the buffers is senseless. */ - if (bufpages > nbuf * MAXBSIZE / PAGE_SIZE) - bufpages = nbuf * MAXBSIZE / PAGE_SIZE; - valloc(buf, struct buf, nbuf); return v; } diff --git a/sys/arch/aviion/aviion/machdep.c b/sys/arch/aviion/aviion/machdep.c index fe4951338b7..eb4b2527e0a 100644 --- a/sys/arch/aviion/aviion/machdep.c +++ b/sys/arch/aviion/aviion/machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: machdep.c,v 1.6 2007/05/12 20:03:22 miod Exp $ */ +/* $OpenBSD: machdep.c,v 1.7 2007/05/26 20:26:50 pedro Exp $ */ /* * Copyright (c) 1998, 1999, 2000, 2001 Steve Murphree, Jr. * Copyright (c) 1996 Nivas Madhur @@ -116,12 +116,6 @@ __cpu_simple_lock_t cpu_mutex = __SIMPLELOCK_UNLOCKED; /* * Declare these as initialized data so we can patch them. */ -#ifdef NBUF -int nbuf = NBUF; -#else -int nbuf = 0; -#endif - #ifndef BUFCACHEPERCENT #define BUFCACHEPERCENT 5 #endif @@ -269,49 +263,17 @@ cpu_startup() platform->startup(); /* - * Now allocate buffers proper. They are different than the above - * in that they usually occupy more virtual memory than physical. + * Determine how many buffers to allocate. + * We allocate bufcachepercent% of memory for buffer space. */ - size = MAXBSIZE * nbuf; - if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size), - NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE, - UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_NORMAL, 0))) - panic("cpu_startup: cannot allocate VM for buffers"); - minaddr = (vaddr_t)buffers; - - if ((bufpages / nbuf) >= btoc(MAXBSIZE)) { - /* don't want to alloc more physical mem than needed */ - bufpages = btoc(MAXBSIZE) * nbuf; - } - base = bufpages / nbuf; - residual = bufpages % nbuf; - - for (i = 0; i < nbuf; i++) { - vsize_t curbufsize; - vaddr_t curbuf; - struct vm_page *pg; + if (bufpages == 0) + bufpages = physmem * bufcachepercent / 100; - /* - * Each buffer has MAXBSIZE bytes of VM space allocated. Of - * that MAXBSIZE space, we allocate and map (base+1) pages - * for the first "residual" buffers, and then we allocate - * "base" pages for the rest. - */ - curbuf = (vaddr_t)buffers + (i * MAXBSIZE); - curbufsize = PAGE_SIZE * ((i < residual) ? 
(base + 1) : base); - - while (curbufsize) { - pg = uvm_pagealloc(NULL, 0, NULL, 0); - if (pg == NULL) - panic("cpu_startup: not enough memory for " - "buffer cache"); - pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg), - VM_PROT_READ | VM_PROT_WRITE); - curbuf += PAGE_SIZE; - curbufsize -= PAGE_SIZE; - } - } - pmap_update(pmap_kernel()); + /* Restrict to at most 25% filled kvm */ + if (bufpages > + (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE / 4) + bufpages = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / + PAGE_SIZE / 4; /* * Allocate a submap for exec arguments. This map effectively @@ -327,8 +289,6 @@ cpu_startup() VM_PHYS_SIZE, 0, FALSE, NULL); printf("avail mem = %ld (%d pages)\n", ptoa(uvmexp.free), uvmexp.free); - printf("using %d buffers containing %d bytes of memory\n", nbuf, - bufpages * PAGE_SIZE); /* * Set up buffers, so they can be used to read disk labels. @@ -377,34 +337,6 @@ allocsys(v) valloc(msqids, struct msqid_ds, msginfo.msgmni); #endif - /* - * Determine how many buffers to allocate. We use 10% of the - * first 2MB of memory, and 5% of the rest, with a minimum of 16 - * buffers. We allocate 1/2 as many swap buffer headers as file - * i/o buffers. - */ - if (bufpages == 0) { - bufpages = (btoc(2 * 1024 * 1024) + physmem) * - bufcachepercent / 100; - } - if (nbuf == 0) { - nbuf = bufpages; - if (nbuf < 16) - nbuf = 16; - } - - /* Restrict to at most 70% filled kvm */ - if (nbuf > - (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / MAXBSIZE * 7 / 10) - nbuf = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / - MAXBSIZE * 7 / 10; - - /* More buffer pages than fits into the buffers is senseless. */ - if (bufpages > nbuf * MAXBSIZE / PAGE_SIZE) - bufpages = nbuf * MAXBSIZE / PAGE_SIZE; - - valloc(buf, struct buf, nbuf); - return v; } diff --git a/sys/arch/hp300/hp300/machdep.c b/sys/arch/hp300/hp300/machdep.c index 449dcfb9604..c04e5885585 100644 --- a/sys/arch/hp300/hp300/machdep.c +++ b/sys/arch/hp300/hp300/machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: machdep.c,v 1.111 2007/05/23 20:33:46 pvalchev Exp $ */ +/* $OpenBSD: machdep.c,v 1.112 2007/05/26 20:26:50 pedro Exp $ */ /* $NetBSD: machdep.c,v 1.121 1999/03/26 23:41:29 mycroft Exp $ */ /* @@ -104,12 +104,6 @@ extern paddr_t avail_start, avail_end; /* * Declare these as initialized data so we can patch them. */ -#ifdef NBUF -int nbuf = NBUF; -#else -int nbuf = 0; -#endif - #ifndef BUFCACHEPERCENT #define BUFCACHEPERCENT 5 #endif @@ -244,7 +238,6 @@ cpu_startup() extern char *etext; unsigned i; caddr_t v; - int base, residual; vaddr_t minaddr, maxaddr; vsize_t size; #ifdef DEBUG @@ -290,45 +283,17 @@ cpu_startup() panic("startup: table size inconsistency"); /* - * Now allocate buffers proper. They are different than the above - * in that they usually occupy more virtual memory than physical. + * Determine how many buffers to allocate. + * We allocate bufcachepercent% of memory for buffer space. */ - size = MAXBSIZE * nbuf; - if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size), - NULL, UVM_UNKNOWN_OFFSET, 0, - UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE, - UVM_ADV_NORMAL, 0))) - panic("startup: cannot allocate VM for buffers"); - minaddr = (vaddr_t)buffers; - base = bufpages / nbuf; - residual = bufpages % nbuf; - for (i = 0; i < nbuf; i++) { - vsize_t curbufsize; - vaddr_t curbuf; - struct vm_page *pg; + if (bufpages == 0) + bufpages = physmem * bufcachepercent / 100; - /* - * Each buffer has MAXBSIZE bytes of VM space allocated. 
Of - * that MAXBSIZE space, we allocate and map (base+1) pages - * for the first "residual" buffers, and then we allocate - * "base" pages for the rest. - */ - curbuf = (vaddr_t) buffers + (i * MAXBSIZE); - curbufsize = PAGE_SIZE * ((i < residual) ? (base+1) : base); - - while (curbufsize) { - pg = uvm_pagealloc(NULL, 0, NULL, 0); - if (pg == NULL) - panic("cpu_startup: not enough memory for " - "buffer cache"); - - pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg), - VM_PROT_READ|VM_PROT_WRITE); - curbuf += PAGE_SIZE; - curbufsize -= PAGE_SIZE; - } - } - pmap_update(pmap_kernel()); + /* Restrict to at most 25% filled kvm */ + if (bufpages > + (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE / 4) + bufpages = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / + PAGE_SIZE / 4; /* * Allocate a submap for exec arguments. This map effectively @@ -348,8 +313,6 @@ cpu_startup() #endif printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free), ptoa(uvmexp.free)/1024/1024); - printf("using %d buffers containing %u bytes (%uK) of memory\n", - nbuf, bufpages * PAGE_SIZE, bufpages * PAGE_SIZE / 1024); /* * Tell the VM system that page 0 isn't mapped. @@ -419,29 +382,6 @@ allocsys(v) valloc(msqids, struct msqid_ds, msginfo.msgmni); #endif - /* - * Determine how many buffers to allocate (enough to - * hold 5% of total physical memory, but at least 16). - * Allocate 1/2 as many swap buffer headers as file i/o buffers. - */ - if (bufpages == 0) - bufpages = physmem * bufcachepercent / 100; - if (nbuf == 0) { - nbuf = bufpages; - if (nbuf < 16) - nbuf = 16; - } - /* Restrict to at most 70% filled kvm */ - if (nbuf * MAXBSIZE > - (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) * 7 / 10) - nbuf = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / - MAXBSIZE * 7 / 10; - - /* More buffer pages than fits into the buffers is senseless. */ - if (bufpages > nbuf * MAXBSIZE / PAGE_SIZE) - bufpages = nbuf * MAXBSIZE / PAGE_SIZE; - - valloc(buf, struct buf, nbuf); return (v); } diff --git a/sys/arch/hppa/hppa/machdep.c b/sys/arch/hppa/hppa/machdep.c index 875387e4ab1..35d6469c7dd 100644 --- a/sys/arch/hppa/hppa/machdep.c +++ b/sys/arch/hppa/hppa/machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: machdep.c,v 1.155 2007/05/21 23:05:44 kettenis Exp $ */ +/* $OpenBSD: machdep.c,v 1.156 2007/05/26 20:26:50 pedro Exp $ */ /* * Copyright (c) 1999-2003 Michael Shalayeff @@ -88,12 +88,6 @@ /* * Patchable buffer cache parameters */ -#ifdef NBUF -int nbuf = NBUF; -#else -int nbuf = 0; -#endif - #ifndef BUFCACHEPERCENT #define BUFCACHEPERCENT 10 #endif /* BUFCACHEPERCENT */ @@ -383,29 +377,9 @@ hppa_init(start) * Now allocate kernel dynamic variables */ - /* buffer cache parameters */ - if (bufpages == 0) - bufpages = physmem / 100 * - (physmem <= 0x1000? 5 : bufcachepercent); - - if (nbuf == 0) - nbuf = bufpages < 16? 16 : bufpages; - - /* Restrict to at most 30% filled kvm */ - if (nbuf * MAXBSIZE > - (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) * 3 / 10) - nbuf = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / - MAXBSIZE * 3 / 10; - - /* More buffer pages than fits into the buffers is senseless. 
*/ - if (bufpages > nbuf * MAXBSIZE / PAGE_SIZE) - bufpages = nbuf * MAXBSIZE / PAGE_SIZE; - v1 = v = round_page(start); #define valloc(name, type, num) (name) = (type *)v; v = (vaddr_t)((name)+(num)) - valloc(buf, struct buf, nbuf); - #ifdef SYSVMSG valloc(msgpool, char, msginfo.msgmax); valloc(msgmaps, struct msgmap, msginfo.msgseg); @@ -646,8 +620,6 @@ void cpu_startup(void) { vaddr_t minaddr, maxaddr; - vsize_t size; - int i, base, residual; /* * i won't understand a friend of mine, @@ -661,39 +633,18 @@ cpu_startup(void) printf("real mem = %u (%u reserved for PROM, %u used by OpenBSD)\n", ctob(physmem), ctob(resvmem), ctob(resvphysmem - resvmem)); - size = MAXBSIZE * nbuf; - minaddr = vm_map_min(kernel_map); - if (uvm_map(kernel_map, &minaddr, round_page(size), - NULL, UVM_UNKNOWN_OFFSET, PAGE_SIZE, UVM_MAPFLAG(UVM_PROT_NONE, - UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_NORMAL, 0))) - panic("cpu_startup: cannot allocate VM for buffers"); - buffers = (caddr_t)minaddr; - base = bufpages / nbuf; - residual = bufpages % nbuf; - for (i = 0; i < nbuf; i++) { - vaddr_t curbuf; - int cbpgs; - - /* - * First <residual> buffers get (base+1) physical pages - * allocated for them. The rest get (base) physical pages. - * - * The rest of each buffer occupies virtual space, - * but has no physical memory allocated for it. - */ - curbuf = (vaddr_t) buffers + (i * MAXBSIZE); - - for (cbpgs = base + (i < residual? 1 : 0); cbpgs--; ) { - struct vm_page *pg; - - if ((pg = uvm_pagealloc(NULL, 0, NULL, 0)) == NULL) - panic("cpu_startup: not enough memory for " - "buffer cache"); - pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg), - UVM_PROT_RW); - curbuf += PAGE_SIZE; - } - } + /* + * Determine how many buffers to allocate. + * We allocate bufcachepercent% of memory for buffer space. + */ + if (bufpages == 0) + bufpages = physmem * bufcachepercent / 100; + + /* Restrict to at most 25% filled kvm */ + if (bufpages > + (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE / 4) + bufpages = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / + PAGE_SIZE / 4; /* * Allocate a submap for exec arguments. This map effectively @@ -709,8 +660,6 @@ cpu_startup(void) VM_PHYS_SIZE, 0, FALSE, NULL); printf("avail mem = %lu\n", ptoa(uvmexp.free)); - printf("using %d buffers containing %u bytes of memory\n", - nbuf, (unsigned)bufpages * PAGE_SIZE); /* * Set up buffers, so they can be used to read disk labels. diff --git a/sys/arch/hppa64/hppa64/machdep.c b/sys/arch/hppa64/hppa64/machdep.c index 29c22f24a4a..377bc6ddb7a 100644 --- a/sys/arch/hppa64/hppa64/machdep.c +++ b/sys/arch/hppa64/hppa64/machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: machdep.c,v 1.4 2006/01/10 19:21:14 martin Exp $ */ +/* $OpenBSD: machdep.c,v 1.5 2007/05/26 20:26:50 pedro Exp $ */ /* * Copyright (c) 2005 Michael Shalayeff @@ -75,12 +75,6 @@ /* * Patchable buffer cache parameters */ -#ifdef NBUF -int nbuf = NBUF; -#else -int nbuf = 0; -#endif - #ifndef BUFCACHEPERCENT #define BUFCACHEPERCENT 10 #endif /* BUFCACHEPERCENT */ @@ -290,29 +284,6 @@ TODO hpmc/toc/pfr /* sets resvphysmem */ pmap_bootstrap(start); - /* buffer cache parameters */ - if (bufpages == 0) - bufpages = physmem / 100 * - (physmem <= 0x1000? 5 : bufcachepercent); - - if (nbuf == 0) - nbuf = bufpages < 16? 16 : bufpages; - - /* Restrict to at most 50% filled kvm */ - if (nbuf * MAXBSIZE > - (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / 2) - nbuf = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / - MAXBSIZE / 2; - - /* More buffer pages than fits into the buffers is senseless. 
*/ - if (bufpages > nbuf * MAXBSIZE / PAGE_SIZE) - bufpages = nbuf * MAXBSIZE / PAGE_SIZE; - - if (!(buf = (struct buf *)pmap_steal_memory(round_page(nbuf * - sizeof(struct buf)), NULL, NULL))) - panic("cpu_startup: no space for bufs"); - bzero(buf, nbuf * sizeof(struct buf)); - /* space has been reserved in pmap_bootstrap() */ msgbufp = (struct msgbuf *)((vaddr_t)ptoa(physmem) - round_page(MSGBUFSIZE)); @@ -415,8 +386,6 @@ void cpu_startup(void) { vaddr_t minaddr, maxaddr; - vsize_t size; - int i, base, residual; /* * psychodelic kingdom come @@ -428,41 +397,18 @@ cpu_startup(void) printf("real mem = %u (%u reserved for PROM, %u used by OpenBSD)\n", ctob(physmem), ctob(resvmem), ctob(resvphysmem - resvmem)); -printf("here2\n"); - size = MAXBSIZE * nbuf; - if (uvm_map(kernel_map, &minaddr, round_page(size), - NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE, - UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_NORMAL, 0))) - panic("cpu_startup: cannot allocate VM for buffers"); - buffers = (caddr_t)minaddr; - base = bufpages / nbuf; - residual = bufpages % nbuf; - for (i = 0; i < nbuf; i++) { - vaddr_t curbuf; - int cbpgs, pd; - -{ extern int pmapdebug; pd = pmapdebug; pmapdebug = 0; } - /* - * First <residual> buffers get (base+1) physical pages - * allocated for them. The rest get (base) physical pages. - * - * The rest of each buffer occupies virtual space, - * but has no physical memory allocated for it. - */ - curbuf = (vaddr_t) buffers + (i * MAXBSIZE); - - for (cbpgs = base + (i < residual? 1 : 0); cbpgs--; ) { - struct vm_page *pg; - - if ((pg = uvm_pagealloc(NULL, 0, NULL, 0)) == NULL) - panic("cpu_startup: not enough memory for " - "buffer cache"); - pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg), - UVM_PROT_RW); - curbuf += PAGE_SIZE; - } -{ extern int pmapdebug; pmapdebug = pd; } - } + /* + * Determine how many buffers to allocate. + * We allocate bufcachepercent% of memory for buffer space. + */ + if (bufpages == 0) + bufpages = physmem * bufcachepercent / 100; + + /* Restrict to at most 25% filled kvm */ + if (bufpages > + (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE / 4) + bufpages = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / + PAGE_SIZE / 4; printf("here3\n"); /* @@ -481,8 +427,6 @@ printf("here4\n"); printf("here5\n"); printf("avail mem = %lu\n", ptoa(uvmexp.free)); - printf("using %u buffers containing %u bytes of memory\n", - nbuf, (unsigned)bufpages * PAGE_SIZE); /* * Set up buffers, so they can be used to read disk labels. diff --git a/sys/arch/i386/i386/machdep.c b/sys/arch/i386/i386/machdep.c index 8202008a456..a6fb19dbb84 100644 --- a/sys/arch/i386/i386/machdep.c +++ b/sys/arch/i386/i386/machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: machdep.c,v 1.390 2007/05/25 15:55:26 art Exp $ */ +/* $OpenBSD: machdep.c,v 1.391 2007/05/26 20:26:50 pedro Exp $ */ /* $NetBSD: machdep.c,v 1.214 1996/11/10 03:16:17 thorpej Exp $ */ /*- @@ -174,17 +174,6 @@ extern struct proc *npxproc; #endif #endif /* NCOM > 0 || NPCCOM > 0 */ -/* - * The following defines are for the code in setup_buffers that tries to - * ensure that enough ISA DMAable memory is still left after the buffercache - * has been allocated. 
- */ -#define CHUNKSZ (3 * 1024 * 1024) -#define ISADMA_LIMIT (16 * 1024 * 1024) /* XXX wrong place */ -#define ALLOC_PGS(sz, limit, pgs) \ - uvm_pglistalloc((sz), 0, (limit), PAGE_SIZE, 0, &(pgs), 1, 0) -#define FREE_PGS(pgs) uvm_pglistfree(&(pgs)) - /* the following is used externally (sysctl_hw) */ char machine[] = MACHINE; @@ -199,14 +188,8 @@ int cpu_apmhalt = 0; /* sysctl'd to 1 for halt -p hack */ int user_ldt_enable = 0; /* sysctl'd to 1 to enable */ #endif -#ifdef NBUF -int nbuf = NBUF; -#else -int nbuf = 0; -#endif - #ifndef BUFCACHEPERCENT -#define BUFCACHEPERCENT 5 +#define BUFCACHEPERCENT 10 #endif #ifdef BUFPAGES @@ -283,7 +266,7 @@ struct extent *iomem_ex; static int ioport_malloc_safe; caddr_t allocsys(caddr_t); -void setup_buffers(vaddr_t *); +void setup_buffers(void); void dumpsys(void); int cpu_dump(void); void init386(paddr_t); @@ -444,7 +427,7 @@ cpu_startup() * Now allocate buffers proper. They are different than the above * in that they usually occupy more virtual memory than physical. */ - setup_buffers(&maxaddr); + setup_buffers(); /* * Allocate a submap for exec arguments. This map effectively @@ -462,8 +445,6 @@ cpu_startup() printf("avail mem = %llu (%lluMB)\n", ptoa((unsigned long long)uvmexp.free), ptoa((unsigned long long)uvmexp.free)/1024U/1024U); - printf("using %d buffers containing %u bytes (%uK) of memory\n", - nbuf, bufpages * PAGE_SIZE, bufpages * PAGE_SIZE / 1024); /* * Set up buffers, so they can be used to read disk labels. @@ -556,134 +537,24 @@ allocsys(caddr_t v) valloc(msqids, struct msqid_ds, msginfo.msgmni); #endif - /* - * Determine how many buffers to allocate. We use 10% of the - * first 2MB of memory, and 5% of the rest of below 4G memory, - * with a minimum of 16 buffers. We allocate 1/2 as many swap - * buffer headers as file i/o buffers. - */ - if (bufpages == 0) { - bufpages = (btoc(2 * 1024 * 1024 + avail_end)) * - bufcachepercent / 100; - } - if (nbuf == 0) { - nbuf = bufpages; - if (nbuf < 16) - nbuf = 16; - } - - /* Restrict to at most 35% filled kvm */ - /* XXX - This needs UBC... */ - if (nbuf > - (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / MAXBSIZE * 35 / 100) - nbuf = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / - MAXBSIZE * 35 / 100; - - /* More buffer pages than fits into the buffers is senseless. */ - if (bufpages > nbuf * MAXBSIZE / PAGE_SIZE) - bufpages = nbuf * MAXBSIZE / PAGE_SIZE; - - valloc(buf, struct buf, nbuf); return v; } void -setup_buffers(vaddr_t *maxaddr) +setup_buffers() { - vsize_t size; - vaddr_t addr; - int base, residual, left, chunk, i; - struct pglist pgs, saved_pgs; - struct vm_page *pg; - - size = MAXBSIZE * nbuf; - if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size), - NULL, UVM_UNKNOWN_OFFSET, 0, - UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE, - UVM_ADV_NORMAL, 0))) - panic("cpu_startup: cannot allocate VM for buffers"); - addr = (vaddr_t)buffers; - - base = bufpages / nbuf; - residual = bufpages % nbuf; - if (base >= MAXBSIZE / PAGE_SIZE) { - /* don't want to alloc more physical mem than needed */ - base = MAXBSIZE / PAGE_SIZE; - residual = 0; - } - /* - * In case we might need DMA bouncing we have to make sure there - * is some memory below 16MB available. On machines with many - * pages reserved for the buffer cache we risk filling all of that - * area with buffer pages. We still want much of the buffers - * reside there as that lowers the probability of them needing to - * bounce, but we have to set aside some space for DMA buffers too. 
- * - * The current strategy is to grab hold of one 3MB chunk below 16MB - * first, which we are saving for DMA buffers, then try to get - * one chunk at a time for fs buffers, until that is not possible - * anymore, at which point we get the rest wherever we may find it. - * After that we give our saved area back. That will guarantee at - * least 3MB below 16MB left for drivers' attach routines, among - * them isadma. However we still have a potential problem of PCI - * devices attached earlier snatching that memory. This can be - * solved by making the PCI DMA memory allocation routines go for - * memory above 16MB first. + * Determine how many buffers to allocate. We use bufcachepercent% + * of the memory below 4GB. */ - - left = bufpages; - - /* - * First, save ISA DMA bounce buffer area so we won't lose that - * capability. - */ - TAILQ_INIT(&saved_pgs); - TAILQ_INIT(&pgs); - if (!ALLOC_PGS(CHUNKSZ, ISADMA_LIMIT, saved_pgs)) { - /* - * Then, grab as much ISA DMAable memory as possible - * for the buffer cache as it is nice to not need to - * bounce all buffer I/O. - */ - for (left = bufpages; left > 0; left -= chunk) { - chunk = min(left, CHUNKSZ / PAGE_SIZE); - if (ALLOC_PGS(chunk * PAGE_SIZE, ISADMA_LIMIT, pgs)) - break; - } - } - - /* - * If we need more pages for the buffer cache, get them from anywhere. - */ - if (left > 0 && ALLOC_PGS(left * PAGE_SIZE, avail_end, pgs)) - panic("cannot get physical memory for buffer cache"); - - /* - * Finally, give back the ISA DMA bounce buffer area, so it can be - * allocated by the isadma driver later. - */ - if (!TAILQ_EMPTY(&saved_pgs)) - FREE_PGS(saved_pgs); - - pg = TAILQ_FIRST(&pgs); - for (i = 0; i < nbuf; i++) { - /* - * First <residual> buffers get (base+1) physical pages - * allocated for them. The rest get (base) physical pages. - * - * The rest of each buffer occupies virtual space, - * but has no physical memory allocated for it. - */ - addr = (vaddr_t)buffers + i * MAXBSIZE; - for (size = PAGE_SIZE * (i < residual ? base + 1 : base); - size > 0; size -= PAGE_SIZE, addr += PAGE_SIZE) { - pmap_kenter_pa(addr, VM_PAGE_TO_PHYS(pg), - VM_PROT_READ|VM_PROT_WRITE); - pg = TAILQ_NEXT(pg, pageq); - } - } - pmap_update(pmap_kernel()); + if (bufpages == 0) + bufpages = btoc(avail_end) * bufcachepercent / 100; + + /* Restrict to at most 25% filled kvm */ + if (bufpages > + (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE / 4) + bufpages = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / + PAGE_SIZE / 4; } /* diff --git a/sys/arch/luna88k/luna88k/machdep.c b/sys/arch/luna88k/luna88k/machdep.c index ed80eb8cef7..6932ee9432e 100644 --- a/sys/arch/luna88k/luna88k/machdep.c +++ b/sys/arch/luna88k/luna88k/machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: machdep.c,v 1.40 2007/05/12 20:03:25 miod Exp $ */ +/* $OpenBSD: machdep.c,v 1.41 2007/05/26 20:26:50 pedro Exp $ */ /* * Copyright (c) 1998, 1999, 2000, 2001 Steve Murphree, Jr. * Copyright (c) 1996 Nivas Madhur @@ -186,12 +186,6 @@ struct vm_map *phys_map = NULL; /* * Declare these as initialized data so we can patch them. */ -#ifdef NBUF -int nbuf = NBUF; -#else -int nbuf = 0; -#endif - #ifndef BUFCACHEPERCENT #define BUFCACHEPERCENT 5 #endif @@ -360,8 +354,6 @@ cpu_startup() { caddr_t v; int sz, i; - vsize_t size; - int base, residual; vaddr_t minaddr, maxaddr; /* @@ -467,49 +459,17 @@ cpu_startup() panic("obiova %lx: OBIO not free", obiova); /* - * Now allocate buffers proper. They are different than the above - * in that they usually occupy more virtual memory than physical. 
+ * Determine how many buffers to allocate. + * We allocate bufcachepercent% of memory for buffer space. */ - size = MAXBSIZE * nbuf; - if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size), - NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE, - UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_NORMAL, 0))) - panic("cpu_startup: cannot allocate VM for buffers"); - minaddr = (vaddr_t)buffers; - - if ((bufpages / nbuf) >= btoc(MAXBSIZE)) { - /* don't want to alloc more physical mem than needed */ - bufpages = btoc(MAXBSIZE) * nbuf; - } - base = bufpages / nbuf; - residual = bufpages % nbuf; - - for (i = 0; i < nbuf; i++) { - vsize_t curbufsize; - vaddr_t curbuf; - struct vm_page *pg; + if (bufpages == 0) + bufpages = physmem * bufcachepercent / 100; - /* - * Each buffer has MAXBSIZE bytes of VM space allocated. Of - * that MAXBSIZE space, we allocate and map (base+1) pages - * for the first "residual" buffers, and then we allocate - * "base" pages for the rest. - */ - curbuf = (vaddr_t)buffers + (i * MAXBSIZE); - curbufsize = PAGE_SIZE * ((i < residual) ? (base+1) : base); - - while (curbufsize) { - pg = uvm_pagealloc(NULL, 0, NULL, 0); - if (pg == NULL) - panic("cpu_startup: not enough memory for " - "buffer cache"); - pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg), - VM_PROT_READ | VM_PROT_WRITE); - curbuf += PAGE_SIZE; - curbufsize -= PAGE_SIZE; - } - } - pmap_update(pmap_kernel()); + /* Restrict to at most 25% filled kvm */ + if (bufpages > + (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE / 4) + bufpages = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / + PAGE_SIZE / 4; /* * Allocate a submap for exec arguments. This map effectively @@ -525,8 +485,6 @@ cpu_startup() VM_PHYS_SIZE, 0, FALSE, NULL); printf("avail mem = %ld (%d pages)\n", ptoa(uvmexp.free), uvmexp.free); - printf("using %d buffers containing %d bytes of memory\n", nbuf, - bufpages * PAGE_SIZE); /* * Set up buffers, so they can be used to read disk labels. @@ -574,34 +532,6 @@ allocsys(v) valloc(msqids, struct msqid_ds, msginfo.msgmni); #endif - /* - * Determine how many buffers to allocate. We use 10% of the - * first 2MB of memory, and 5% of the rest, with a minimum of 16 - * buffers. We allocate 1/2 as many swap buffer headers as file - * i/o buffers. - */ - if (bufpages == 0) { - bufpages = (btoc(2 * 1024 * 1024) + physmem) * - bufcachepercent / 100; - } - if (nbuf == 0) { - nbuf = bufpages; - if (nbuf < 16) - nbuf = 16; - } - - /* Restrict to at most 70% filled kvm */ - if (nbuf > - (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / MAXBSIZE * 7 / 10) - nbuf = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / - MAXBSIZE * 7 / 10; - - /* More buffer pages than fits into the buffers is senseless. */ - if (bufpages > nbuf * MAXBSIZE / PAGE_SIZE) - bufpages = nbuf * MAXBSIZE / PAGE_SIZE; - - valloc(buf, struct buf, nbuf); - return v; } diff --git a/sys/arch/mac68k/mac68k/machdep.c b/sys/arch/mac68k/mac68k/machdep.c index 46956d3b101..02e1ebc0128 100644 --- a/sys/arch/mac68k/mac68k/machdep.c +++ b/sys/arch/mac68k/mac68k/machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: machdep.c,v 1.137 2007/05/23 20:33:46 pvalchev Exp $ */ +/* $OpenBSD: machdep.c,v 1.138 2007/05/26 20:26:50 pedro Exp $ */ /* $NetBSD: machdep.c,v 1.207 1998/07/08 04:39:34 thorpej Exp $ */ /* @@ -173,12 +173,6 @@ struct vm_map *phys_map = NULL; /* * Declare these as initialized data so we can patch them. 
*/ -#ifdef NBUF -int nbuf = NBUF; -#else -int nbuf = 0; -#endif - #ifndef BUFCACHEPERCENT #define BUFCACHEPERCENT 5 #endif @@ -364,7 +358,6 @@ cpu_startup(void) caddr_t v; unsigned i; int vers; - int base, residual; vaddr_t minaddr, maxaddr; vsize_t size = 0; /* To avoid compiler warning */ int delay; @@ -419,43 +412,17 @@ cpu_startup(void) panic("startup: table size inconsistency"); /* - * Now allocate buffers proper. They are different than the above - * in that they usually occupy more virtual memory than physical. + * Determine how many buffers to allocate. + * We allocate bufcachepercent% of memory for buffer space. */ - size = MAXBSIZE * nbuf; - if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size), - NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE, - UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_NORMAL, 0))) - panic("startup: cannot allocate VM for buffers"); - minaddr = (vaddr_t)buffers; - base = bufpages / nbuf; - residual = bufpages % nbuf; - for (i = 0; i < nbuf; i++) { - vsize_t curbufsize; - vaddr_t curbuf; - struct vm_page *pg; + if (bufpages == 0) + bufpages = physmem * bufcachepercent / 100; - /* - * Each buffer has MAXBSIZE bytes of VM space allocated. Of - * that MAXBSIZE space, we allocate and map (base+1) pages - * for the first "residual" buffers, and then we allocate - * "base" pages for the rest. - */ - curbuf = (vaddr_t) buffers + (i * MAXBSIZE); - curbufsize = PAGE_SIZE * ((i < residual) ? (base+1) : base); - - while (curbufsize) { - pg = uvm_pagealloc(NULL, 0, NULL, 0); - if (pg == NULL) - panic("cpu_startup: not enough memory for " - "buffer cache"); - pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg), - VM_PROT_READ|VM_PROT_WRITE); - curbuf += PAGE_SIZE; - curbufsize -= PAGE_SIZE; - } - pmap_update(pmap_kernel()); - } + /* Restrict to at most 25% filled kvm */ + if (bufpages > + (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE / 4) + bufpages = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / + PAGE_SIZE / 4; /* * Allocate a submap for exec arguments. This map effectively @@ -472,8 +439,6 @@ cpu_startup(void) printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free), ptoa(uvmexp.free) / 1024 / 1024); - printf("using %d buffers containing %u bytes (%uK) of memory\n", - nbuf, bufpages * PAGE_SIZE, bufpages * PAGE_SIZE / 1024); /* * Set up CPU-specific registers, cache, etc. @@ -526,29 +491,6 @@ allocsys(v) valloc(msqids, struct msqid_ds, msginfo.msgmni); #endif - /* - * Determine how many buffers to allocate (enough to - * hold 5% of total physical memory, but at least 16). - * Allocate 1/2 as many swap buffer headers as file i/o buffers. - */ - if (bufpages == 0) - bufpages = physmem * bufcachepercent / 100; - if (nbuf == 0) { - nbuf = bufpages; - if (nbuf < 16) - nbuf = 16; - } - /* Restrict to at most 70% filled kvm */ - if (nbuf * MAXBSIZE > - (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) * 7 / 10) - nbuf = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / - MAXBSIZE * 7 / 10; - - /* More buffer pages than fits into the buffers is senseless. 
*/ - if (bufpages > nbuf * MAXBSIZE / PAGE_SIZE) - bufpages = nbuf * MAXBSIZE / PAGE_SIZE; - - valloc(buf, struct buf, nbuf); return (v); } diff --git a/sys/arch/macppc/macppc/machdep.c b/sys/arch/macppc/macppc/machdep.c index 1dde4fb97e9..d9f4d98be86 100644 --- a/sys/arch/macppc/macppc/machdep.c +++ b/sys/arch/macppc/macppc/machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: machdep.c,v 1.90 2007/05/23 20:33:46 pvalchev Exp $ */ +/* $OpenBSD: machdep.c,v 1.91 2007/05/26 20:26:51 pedro Exp $ */ /* $NetBSD: machdep.c,v 1.4 1996/10/16 19:33:11 ws Exp $ */ /* @@ -96,12 +96,6 @@ struct pool ppc_vecpl; /* * Declare these as initialized data so we can patch them. */ -#ifdef NBUF -int nbuf = NBUF; -#else -int nbuf = 0; -#endif - #ifndef BUFCACHEPERCENT #define BUFCACHEPERCENT 5 #endif @@ -487,12 +481,11 @@ install_extint(void (*handler)(void)) void cpu_startup() { - int sz, i; + int sz; caddr_t v; vaddr_t minaddr, maxaddr; - int base, residual; - v = (caddr_t)proc0paddr + USPACE; + v = (caddr_t)proc0paddr + USPACE; proc0.p_addr = proc0paddr; printf("%s", version); @@ -511,43 +504,17 @@ cpu_startup() panic("startup: table size inconsistency"); /* - * Now allocate buffers proper. They are different than the above - * in that they usually occupy more virtual memory than physical. + * Determine how many buffers to allocate. + * We allocate bufcachepercent% of memory for buffer space. */ - sz = MAXBSIZE * nbuf; - if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(sz), - NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE, - UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_NORMAL, 0))) - panic("cpu_startup: cannot allocate VM for buffers"); - /* - addr = (vaddr_t)buffers; - */ - base = bufpages / nbuf; - residual = bufpages % nbuf; - if (base >= MAXBSIZE) { - /* Don't want to alloc more physical mem than ever needed */ - base = MAXBSIZE; - residual = 0; - } - for (i = 0; i < nbuf; i++) { - vsize_t curbufsize; - vaddr_t curbuf; - struct vm_page *pg; - - curbuf = (vaddr_t)buffers + i * MAXBSIZE; - curbufsize = PAGE_SIZE * (i < residual ? base + 1 : base); - while (curbufsize) { - pg = uvm_pagealloc(NULL, 0, NULL, 0); - if (pg == NULL) - panic("cpu_startup: not enough memory for" - " buffer cache"); - pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg), - VM_PROT_READ|VM_PROT_WRITE); - curbuf += PAGE_SIZE; - curbufsize -= PAGE_SIZE; - } - } - pmap_update(pmap_kernel()); + if (bufpages == 0) + bufpages = physmem * bufcachepercent / 100; + + /* Restrict to at most 25% filled kvm */ + if (bufpages > + (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE / 4) + bufpages = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / + PAGE_SIZE / 4; /* * Allocate a submap for exec arguments. This map effectively @@ -566,8 +533,6 @@ cpu_startup() printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free), ptoa(uvmexp.free) / 1024 / 1024); - printf("using %u buffers containing %u bytes (%uK) of memory\n", - nbuf, bufpages * PAGE_SIZE, bufpages * PAGE_SIZE / 1024); /* * Set up the buffers. @@ -593,28 +558,6 @@ allocsys(caddr_t v) valloc(msqids, struct msqid_ds, msginfo.msgmni); #endif - /* - * Decide on buffer space to use. - */ - if (bufpages == 0) - bufpages = physmem * bufcachepercent / 100; - if (nbuf == 0) { - nbuf = bufpages; - if (nbuf < 16) - nbuf = 16; - } - /* Restrict to at most 35% filled kvm */ - if (nbuf > - (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / MAXBSIZE * 35 / 100) - nbuf = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / - MAXBSIZE * 35 / 100; - - /* More buffer pages than fits into the buffers is senseless. 
*/ - if (bufpages > nbuf * MAXBSIZE / PAGE_SIZE) - bufpages = nbuf * MAXBSIZE / PAGE_SIZE; - - valloc(buf, struct buf, nbuf); - return v; } diff --git a/sys/arch/mvme68k/mvme68k/machdep.c b/sys/arch/mvme68k/mvme68k/machdep.c index bde27ec65c1..fd3713e5856 100644 --- a/sys/arch/mvme68k/mvme68k/machdep.c +++ b/sys/arch/mvme68k/mvme68k/machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: machdep.c,v 1.101 2007/05/23 20:33:46 pvalchev Exp $ */ +/* $OpenBSD: machdep.c,v 1.102 2007/05/26 20:26:51 pedro Exp $ */ /* * Copyright (c) 1995 Theo de Raadt @@ -126,12 +126,6 @@ extern vaddr_t avail_end; /* * Declare these as initialized data so we can patch them. */ -#ifdef NBUF -int nbuf = NBUF; -#else -int nbuf = 0; -#endif - #ifndef BUFCACHEPERCENT #define BUFCACHEPERCENT 5 #endif @@ -221,8 +215,6 @@ cpu_startup() { unsigned i; caddr_t v; - int base, residual; - vaddr_t minaddr, maxaddr; vsize_t size; #ifdef DEBUG @@ -261,50 +253,17 @@ cpu_startup() panic("startup: table size inconsistency"); /* - * Now allocate buffers proper. They are different than the above - * in that they usually occupy more virtual memory than physical. + * Determine how many buffers to allocate. + * We allocate bufcachepercent% of memory for buffer space. */ - size = MAXBSIZE * nbuf; - if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size), - NULL, UVM_UNKNOWN_OFFSET, 0, - UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE, - UVM_ADV_NORMAL, 0))) - panic("cpu_startup: cannot allocate VM for buffers"); - minaddr = (vaddr_t)buffers; - if ((bufpages / nbuf) >= btoc(MAXBSIZE)) { - /* don't want to alloc more physical mem than needed */ - bufpages = btoc(MAXBSIZE) * nbuf; - } - base = bufpages / nbuf; - residual = bufpages % nbuf; + if (bufpages == 0) + bufpages = physmem * bufcachepercent / 100; - for (i = 0; i < nbuf; i++) { - vsize_t curbufsize; - vaddr_t curbuf; - struct vm_page *pg; - - /* - * Each buffer has MAXBSIZE bytes of VM space allocated. Of - * that MAXBSIZE space, we allocate and map (base+1) pages - * for the first "residual" buffers, and then we allocate - * "base" pages for the rest. - */ - curbuf = (vaddr_t)buffers + (i * MAXBSIZE); - curbufsize = PAGE_SIZE * ((i < residual) ? (base+1) : base); - - while (curbufsize) { - pg = uvm_pagealloc(NULL, 0, NULL, 0); - if (pg == NULL) - panic("cpu_startup: not enough memory for " - "buffer cache"); - - pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg), - VM_PROT_READ|VM_PROT_WRITE); - curbuf += PAGE_SIZE; - curbufsize -= PAGE_SIZE; - } - } - pmap_update(pmap_kernel()); + /* Restrict to at most 25% filled kvm */ + if (bufpages > + (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE / 4) + bufpages = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / + PAGE_SIZE / 4; /* * Allocate a submap for exec arguments. This map effectively @@ -330,8 +289,6 @@ cpu_startup() printf("avail mem = %u (%uMB)\n", ptoa(uvmexp.free), ptoa(uvmexp.free) / 1024 / 1024); - printf("using %d buffers containing %d bytes of memory\n", - nbuf, bufpages * PAGE_SIZE); /* * Configure the system. @@ -367,29 +324,6 @@ allocsys(caddr_t v) valloc(msqids, struct msqid_ds, msginfo.msgmni); #endif - /* - * Determine how many buffers to allocate (enough to - * hold 5% of total physical memory, but at least 16). - * Allocate 1/2 as many swap buffer headers as file i/o buffers. 
- */ - if (bufpages == 0) - bufpages = physmem * bufcachepercent / 100; - if (nbuf == 0) { - nbuf = bufpages; - if (nbuf < 16) - nbuf = 16; - } - /* Restrict to at most 70% filled kvm */ - if (nbuf * MAXBSIZE > - (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) * 7 / 10) - nbuf = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / - MAXBSIZE * 7 / 10; - - /* More buffer pages than fits into the buffers is senseless. */ - if (bufpages > nbuf * MAXBSIZE / PAGE_SIZE) - bufpages = nbuf * MAXBSIZE / PAGE_SIZE; - - valloc(buf, struct buf, nbuf); return (v); } diff --git a/sys/arch/mvme88k/mvme88k/machdep.c b/sys/arch/mvme88k/mvme88k/machdep.c index 6123305b6d6..f703d099263 100644 --- a/sys/arch/mvme88k/mvme88k/machdep.c +++ b/sys/arch/mvme88k/mvme88k/machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: machdep.c,v 1.188 2007/05/23 20:33:46 pvalchev Exp $ */ +/* $OpenBSD: machdep.c,v 1.189 2007/05/26 20:26:51 pedro Exp $ */ /* * Copyright (c) 1998, 1999, 2000, 2001 Steve Murphree, Jr. * Copyright (c) 1996 Nivas Madhur @@ -141,12 +141,6 @@ __cpu_simple_lock_t cpu_mutex = __SIMPLELOCK_UNLOCKED; /* * Declare these as initialized data so we can patch them. */ -#ifdef NBUF -int nbuf = NBUF; -#else -int nbuf = 0; -#endif - #ifndef BUFCACHEPERCENT #define BUFCACHEPERCENT 5 #endif @@ -330,8 +324,6 @@ cpu_startup() { caddr_t v; int sz, i; - vsize_t size; - int base, residual; vaddr_t minaddr, maxaddr; /* @@ -386,49 +378,17 @@ cpu_startup() } /* - * Now allocate buffers proper. They are different than the above - * in that they usually occupy more virtual memory than physical. + * Determine how many buffers to allocate. + * We allocate bufcachepercent% of memory for buffer space. */ - size = MAXBSIZE * nbuf; - if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size), - NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE, - UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_NORMAL, 0))) - panic("cpu_startup: cannot allocate VM for buffers"); - minaddr = (vaddr_t)buffers; - - if ((bufpages / nbuf) >= btoc(MAXBSIZE)) { - /* don't want to alloc more physical mem than needed */ - bufpages = btoc(MAXBSIZE) * nbuf; - } - base = bufpages / nbuf; - residual = bufpages % nbuf; - - for (i = 0; i < nbuf; i++) { - vsize_t curbufsize; - vaddr_t curbuf; - struct vm_page *pg; + if (bufpages == 0) + bufpages = physmem * bufcachepercent / 100; - /* - * Each buffer has MAXBSIZE bytes of VM space allocated. Of - * that MAXBSIZE space, we allocate and map (base+1) pages - * for the first "residual" buffers, and then we allocate - * "base" pages for the rest. - */ - curbuf = (vaddr_t)buffers + (i * MAXBSIZE); - curbufsize = PAGE_SIZE * ((i < residual) ? (base + 1) : base); - - while (curbufsize) { - pg = uvm_pagealloc(NULL, 0, NULL, 0); - if (pg == NULL) - panic("cpu_startup: not enough memory for " - "buffer cache"); - pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg), - VM_PROT_READ | VM_PROT_WRITE); - curbuf += PAGE_SIZE; - curbufsize -= PAGE_SIZE; - } - } - pmap_update(pmap_kernel()); + /* Restrict to at most 25% filled kvm */ + if (bufpages > + (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE / 4) + bufpages = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / + PAGE_SIZE / 4; /* * Allocate a submap for exec arguments. This map effectively @@ -445,8 +405,6 @@ cpu_startup() printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free), ptoa(uvmexp.free)/1024/1024); - printf("using %d buffers containing %d bytes of memory\n", nbuf, - bufpages * PAGE_SIZE); /* * Set up buffers, so they can be used to read disk labels. 
@@ -495,34 +453,6 @@ allocsys(v) valloc(msqids, struct msqid_ds, msginfo.msgmni); #endif - /* - * Determine how many buffers to allocate. We use 10% of the - * first 2MB of memory, and 5% of the rest, with a minimum of 16 - * buffers. We allocate 1/2 as many swap buffer headers as file - * i/o buffers. - */ - if (bufpages == 0) { - bufpages = (btoc(2 * 1024 * 1024) + physmem) * - bufcachepercent / 100; - } - if (nbuf == 0) { - nbuf = bufpages; - if (nbuf < 16) - nbuf = 16; - } - - /* Restrict to at most 70% filled kvm */ - if (nbuf > - (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / MAXBSIZE * 7 / 10) - nbuf = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / - MAXBSIZE * 7 / 10; - - /* More buffer pages than fits into the buffers is senseless. */ - if (bufpages > nbuf * MAXBSIZE / PAGE_SIZE) - bufpages = nbuf * MAXBSIZE / PAGE_SIZE; - - valloc(buf, struct buf, nbuf); - return v; } diff --git a/sys/arch/mvmeppc/mvmeppc/machdep.c b/sys/arch/mvmeppc/mvmeppc/machdep.c index 321a3b190aa..c48a32cdcfa 100644 --- a/sys/arch/mvmeppc/mvmeppc/machdep.c +++ b/sys/arch/mvmeppc/mvmeppc/machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: machdep.c,v 1.52 2007/05/23 20:33:47 pvalchev Exp $ */ +/* $OpenBSD: machdep.c,v 1.53 2007/05/26 20:26:51 pedro Exp $ */ /* $NetBSD: machdep.c,v 1.4 1996/10/16 19:33:11 ws Exp $ */ /* @@ -110,12 +110,6 @@ static struct consdev bootcons = { /* * Declare these as initialized data so we can patch them. */ -#ifdef NBUF -int nbuf = NBUF; -#else -int nbuf = 0; -#endif - #ifndef BUFCACHEPERCENT #define BUFCACHEPERCENT 5 #endif @@ -393,10 +387,9 @@ install_extint(handler) void cpu_startup() { - int sz, i; + int sz; caddr_t v; vaddr_t minaddr, maxaddr; - int base, residual; proc0.p_addr = proc0paddr; @@ -416,44 +409,17 @@ cpu_startup() panic("startup: table size inconsistency"); /* - * Now allocate buffers proper. They are different than the above - * in that they usually occupy more virtual memory than physical. + * Determine how many buffers to allocate. + * We allocate bufcachepercent% of memory for buffer space. */ - sz = MAXBSIZE * nbuf; - if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(sz), - NULL, UVM_UNKNOWN_OFFSET, 0, - UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE, - UVM_ADV_NORMAL, 0))) - panic("cpu_startup: cannot allocate VM for buffers"); - /* - addr = (vaddr_t)buffers; - */ - base = bufpages / nbuf; - residual = bufpages % nbuf; - if (base >= MAXBSIZE) { - /* Don't want to alloc more physical mem than ever needed */ - base = MAXBSIZE; - residual = 0; - } - for (i = 0; i < nbuf; i++) { - vsize_t curbufsize; - vaddr_t curbuf; - struct vm_page *pg; - - curbuf = (vaddr_t)buffers + i * MAXBSIZE; - curbufsize = PAGE_SIZE * (i < residual ? base + 1 : base); - while (curbufsize) { - pg = uvm_pagealloc(NULL, 0, NULL, 0); - if (pg == NULL) - panic("cpu_startup: not enough memory for" - " buffer cache"); - pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg), - VM_PROT_READ|VM_PROT_WRITE); - curbuf += PAGE_SIZE; - curbufsize -= PAGE_SIZE; - } - } - pmap_update(pmap_kernel()); + if (bufpages == 0) + bufpages = physmem * bufcachepercent / 100; + + /* Restrict to at most 25% filled kvm */ + if (bufpages > + (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE / 4) + bufpages = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / + PAGE_SIZE / 4; /* * Allocate a submap for exec arguments. 
This map effectively @@ -472,9 +438,7 @@ cpu_startup() printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free), ptoa(uvmexp.free) / 1024 / 1024); - printf("using %d buffers containing %d bytes of memory\n", nbuf, - bufpages * PAGE_SIZE); - + /* * Set up the buffers. */ @@ -505,28 +469,6 @@ allocsys(v) valloc(msqids, struct msqid_ds, msginfo.msgmni); #endif - /* - * Decide on buffer space to use. - */ - if (bufpages == 0) - bufpages = physmem * bufcachepercent / 100; - if (nbuf == 0) { - nbuf = bufpages; - if (nbuf < 16) - nbuf = 16; - } - /* Restrict to at most 35% filled kvm */ - if (nbuf > - (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / MAXBSIZE * 35 / 100) - nbuf = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / - MAXBSIZE * 35 / 100; - - /* More buffer pages than fits into the buffers is senseless. */ - if (bufpages > nbuf * MAXBSIZE / PAGE_SIZE) - bufpages = nbuf * MAXBSIZE / PAGE_SIZE; - - valloc(buf, struct buf, nbuf); - return v; } diff --git a/sys/arch/sgi/sgi/machdep.c b/sys/arch/sgi/sgi/machdep.c index 8953dc13de4..69afe724253 100644 --- a/sys/arch/sgi/sgi/machdep.c +++ b/sys/arch/sgi/sgi/machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: machdep.c,v 1.39 2007/05/23 20:33:47 pvalchev Exp $ */ +/* $OpenBSD: machdep.c,v 1.40 2007/05/26 20:26:51 pedro Exp $ */ /* * Copyright (c) 2003-2004 Opsycon AB (www.opsycon.se / www.opsycon.com) @@ -103,9 +103,6 @@ char cpu_model[30]; /* * Declare these as initialized data so we can patch them. */ -#ifndef NBUF -#define NBUF 0 /* Can be changed in config */ -#endif #ifndef BUFCACHEPERCENT #define BUFCACHEPERCENT 5 /* Can be changed in config */ #endif @@ -113,7 +110,6 @@ char cpu_model[30]; #define BUFPAGES 0 /* Can be changed in config */ #endif -int nbuf = NBUF; int bufpages = BUFPAGES; int bufcachepercent = BUFCACHEPERCENT; @@ -656,28 +652,6 @@ allocsys(caddr_t v) valloc(msqids, struct msqid_ds, msginfo.msgmni); #endif - /* - * Determine how many buffers to allocate. - */ - if (bufpages == 0) - bufpages = physmem * bufcachepercent / 100; - if (nbuf == 0) { - nbuf = bufpages; - if (nbuf < 16) - nbuf = 16; - } - /* Restrict to at most 35% filled kvm */ - if (nbuf * MAXBSIZE > - (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) * 7 / 20) - nbuf = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / - MAXBSIZE * 7 / 20; - - /* More buffer pages than fits into the buffers is senseless. */ - if (bufpages > nbuf * MAXBSIZE / PAGE_SIZE) - bufpages = nbuf * MAXBSIZE / PAGE_SIZE; - - valloc(buf, struct buf, nbuf); - return(v); } @@ -736,10 +710,7 @@ consinit() void cpu_startup() { - unsigned i; - int base, residual; vaddr_t minaddr, maxaddr; - vsize_t size; #ifdef PMAPDEBUG extern int pmapdebug; int opmapdebug = pmapdebug; @@ -757,44 +728,18 @@ cpu_startup() ptoa(physmem)/1024/1024); /* - * Allocate virtual address space for file I/O buffers. - * Note they are different than the array of headers, 'buf', - * and usually occupy more virtual memory than physical. + * Determine how many buffers to allocate. + * We allocate bufcachepercent% of memory for buffer space. 
*/ - size = MAXBSIZE * nbuf; - if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size), - NULL, UVM_UNKNOWN_OFFSET, 0, - UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE, - UVM_ADV_NORMAL, 0))) - panic("cpu_startup: cannot allocate VM for buffers"); - base = bufpages / nbuf; - residual = bufpages % nbuf; - - for (i = 0; i < nbuf; i++) { - vsize_t curbufsize; - vaddr_t curbuf; + if (bufpages == 0) + bufpages = physmem * bufcachepercent / 100; + + /* Restrict to at most 25% filled kvm */ + if (bufpages > + (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE / 4) + bufpages = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / + PAGE_SIZE / 4; - /* - * First <residual> buffers get (base+1) physical pages - * allocated for them. The rest get (base) physical pages. - * - * The rest of each buffer occupies virtual space, - * but has no physical memory allocated for it. - */ - curbuf = (vaddr_t)buffers + i * MAXBSIZE; - curbufsize = PAGE_SIZE * (i < residual ? base+1 : base); - - while (curbufsize) { - struct vm_page *pg = uvm_pagealloc(NULL, 0, NULL, 0); - if (pg == NULL) - panic("cpu_startup: not enough memory for" - " buffer cache"); - pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg), - VM_PROT_READ|VM_PROT_WRITE); - curbuf += PAGE_SIZE; - curbufsize -= PAGE_SIZE; - } - } /* * Allocate a submap for exec arguments. This map effectively * limits the number of processes exec'ing at any time. @@ -810,8 +755,6 @@ cpu_startup() #endif printf("avail mem = %u (%uMB)\n", ptoa(uvmexp.free), ptoa(uvmexp.free)/1024/1024); - printf("using %d buffers containing %d bytes of memory\n", - nbuf, bufpages * PAGE_SIZE); extent_malloc_flags = EX_MALLOCOK; diff --git a/sys/arch/sh/sh/sh_machdep.c b/sys/arch/sh/sh/sh_machdep.c index f609cf7cb47..9d8045b058b 100644 --- a/sys/arch/sh/sh/sh_machdep.c +++ b/sys/arch/sh/sh/sh_machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: sh_machdep.c,v 1.12 2007/04/29 17:53:37 miod Exp $ */ +/* $OpenBSD: sh_machdep.c,v 1.13 2007/05/26 20:26:51 pedro Exp $ */ /* $NetBSD: sh3_machdep.c,v 1.59 2006/03/04 01:13:36 uwe Exp $ */ /* @@ -118,12 +118,6 @@ #include <sh/intr.h> #include <sh/kcore.h> -#ifdef NBUF -int nbuf = NBUF; -#else -int nbuf = 0; -#endif - #ifndef BUFCACHEPERCENT #define BUFCACHEPERCENT 5 #endif @@ -267,12 +261,9 @@ sh_proc0_init() void sh_startup() { - u_int loop; vaddr_t minaddr, maxaddr; caddr_t sysbase; caddr_t size; - vsize_t bufsize; - int base, residual; printf("%s", version); if (*cpu_model != '\0') @@ -309,48 +300,17 @@ sh_startup() panic("cpu_startup: system table size inconsistency"); /* - * Now allocate buffers proper. They are different than the above - * in that they usually occupy more virtual memory than physical. + * Determine how many buffers to allocate. + * We allocate bufcachepercent% of memory for buffer space. */ - bufsize = MAXBSIZE * nbuf; - if (uvm_map(kernel_map, (vaddr_t *)&buffers, round_page(bufsize), - NULL, UVM_UNKNOWN_OFFSET, 0, - UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE, - UVM_ADV_NORMAL, 0)) != 0) - panic("sh_startup: cannot allocate UVM space for buffers"); - minaddr = (vaddr_t)buffers; - /* don't want to alloc more physical mem than needed */ - if ((bufpages / nbuf) >= btoc(MAXBSIZE)) - bufpages = btoc(MAXBSIZE) * nbuf; - - base = bufpages / nbuf; - residual = bufpages % nbuf; - for (loop = 0; loop < nbuf; ++loop) { - vsize_t curbufsize; - vaddr_t curbuf; - struct vm_page *pg; + if (bufpages == 0) + bufpages = physmem * bufcachepercent / 100; - /* - * Each buffer has MAXBSIZE bytes of VM space allocated. 
Of - * that MAXBSIZE space, we allocate and map (base+1) pages - * for the first "residual" buffers, and then we allocate - * "base" pages for the rest. - */ - curbuf = (vaddr_t) buffers + (loop * MAXBSIZE); - curbufsize = NBPG * ((loop < residual) ? (base+1) : base); - - while (curbufsize) { - pg = uvm_pagealloc(NULL, 0, NULL, 0); - if (pg == NULL) - panic("sh_startup: not enough memory for buffer cache"); - - pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg), - VM_PROT_READ|VM_PROT_WRITE); - curbuf += PAGE_SIZE; - curbufsize -= PAGE_SIZE; - } - } - pmap_update(pmap_kernel()); + /* Restrict to at most 25% filled kvm */ + if (bufpages > + (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE / 4) + bufpages = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / + PAGE_SIZE / 4; /* * Allocate a submap for exec arguments. This map effectively @@ -372,8 +332,6 @@ sh_startup() printf("avail mem = %u (%uK)\n", ptoa(uvmexp.free), ptoa(uvmexp.free) / 1024); - printf("using %d buffers containing %u bytes (%uK) of memory\n", - nbuf, bufpages * PAGE_SIZE, bufpages * PAGE_SIZE / 1024); if (boothowto & RB_CONFIG) { #ifdef BOOT_CONFIG @@ -404,34 +362,7 @@ allocsys(caddr_t v) valloc(msghdrs, struct msg, msginfo.msgtql); valloc(msqids, struct msqid_ds, msginfo.msgmni); #endif - /* - * Determine how many buffers to allocate. We use 10% of the - * first 2MB of memory, and 5% of the rest, with a minimum of 16 - * buffers. We allocate 1/2 as many swap buffer headers as file - * i/o buffers. - */ - if (bufpages == 0) - bufpages = (btoc(2 * 1024 * 1024) + physmem) * - bufcachepercent / 100; - - if (nbuf == 0) { - nbuf = bufpages; - if (nbuf < 16) - nbuf = 16; - } - - /* Restrict to at most 35% filled kvm */ - /* XXX - This needs UBC... */ - if (nbuf > - (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / MAXBSIZE * 35 / 100) - nbuf = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / - MAXBSIZE * 35 / 100; - - /* More buffer pages than fits into the buffers is senseless. */ - if (bufpages > nbuf * MAXBSIZE / PAGE_SIZE) - bufpages = nbuf * MAXBSIZE / PAGE_SIZE; - valloc(buf, struct buf, nbuf); return v; } diff --git a/sys/arch/solbourne/solbourne/machdep.c b/sys/arch/solbourne/solbourne/machdep.c index 4d07197f46d..7277e60a4c9 100644 --- a/sys/arch/solbourne/solbourne/machdep.c +++ b/sys/arch/solbourne/solbourne/machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: machdep.c,v 1.2 2006/04/15 17:36:47 miod Exp $ */ +/* $OpenBSD: machdep.c,v 1.3 2007/05/26 20:26:51 pedro Exp $ */ /* OpenBSD: machdep.c,v 1.105 2005/04/11 15:13:01 deraadt Exp */ /* @@ -96,12 +96,6 @@ struct vm_map *phys_map = NULL; /* * Declare these as initialized data so we can patch them. */ -#ifdef NBUF -int nbuf = NBUF; -#else -int nbuf = 0; -#endif - #ifndef BUFCACHEPERCENT #define BUFCACHEPERCENT 5 #endif @@ -141,16 +135,13 @@ static int kap_maskcheck(void); void cpu_startup() { - unsigned i; caddr_t v; int sz; - int base, residual; #ifdef DEBUG extern int pmapdebug; int opmapdebug = pmapdebug; #endif vaddr_t minaddr, maxaddr; - vsize_t size; extern struct user *proc0paddr; #ifdef DEBUG @@ -191,52 +182,18 @@ cpu_startup() if (allocsys(v) - v != sz) panic("startup: table size inconsistency"); - /* - * allocate virtual and physical memory for the buffers. - */ - size = MAXBSIZE * nbuf; /* # bytes for buffers */ - - /* allocate VM for buffers... 
area is not managed by VM system */ - if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size), - NULL, UVM_UNKNOWN_OFFSET, 0, - UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE, - UVM_ADV_NORMAL, 0))) - panic("cpu_startup: cannot allocate VM for buffers"); - - minaddr = (vaddr_t) buffers; - if ((bufpages / nbuf) >= btoc(MAXBSIZE)) { - bufpages = btoc(MAXBSIZE) * nbuf; /* do not overallocate RAM */ - } - base = bufpages / nbuf; - residual = bufpages % nbuf; - - /* now allocate RAM for buffers */ - for (i = 0 ; i < nbuf ; i++) { - vaddr_t curbuf; - vsize_t curbufsize; - struct vm_page *pg; + /* + * Determine how many buffers to allocate. + * We allocate bufcachepercent% of memory for buffer space. + */ + if (bufpages == 0) + bufpages = physmem * bufcachepercent / 100; - /* - * each buffer has MAXBSIZE bytes of VM space allocated. of - * that MAXBSIZE space we allocate and map (base+1) pages - * for the first "residual" buffers, and then we allocate - * "base" pages for the rest. - */ - curbuf = (vaddr_t) buffers + (i * MAXBSIZE); - curbufsize = PAGE_SIZE * ((i < residual) ? (base+1) : base); - - while (curbufsize) { - pg = uvm_pagealloc(NULL, 0, NULL, 0); - if (pg == NULL) - panic("cpu_startup: " - "not enough RAM for buffer cache"); - pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg), - UVM_PROT_RW); - curbuf += PAGE_SIZE; - curbufsize -= PAGE_SIZE; - } - } - pmap_update(pmap_kernel()); + /* Restrict to at most 25% filled kvm */ + if (bufpages > + (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE / 4) + bufpages = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / + PAGE_SIZE / 4; /* * Allocate a submap for exec arguments. This map effectively @@ -274,8 +231,6 @@ cpu_startup() pmapdebug = opmapdebug; #endif printf("avail mem = %ld\n", ptoa(uvmexp.free)); - printf("using %d buffers containing %d bytes of memory\n", - nbuf, bufpages * PAGE_SIZE); /* * Set up buffers, so they can be used to read disk labels. @@ -309,31 +264,6 @@ allocsys(v) valloc(msqids, struct msqid_ds, msginfo.msgmni); #endif - /* - * Determine how many buffers to allocate (enough to - * hold 5% of total physical memory, but at least 16). - * Allocate 1/2 as many swap buffer headers as file i/o buffers. - */ - if (bufpages == 0) - bufpages = physmem * bufcachepercent / 100; - if (nbuf == 0) { - nbuf = bufpages; - if (nbuf < 16) - nbuf = 16; - } - if (nbuf > 200) - nbuf = 200; /* or we run out of PMEGS */ - /* Restrict to at most 70% filled kvm */ - if (nbuf * MAXBSIZE > - (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) * 7 / 10) - nbuf = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / - MAXBSIZE * 7 / 10; - - /* More buffer pages than fits into the buffers is senseless. */ - if (bufpages > nbuf * MAXBSIZE / PAGE_SIZE) - bufpages = nbuf * MAXBSIZE / PAGE_SIZE; - - valloc(buf, struct buf, nbuf); return (v); } diff --git a/sys/arch/sparc/sparc/machdep.c b/sys/arch/sparc/sparc/machdep.c index e2295603534..099d6bcddde 100644 --- a/sys/arch/sparc/sparc/machdep.c +++ b/sys/arch/sparc/sparc/machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: machdep.c,v 1.110 2007/05/23 20:33:47 pvalchev Exp $ */ +/* $OpenBSD: machdep.c,v 1.111 2007/05/26 20:26:51 pedro Exp $ */ /* $NetBSD: machdep.c,v 1.85 1997/09/12 08:55:02 pk Exp $ */ /* @@ -109,12 +109,6 @@ struct vm_map *phys_map = NULL; /* * Declare these as initialized data so we can patch them. 
*/ -#ifdef NBUF -int nbuf = NBUF; -#else -int nbuf = 0; -#endif - #ifndef BUFCACHEPERCENT #define BUFCACHEPERCENT 5 #endif @@ -154,16 +148,13 @@ void stackdump(void); void cpu_startup() { - unsigned i; caddr_t v; int sz; - int base, residual; #ifdef DEBUG extern int pmapdebug; int opmapdebug = pmapdebug; #endif vaddr_t minaddr, maxaddr; - vsize_t size; extern struct user *proc0paddr; #ifdef DEBUG @@ -204,52 +195,18 @@ cpu_startup() if (allocsys(v) - v != sz) panic("startup: table size inconsistency"); - /* - * allocate virtual and physical memory for the buffers. - */ - size = MAXBSIZE * nbuf; /* # bytes for buffers */ - - /* allocate VM for buffers... area is not managed by VM system */ - if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size), - NULL, UVM_UNKNOWN_OFFSET, 0, - UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE, - UVM_ADV_NORMAL, 0))) - panic("cpu_startup: cannot allocate VM for buffers"); - - minaddr = (vaddr_t) buffers; - if ((bufpages / nbuf) >= btoc(MAXBSIZE)) { - bufpages = btoc(MAXBSIZE) * nbuf; /* do not overallocate RAM */ - } - base = bufpages / nbuf; - residual = bufpages % nbuf; - - /* now allocate RAM for buffers */ - for (i = 0 ; i < nbuf ; i++) { - vaddr_t curbuf; - vsize_t curbufsize; - struct vm_page *pg; + /* + * Determine how many buffers to allocate. + * We allocate bufcachepercent% of memory for buffer space. + */ + if (bufpages == 0) + bufpages = physmem * bufcachepercent / 100; - /* - * each buffer has MAXBSIZE bytes of VM space allocated. of - * that MAXBSIZE space we allocate and map (base+1) pages - * for the first "residual" buffers, and then we allocate - * "base" pages for the rest. - */ - curbuf = (vaddr_t) buffers + (i * MAXBSIZE); - curbufsize = PAGE_SIZE * ((i < residual) ? (base+1) : base); - - while (curbufsize) { - pg = uvm_pagealloc(NULL, 0, NULL, 0); - if (pg == NULL) - panic("cpu_startup: " - "not enough RAM for buffer cache"); - pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg), - VM_PROT_READ|VM_PROT_WRITE); - curbuf += PAGE_SIZE; - curbufsize -= PAGE_SIZE; - } - } - pmap_update(pmap_kernel()); + /* Restrict to at most 25% filled kvm */ + if (bufpages > + (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE / 4) + bufpages = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / + PAGE_SIZE / 4; /* * Allocate a submap for exec arguments. This map effectively @@ -303,8 +260,6 @@ cpu_startup() #endif printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free), ptoa(uvmexp.free)/1024/1024); - printf("using %d buffers containing %d bytes of memory\n", - nbuf, bufpages * PAGE_SIZE); /* * Set up buffers, so they can be used to read disk labels. @@ -338,31 +293,6 @@ allocsys(v) valloc(msqids, struct msqid_ds, msginfo.msgmni); #endif - /* - * Determine how many buffers to allocate (enough to - * hold 5% of total physical memory, but at least 16). - * Allocate 1/2 as many swap buffer headers as file i/o buffers. - */ - if (bufpages == 0) - bufpages = physmem * bufcachepercent / 100; - if (nbuf == 0) { - nbuf = bufpages; - if (nbuf < 16) - nbuf = 16; - } - if (nbuf > 200) - nbuf = 200; /* or we run out of PMEGS */ - /* Restrict to at most 70% filled kvm */ - if (nbuf * MAXBSIZE > - (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) * 7 / 10) - nbuf = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / - MAXBSIZE * 7 / 10; - - /* More buffer pages than fits into the buffers is senseless. 
*/ - if (bufpages > nbuf * MAXBSIZE / PAGE_SIZE) - bufpages = nbuf * MAXBSIZE / PAGE_SIZE; - - valloc(buf, struct buf, nbuf); return (v); } diff --git a/sys/arch/sparc64/sparc64/machdep.c b/sys/arch/sparc64/sparc64/machdep.c index 229efbf7b1d..f0e910a0351 100644 --- a/sys/arch/sparc64/sparc64/machdep.c +++ b/sys/arch/sparc64/sparc64/machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: machdep.c,v 1.88 2007/05/23 20:33:47 pvalchev Exp $ */ +/* $OpenBSD: machdep.c,v 1.89 2007/05/26 20:26:51 pedro Exp $ */ /* $NetBSD: machdep.c,v 1.108 2001/07/24 19:30:14 eeh Exp $ */ /*- @@ -172,14 +172,8 @@ extern vaddr_t avail_end; /* * Declare these as initialized data so we can patch them. */ -#ifdef NBUF -int nbuf = NBUF; -#else -int nbuf = 0; -#endif - #ifndef BUFCACHEPERCENT -#define BUFCACHEPERCENT 5 +#define BUFCACHEPERCENT 10 #endif #ifdef BUFPAGES @@ -232,16 +226,13 @@ void stackdump(void); void cpu_startup() { - unsigned i; caddr_t v; long sz; - int base, residual; #ifdef DEBUG extern int pmapdebug; int opmapdebug = pmapdebug; #endif vaddr_t minaddr, maxaddr; - vsize_t size; extern struct user *proc0paddr; #ifdef DEBUG @@ -267,52 +258,12 @@ cpu_startup() if (allocsys(v) - v != sz) panic("startup: table size inconsistency"); - /* - * allocate virtual and physical memory for the buffers. - */ - size = MAXBSIZE * nbuf; /* # bytes for buffers */ - - /* allocate VM for buffers... area is not managed by VM system */ - if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size), - NULL, UVM_UNKNOWN_OFFSET, 0, - UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE, - UVM_ADV_NORMAL, 0)) != 0) - panic("cpu_startup: cannot allocate VM for buffers"); - - minaddr = (vaddr_t) buffers; - if ((bufpages / nbuf) >= btoc(MAXBSIZE)) { - bufpages = btoc(MAXBSIZE) * nbuf; /* do not overallocate RAM */ - } - base = bufpages / nbuf; - residual = bufpages % nbuf; - - /* now allocate RAM for buffers */ - for (i = 0 ; i < nbuf ; i++) { - vaddr_t curbuf; - vsize_t curbufsize; - struct vm_page *pg; - - /* - * each buffer has MAXBSIZE bytes of VM space allocated. of - * that MAXBSIZE space we allocate and map (base+1) pages - * for the first "residual" buffers, and then we allocate - * "base" pages for the rest. - */ - curbuf = (vaddr_t) buffers + (i * MAXBSIZE); - curbufsize = NBPG * ((i < residual) ? (base+1) : base); - - while (curbufsize) { - pg = uvm_pagealloc(NULL, 0, NULL, 0); - if (pg == NULL) - panic("cpu_startup: " - "not enough RAM for buffer cache"); - pmap_kenter_pa(curbuf, - VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE); - curbuf += PAGE_SIZE; - curbufsize -= PAGE_SIZE; - } - } - pmap_update(pmap_kernel()); + /* + * Determine how many buffers to allocate. + * We allocate bufcachepercent% of memory for buffer space. + */ + if (bufpages == 0) + bufpages = physmem * bufcachepercent / 100; /* * Allocate a submap for exec arguments. This map effectively @@ -327,8 +278,6 @@ cpu_startup() #endif printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free), ptoa(uvmexp.free)/1024/1024); - printf("using %d buffers containing %ld bytes of memory\n", nbuf, - (long)bufpages * PAGE_SIZE); /* * Set up buffers, so they can be used to read disk labels. @@ -352,30 +301,6 @@ allocsys(caddr_t v) valloc(msqids, struct msqid_ds, msginfo.msgmni); #endif - /* - * Determine how many buffers to allocate (enough to - * hold 5% of total physical memory, but at least 16). - * Allocate 1/2 as many swap buffer headers as file i/o buffers. 
- */ - if (bufpages == 0) - bufpages = physmem * bufcachepercent / 100; - if (nbuf == 0) { - nbuf = bufpages; - if (nbuf < 16) - nbuf = 16; - } - /* Restrict to at most 30% filled kvm */ - if (nbuf * MAXBSIZE > - (KERNEND - KERNBASE) * 3 / 10) - nbuf = (KERNEND - KERNBASE) / - MAXBSIZE * 3 / 10; - - /* More buffer pages than fits into the buffers is senseless. */ - if (bufpages > nbuf * MAXBSIZE / PAGE_SIZE) - bufpages = nbuf * MAXBSIZE / PAGE_SIZE; - - valloc(buf, struct buf, nbuf); - return (v); } diff --git a/sys/arch/vax/vax/machdep.c b/sys/arch/vax/vax/machdep.c index 8ca32824555..e545a325125 100644 --- a/sys/arch/vax/vax/machdep.c +++ b/sys/arch/vax/vax/machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: machdep.c,v 1.88 2007/05/25 14:50:38 deraadt Exp $ */ +/* $OpenBSD: machdep.c,v 1.89 2007/05/26 20:26:51 pedro Exp $ */ /* $NetBSD: machdep.c,v 1.108 2000/09/13 15:00:23 thorpej Exp $ */ /* @@ -123,11 +123,6 @@ caddr_t allocsys(caddr_t); #define BUFCACHEPERCENT 5 #endif -#ifdef NBUF -int nbuf = NBUF; -#else -int nbuf = 0; -#endif #ifdef BUFPAGES int bufpages = BUFPAGES; #else @@ -176,9 +171,8 @@ void cpu_startup() { caddr_t v; - int base, residual, i, sz; + int sz; vaddr_t minaddr, maxaddr; - vsize_t size; extern unsigned int avail_end; extern char cpu_model[]; @@ -212,53 +206,19 @@ cpu_startup() panic("startup: no room for tables"); if (((unsigned long)allocsys(v) - (unsigned long)v) != sz) panic("startup: table size inconsistency"); + /* - * Now allocate buffers proper. They are different than the above in - * that they usually occupy more virtual memory than physical. + * Determine how many buffers to allocate. + * We allocate bufcachepercent% of memory for buffer space. */ - size = MAXBSIZE * nbuf; /* # bytes for buffers */ - - /* allocate VM for buffers... area is not managed by VM system */ - if (uvm_map(kernel_map, (vaddr_t *)&buffers, round_page(size), - NULL, UVM_UNKNOWN_OFFSET, 0, - UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE, - UVM_ADV_NORMAL, 0))) - panic("cpu_startup: cannot allocate VM for buffers"); - - minaddr = (vaddr_t)buffers; - if ((bufpages / nbuf) >= btoc(MAXBSIZE)) { - /* don't want to alloc more physical mem than needed */ - bufpages = btoc(MAXBSIZE) * nbuf; - } - base = bufpages / nbuf; - residual = bufpages % nbuf; - /* now allocate RAM for buffers */ - for (i = 0; i < nbuf; i++) { - vaddr_t curbuf; - vsize_t curbufsize; - struct vm_page *pg; + if (bufpages == 0) + bufpages = physmem * bufcachepercent / 100; - /* - * First <residual> buffers get (base+1) physical pages - * allocated for them. The rest get (base) physical pages. - * - * The rest of each buffer occupies virtual space, but has no - * physical memory allocated for it. - */ - curbuf = (vaddr_t)buffers + i * MAXBSIZE; - curbufsize = PAGE_SIZE * (i < residual ? base + 1 : base); - while (curbufsize) { - pg = uvm_pagealloc(NULL, 0, NULL, 0); - if (pg == NULL) - panic("cpu_startup: " - "not enough RAM for buffer cache"); - pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg), - VM_PROT_READ | VM_PROT_WRITE); - curbuf += PAGE_SIZE; - curbufsize -= PAGE_SIZE; - } - } - pmap_update(kernel_map->pmap); + /* Restrict to at most 25% filled kvm */ + if (bufpages > + (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE / 4) + bufpages = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / + PAGE_SIZE / 4; /* * Allocate a submap for exec arguments. 
This map effectively limits @@ -276,7 +236,6 @@ cpu_startup() printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free), ptoa(uvmexp.free)/1024/1024); - printf("using %d buffers containing %d bytes of memory\n", nbuf, bufpages * PAGE_SIZE); /* * Set up buffers, so they can be used to read disk labels. @@ -895,28 +854,6 @@ allocsys(v) VALLOC(msqids, struct msqid_ds, msginfo.msgmni); #endif - /* - * Determine how many buffers to allocate. We make sure we allocate - * at least 16 buffers. - */ - if (bufpages == 0) { - bufpages = (btoc(2 * 1024 * 1024) + physmem) * - bufcachepercent / 100; - } - if (nbuf == 0) - nbuf = bufpages < 16 ? 16 : bufpages; - - /* Restrict to at most 70% filled kvm */ - if (nbuf * MAXBSIZE > - (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) * 7 / 10) - nbuf = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / - MAXBSIZE * 7 / 10; - - /* More buffer pages than fits into the buffers is senseless. */ - if (bufpages > nbuf * MAXBSIZE / PAGE_SIZE) - bufpages = nbuf * MAXBSIZE / PAGE_SIZE; - - VALLOC(buf, struct buf, nbuf); return (v); } |
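Every architecture touched by this diff converges on the same sizing rule: if bufpages was not set in the config or patched, take bufcachepercent percent of physical memory, and on most of these platforms clamp the result so the cache cannot occupy more than a quarter of the kernel virtual address range. A standalone sketch of that calculation follows; the address range, page size, memory size and percentage below are invented for the example (10 is the new sparc64 default, most other platforms in this diff keep 5):

#include <stdio.h>

#define PAGE_SIZE		4096UL		/* illustrative */
#define VM_MIN_KERNEL_ADDRESS	0xd0000000UL	/* illustrative 256MB kva range */
#define VM_MAX_KERNEL_ADDRESS	0xe0000000UL

int
main(void)
{
	unsigned long physmem = 131072;	/* hypothetical: 512MB in 4KB pages */
	int bufcachepercent = 10;
	unsigned long bufpages, kvapages;

	bufpages = physmem * bufcachepercent / 100;

	/* Restrict to at most 25% filled kvm */
	kvapages = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE;
	if (bufpages > kvapages / 4)
		bufpages = kvapages / 4;

	printf("buffer cache: %lu pages (%lu MB)\n",
	    bufpages, bufpages * PAGE_SIZE / 1024 / 1024);
	return 0;
}

With these numbers the clamp does not trigger (13107 pages is well under the 16384-page cap), so the cache gets roughly 51MB; shrink the kernel address range or raise physmem and the 25% limit takes over instead.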