Diffstat (limited to 'sys')
-rw-r--r--  sys/arch/amd64/amd64/gdt.c         | 112
-rw-r--r--  sys/arch/amd64/amd64/vm_machdep.c  |   5
-rw-r--r--  sys/arch/amd64/include/gdt.h       |   8
-rw-r--r--  sys/arch/i386/i386/gdt.c           | 114
-rw-r--r--  sys/arch/i386/i386/mptramp.s       |   4
-rw-r--r--  sys/arch/i386/i386/vm_machdep.c    |   4
-rw-r--r--  sys/arch/i386/include/gdt.h        |  15
7 files changed, 74 insertions, 188 deletions
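
This commit replaces the grow-on-demand GDT (and the rwlock that guarded gdt_grow()) with a GDT that is allocated at its maximum size up front and protected by an IPL_HIGH mutex. What remains in gdt_get_slot()/gdt_put_slot() is a fixed-capacity slot allocator: a free list threaded through a spare field of freed descriptors, plus a sweep index for never-used slots. The sketch below is a minimal, standalone userland model of that pattern, not kernel code: slot_get, slot_put, NSLOTS, SLOT_END and the pthread mutex are invented stand-ins for gdt_get_slot, gdt_put_slot, MAXGDTSIZ, GNULL_SEL and the kernel mutex, and it returns SLOT_END where the kernel panics.

/*
 * Standalone model of the slot allocator this patch leaves behind:
 * a fixed-size table, a free list threaded through a spare field of
 * freed entries (like sd_xx3 / gd_selector), and a sweep index for
 * slots that have never been used.  Illustrative only.
 */
#include <pthread.h>
#include <stdio.h>

#define NSLOTS		16	/* stands in for the MAXGDTSIZ-derived slot count */
#define SLOT_END	(-1)	/* stands in for the GNULL_SEL terminator */

struct slot {
	int in_use;
	int free_link;		/* next free slot when this one is free */
};

static struct slot table[NSLOTS];
static int slot_next;			/* next never-used slot to sweep */
static int slot_free = SLOT_END;	/* head of the free list */
static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;

static int
slot_get(void)
{
	int slot;

	pthread_mutex_lock(&slot_lock);
	if (slot_free != SLOT_END) {
		/* Reuse a previously freed slot. */
		slot = slot_free;
		slot_free = table[slot].free_link;
	} else {
		if (slot_next >= NSLOTS) {
			/* the kernel panics here: "out of GDT descriptors" */
			pthread_mutex_unlock(&slot_lock);
			return (SLOT_END);
		}
		slot = slot_next++;
	}
	table[slot].in_use = 1;
	pthread_mutex_unlock(&slot_lock);
	return (slot);
}

static void
slot_put(int slot)
{
	pthread_mutex_lock(&slot_lock);
	table[slot].in_use = 0;
	table[slot].free_link = slot_free;	/* push onto the free list */
	slot_free = slot;
	pthread_mutex_unlock(&slot_lock);
}

int
main(void)
{
	int a = slot_get(), b = slot_get();

	printf("got %d and %d\n", a, b);
	slot_put(a);
	printf("after freeing %d, the next allocation reuses %d\n", a, slot_get());
	return (0);
}

Because the table can no longer grow, the lock never has to cover a sleeping allocation, which is what makes the switch from the rwlock to a plain mutex in the hunks below possible.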
diff --git a/sys/arch/amd64/amd64/gdt.c b/sys/arch/amd64/amd64/gdt.c
index 1f36bcfdb4b..0753153a884 100644
--- a/sys/arch/amd64/amd64/gdt.c
+++ b/sys/arch/amd64/amd64/gdt.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: gdt.c,v 1.13 2009/01/17 23:44:46 guenther Exp $ */
+/* $OpenBSD: gdt.c,v 1.14 2009/02/03 11:24:19 mikeb Exp $ */
 /* $NetBSD: gdt.c,v 1.1 2003/04/26 18:39:28 fvdl Exp $ */

 /*-
@@ -40,45 +40,27 @@
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/proc.h>
-#include <sys/rwlock.h>
 #include <sys/user.h>
+#include <sys/mutex.h>

 #include <uvm/uvm.h>

 #include <machine/gdt.h>

-int gdt_size;           /* size of GDT in bytes */
-int gdt_dyncount;       /* number of dyn. allocated GDT entries in use */
-int gdt_dynavail;
 int gdt_next;           /* next available slot for sweeping */
 int gdt_free;           /* next free slot; terminated with GNULL_SEL */

-struct rwlock gdt_lock_store = RWLOCK_INITIALIZER("gdtlk");
+struct mutex gdt_lock_store = MUTEX_INITIALIZER(IPL_HIGH);

-static __inline void gdt_lock(void);
-static __inline void gdt_unlock(void);
 void gdt_init(void);
-void gdt_grow(void);
 int gdt_get_slot(void);
 void gdt_put_slot(int);

 /*
- * Lock and unlock the GDT, to avoid races in case gdt_{ge,pu}t_slot() sleep
- * waiting for memory.
+ * Lock and unlock the GDT.
  */
-static __inline void
-gdt_lock(void)
-{
-        if (curproc != NULL)    /* XXX - ugh. needed for startup */
-                rw_enter_write(&gdt_lock_store);
-}
-
-static __inline void
-gdt_unlock(void)
-{
-        if (curproc != NULL)
-                rw_exit_write(&gdt_lock_store);
-}
+#define gdt_lock() (mtx_enter(&gdt_lock_store))
+#define gdt_unlock() (mtx_leave(&gdt_lock_store))

 void
 set_mem_gdt(struct mem_segment_descriptor *sd, void *base, size_t limit,
@@ -126,16 +108,12 @@ gdt_init(void)
         vaddr_t va;
         struct cpu_info *ci = &cpu_info_primary;

-        gdt_size = MINGDTSIZ;
-        gdt_dyncount = 0;
         gdt_next = 0;
         gdt_free = GNULL_SEL;
-        gdt_dynavail =
-            (gdt_size - DYNSEL_START) / sizeof (struct sys_segment_descriptor);

         old_gdt = gdtstore;
         gdtstore = (char *)uvm_km_valloc(kernel_map, MAXGDTSIZ);
-        for (va = (vaddr_t)gdtstore; va < (vaddr_t)gdtstore + gdt_size;
+        for (va = (vaddr_t)gdtstore; va < (vaddr_t)gdtstore + MAXGDTSIZ;
             va += PAGE_SIZE) {
                 pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
                 if (pg == NULL) {
@@ -144,7 +122,7 @@ gdt_init(void)
                 pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
                     VM_PROT_READ | VM_PROT_WRITE);
         }
-        memcpy(gdtstore, old_gdt, DYNSEL_START);
+        bcopy(old_gdt, gdtstore, DYNSEL_START);
         ci->ci_gdt = gdtstore;
         set_sys_segment(GDT_ADDR_SYS(gdtstore, GLDT_SEL), ldtstore,
             LDT_SIZE - 1, SDT_SYSLDT, SEL_KPL, 0);
@@ -159,12 +137,22 @@ gdt_init(void)
 void
 gdt_alloc_cpu(struct cpu_info *ci)
 {
-        ci->ci_gdt = (char *)uvm_km_valloc(kernel_map, MAXGDTSIZ);
-        uvm_map_pageable(kernel_map, (vaddr_t)ci->ci_gdt,
-            (vaddr_t)ci->ci_gdt + MINGDTSIZ, FALSE, FALSE);
-        memset(ci->ci_gdt, 0, MINGDTSIZ);
-        memcpy(ci->ci_gdt, gdtstore,
-            DYNSEL_START + gdt_dyncount * sizeof(struct sys_segment_descriptor));
+        struct vm_page *pg;
+        vaddr_t va;
+
+        ci->ci_gdt = (char *)uvm_km_valloc(kernel_map, MAXGDTSIZ);
+        uvm_map_pageable(kernel_map, (vaddr_t)ci->ci_gdt,
+            (vaddr_t)ci->ci_gdt + MAXGDTSIZ, FALSE, FALSE);
+        for (va = (vaddr_t)ci->ci_gdt; va < (vaddr_t)ci->ci_gdt + MAXGDTSIZ;
+            va += PAGE_SIZE) {
+                pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
+                if (pg == NULL)
+                        panic("gdt_init: no pages");
+                pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
+                    VM_PROT_READ | VM_PROT_WRITE);
+        }
+        bzero(ci->ci_gdt, MAXGDTSIZ);
+        bcopy(gdtstore, ci->ci_gdt, MAXGDTSIZ);
 }
 #endif  /* MULTIPROCESSOR */

@@ -194,46 +182,11 @@ gdt_reload_cpu(struct cpu_info *ci)
 }
 #endif

-
-/*
- * Grow or shrink the GDT.
- */
-void
-gdt_grow(void)
-{
-        CPU_INFO_ITERATOR cii;
-        struct cpu_info *ci;
-        struct vm_page *pg;
-        size_t old_len;
-        vaddr_t va;
-
-        old_len = gdt_size;
-        gdt_size = 2 * gdt_size;
-        gdt_dynavail =
-            (gdt_size - DYNSEL_START) / sizeof (struct sys_segment_descriptor);
-
-        CPU_INFO_FOREACH(cii, ci) {
-                for (va = (vaddr_t)(ci->ci_gdt) + old_len;
-                    va < (vaddr_t)(ci->ci_gdt) + gdt_size;
-                    va += PAGE_SIZE) {
-                        while ((pg =
-                            uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO)) ==
-                            NULL) {
-                                uvm_wait("gdt_grow");
-                        }
-                        pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
-                            VM_PROT_READ | VM_PROT_WRITE);
-                }
-        }
-}
-
 /*
  * Allocate a GDT slot as follows:
  * 1) If there are entries on the free list, use those.
- * 2) If there are fewer than gdt_dynavail entries in use, there are free slots
+ * 2) If there are fewer than MAXGDTSIZ entries in use, there are free slots
  *    near the end that we can sweep through.
- * 3) As a last resort, we increase the size of the GDT, and sweep through
- *    the new slots.
  */
 int
 gdt_get_slot(void)
@@ -249,21 +202,11 @@ gdt_get_slot(void)
                 slot = gdt_free;
                 gdt_free = gdt[slot].sd_xx3;    /* XXXfvdl res. field abuse */
         } else {
-#ifdef DIAGNOSTIC
-                if (gdt_next != gdt_dyncount)
-                        panic("gdt_get_slot botch 1");
-#endif
-                if (gdt_next >= gdt_dynavail) {
-#ifdef DIAGNOSTIC
-                        if (gdt_size >= MAXGDTSIZ)
-                                panic("gdt_get_slot botch 2");
-#endif
-                        gdt_grow();
-                }
+                if (gdt_next >= MAXGDTSIZ)
+                        panic("gdt_get_slot: out of GDT descriptors");
                 slot = gdt_next++;
         }

-        gdt_dyncount++;
         gdt_unlock();
         return (slot);
 }
@@ -279,7 +222,6 @@ gdt_put_slot(int slot)
         gdt = (struct sys_segment_descriptor *)&gdtstore[DYNSEL_START];

         gdt_lock();
-        gdt_dyncount--;
         gdt[slot].sd_type = SDT_SYSNULL;
         gdt[slot].sd_xx3 = gdt_free;
diff --git a/sys/arch/amd64/amd64/vm_machdep.c b/sys/arch/amd64/amd64/vm_machdep.c
index d9607ed1747..144ecf99541 100644
--- a/sys/arch/amd64/amd64/vm_machdep.c
+++ b/sys/arch/amd64/amd64/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.14 2009/01/28 08:02:02 grange Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.15 2009/02/03 11:24:19 mikeb Exp $ */
 /* $NetBSD: vm_machdep.c,v 1.1 2003/04/26 18:39:33 fvdl Exp $ */

 /*-
@@ -160,6 +160,7 @@ cpu_exit(struct proc *p)
                 mtrr_clean(p);

         pmap_deactivate(p);
+        tss_free(p->p_md.md_tss_sel);
         sched_exit(p);
 }

@@ -171,8 +172,6 @@ cpu_exit(struct proc *p)
 void
 cpu_wait(struct proc *p)
 {
-        /* Nuke the TSS. */
-        tss_free(p->p_md.md_tss_sel);
 }

 /*
diff --git a/sys/arch/amd64/include/gdt.h b/sys/arch/amd64/include/gdt.h
index 3b4abd01071..b52bc98e3eb 100644
--- a/sys/arch/amd64/include/gdt.h
+++ b/sys/arch/amd64/include/gdt.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: gdt.h,v 1.3 2008/06/26 05:42:09 ray Exp $ */
+/* $OpenBSD: gdt.h,v 1.4 2009/02/03 11:24:19 mikeb Exp $ */
 /* $NetBSD: gdt.h,v 1.1 2003/04/26 18:39:40 fvdl Exp $ */

 /*-
@@ -51,6 +51,8 @@ void set_sys_gdt(struct sys_segment_descriptor *, void *, size_t, int, int,
     int);
 #endif

-/* MINGDTSIZ must be a multiple of PAGE_SIZE or gdt_grow breaks */
-#define MINGDTSIZ PAGE_SIZE
+/*
+ * Maximum GDT size.  It cannot exceed 65536 since the selector field of
+ * a descriptor is just 16 bits, and used as free list link.
+ */
 #define MAXGDTSIZ 65536
diff --git a/sys/arch/i386/i386/gdt.c b/sys/arch/i386/i386/gdt.c
index 6d2fb8484bf..eaab6cb7aeb 100644
--- a/sys/arch/i386/i386/gdt.c
+++ b/sys/arch/i386/i386/gdt.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: gdt.c,v 1.28 2008/06/26 05:42:10 ray Exp $ */
+/* $OpenBSD: gdt.c,v 1.29 2009/02/03 11:24:19 mikeb Exp $ */
 /* $NetBSD: gdt.c,v 1.28 2002/12/14 09:38:50 junyoung Exp $ */

 /*-
@@ -34,15 +34,11 @@
  * The GDT handling has two phases.  During the early lifetime of the
  * kernel there is a static gdt which will be stored in bootstrap_gdt.
  * Later, when the virtual memory is initialized, this will be
- * replaced with a dynamically resizable GDT (although, we will only
- * ever be growing it, there is almost no gain at all to compact it,
- * and it has proven to be a complicated thing to do, considering
- * parallel access, so it's just not worth the effort.
+ * replaced with a maximum sized GDT.
  *
- * The static GDT area will hold the initial requirement of NGDT descriptors.
- * The dynamic GDT will have a statically sized virtual memory area of size
- * GDTMAXPAGES, the physical area backing this will be allocated as needed
- * starting with the size needed for holding a copy of the bootstrap gdt.
+ * The bootstrap GDT area will hold the initial requirement of NGDT
+ * descriptors. The normal GDT will have a statically sized virtual memory
+ * area of size MAXGDTSIZ.
  *
  * Every CPU in a system has its own copy of the GDT.  The only real difference
  * between the two are currently that there is a cpu-specific segment holding
@@ -58,7 +54,7 @@
 #include <sys/proc.h>
 #include <sys/lock.h>
 #include <sys/user.h>
-#include <sys/rwlock.h>
+#include <sys/mutex.h>

 #include <uvm/uvm.h>

@@ -67,31 +63,19 @@
 union descriptor bootstrap_gdt[NGDT];
 union descriptor *gdt = bootstrap_gdt;

-int gdt_size;           /* total number of GDT entries */
 int gdt_next;           /* next available slot for sweeping */
 int gdt_free;           /* next free slot; terminated with GNULL_SEL */

-struct rwlock gdt_lock_store = RWLOCK_INITIALIZER("gdtlk");
+struct mutex gdt_lock_store = MUTEX_INITIALIZER(IPL_HIGH);

-void gdt_grow(void);
 int gdt_get_slot(void);
 void gdt_put_slot(int);

 /*
- * Lock and unlock the GDT, to avoid races in case gdt_{ge,pu}t_slot() sleep
- * waiting for memory.
+ * Lock and unlock the GDT.
  */
-#define gdt_lock() \
-        do { \
-                if (curproc != NULL) \
-                        rw_enter_write(&gdt_lock_store);\
-        } while (0)
-
-#define gdt_unlock() \
-        do { \
-                if (curproc != NULL) \
-                        rw_exit_write(&gdt_lock_store); \
-        } while (0)
+#define gdt_lock() (mtx_enter(&gdt_lock_store))
+#define gdt_unlock() (mtx_leave(&gdt_lock_store))

 /* XXX needs spinlocking if we ever mean to go finegrained. */
 void
@@ -102,7 +86,7 @@ setgdt(int sel, void *base, size_t limit, int type, int dpl, int def32,
         CPU_INFO_ITERATOR cii;
         struct cpu_info *ci;

-        KASSERT(sel < gdt_size);
+        KASSERT(sel < MAXGDTSIZ);

         setsegment(sd, base, limit, type, dpl, def32, gran);
         CPU_INFO_FOREACH(cii, ci)
@@ -116,20 +100,16 @@ setgdt(int sel, void *base, size_t limit, int type, int dpl, int def32,
 void
 gdt_init()
 {
-        size_t max_len, min_len;
         struct vm_page *pg;
         vaddr_t va;
         struct cpu_info *ci = &cpu_info_primary;

-        max_len = MAXGDTSIZ * sizeof(union descriptor);
-        min_len = MINGDTSIZ * sizeof(union descriptor);
-
-        gdt_size = MINGDTSIZ;
         gdt_next = NGDT;
         gdt_free = GNULL_SEL;

-        gdt = (union descriptor *)uvm_km_valloc(kernel_map, max_len);
-        for (va = (vaddr_t)gdt; va < (vaddr_t)gdt + min_len; va += PAGE_SIZE) {
+        gdt = (union descriptor *)uvm_km_valloc(kernel_map, MAXGDTSIZ);
+        for (va = (vaddr_t)gdt; va < (vaddr_t)gdt + MAXGDTSIZ;
+            va += PAGE_SIZE) {
                 pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
                 if (pg == NULL)
                         panic("gdt_init: no pages");
@@ -151,14 +131,22 @@ gdt_init()
 void
 gdt_alloc_cpu(struct cpu_info *ci)
 {
-        int max_len = MAXGDTSIZ * sizeof(union descriptor);
-        int min_len = MINGDTSIZ * sizeof(union descriptor);
+        struct vm_page *pg;
+        vaddr_t va;

-        ci->ci_gdt = (union descriptor *)uvm_km_valloc(kernel_map, max_len);
+        ci->ci_gdt = (union descriptor *)uvm_km_valloc(kernel_map, MAXGDTSIZ);
         uvm_map_pageable(kernel_map, (vaddr_t)ci->ci_gdt,
-            (vaddr_t)ci->ci_gdt + min_len, FALSE, FALSE);
-        bzero(ci->ci_gdt, min_len);
-        bcopy(gdt, ci->ci_gdt, gdt_size * sizeof(union descriptor));
+            (vaddr_t)ci->ci_gdt + MAXGDTSIZ, FALSE, FALSE);
+        for (va = (vaddr_t)ci->ci_gdt; va < (vaddr_t)ci->ci_gdt + MAXGDTSIZ;
+            va += PAGE_SIZE) {
+                pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
+                if (pg == NULL)
+                        panic("gdt_init: no pages");
+                pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
+                    VM_PROT_READ | VM_PROT_WRITE);
+        }
+        bzero(ci->ci_gdt, MAXGDTSIZ);
+        bcopy(gdt, ci->ci_gdt, MAXGDTSIZ);
         setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, sizeof(struct cpu_info)-1,
             SDT_MEMRWA, SEL_KPL, 0, 0);
 }
@@ -174,50 +162,15 @@ gdt_init_cpu(struct cpu_info *ci)
 {
         struct region_descriptor region;

-        setregion(&region, ci->ci_gdt,
-            MAXGDTSIZ * sizeof(union descriptor) - 1);
+        setregion(&region, ci->ci_gdt, MAXGDTSIZ - 1);
         lgdt(&region);
 }

 /*
- * Grow the GDT.
- */
-void
-gdt_grow()
-{
-        size_t old_len, new_len;
-        CPU_INFO_ITERATOR cii;
-        struct cpu_info *ci;
-        struct vm_page *pg;
-        vaddr_t va;
-
-        old_len = gdt_size * sizeof(union descriptor);
-        gdt_size <<= 1;
-        new_len = old_len << 1;
-
-        CPU_INFO_FOREACH(cii, ci) {
-                for (va = (vaddr_t)(ci->ci_gdt) + old_len;
-                    va < (vaddr_t)(ci->ci_gdt) + new_len;
-                    va += PAGE_SIZE) {
-                        while (
-                            (pg =
-                            uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO)) ==
-                            NULL) {
-                                uvm_wait("gdt_grow");
-                        }
-                        pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
-                            VM_PROT_READ | VM_PROT_WRITE);
-                }
-        }
-}
-
-/*
  * Allocate a GDT slot as follows:
  * 1) If there are entries on the free list, use those.
- * 2) If there are fewer than gdt_size entries in use, there are free slots
+ * 2) If there are fewer than MAXGDTSIZ entries in use, there are free slots
  *    near the end that we can sweep through.
- * 3) As a last resort, we increase the size of the GDT, and sweep through
- *    the new slots.
  */
 int
 gdt_get_slot()
@@ -230,11 +183,8 @@ gdt_get_slot()
                 slot = gdt_free;
                 gdt_free = gdt[slot].gd.gd_selector;
         } else {
-                if (gdt_next >= gdt_size) {
-                        if (gdt_size >= MAXGDTSIZ)
-                                panic("gdt_get_slot: out of GDT descriptors");
-                        gdt_grow();
-                }
+                if (gdt_next >= MAXGDTSIZ)
+                        panic("gdt_get_slot: out of GDT descriptors");
                 slot = gdt_next++;
         }

diff --git a/sys/arch/i386/i386/mptramp.s b/sys/arch/i386/i386/mptramp.s
index b8bee786c7e..2644283b41d 100644
--- a/sys/arch/i386/i386/mptramp.s
+++ b/sys/arch/i386/i386/mptramp.s
@@ -1,4 +1,4 @@
-/* $OpenBSD: mptramp.s,v 1.10 2008/06/26 05:42:10 ray Exp $ */
+/* $OpenBSD: mptramp.s,v 1.11 2009/02/03 11:24:19 mikeb Exp $ */

 /*-
  * Copyright (c) 2000 The NetBSD Foundation, Inc.
@@ -180,7 +180,7 @@ _TRMP_LABEL(mp_startup)

         # %ecx points at our cpu_info structure..

-        movw    $((MAXGDTSIZ*8) - 1), 6(%esp)   # prepare segment descriptor
+        movw    $(MAXGDTSIZ-1), 6(%esp)         # prepare segment descriptor
         movl    CPU_INFO_GDT(%ecx), %eax        # for real gdt
         movl    %eax, 8(%esp)
         HALTT(0x8, %eax)
diff --git a/sys/arch/i386/i386/vm_machdep.c b/sys/arch/i386/i386/vm_machdep.c
index acad8e49e34..c96575eb141 100644
--- a/sys/arch/i386/i386/vm_machdep.c
+++ b/sys/arch/i386/i386/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.55 2009/01/17 23:44:46 guenther Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.56 2009/02/03 11:24:19 mikeb Exp $ */
 /* $NetBSD: vm_machdep.c,v 1.61 1996/05/03 19:42:35 christos Exp $ */

 /*-
@@ -136,13 +136,13 @@ cpu_exit(struct proc *p)
 #endif

         pmap_deactivate(p);
+        tss_free(p->p_md.md_tss_sel);
         sched_exit(p);
 }

 void
 cpu_wait(struct proc *p)
 {
-        tss_free(p->p_md.md_tss_sel);
 }

 /*
diff --git a/sys/arch/i386/include/gdt.h b/sys/arch/i386/include/gdt.h
index 1b1ac72c88c..b34872d6ff3 100644
--- a/sys/arch/i386/include/gdt.h
+++ b/sys/arch/i386/include/gdt.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: gdt.h,v 1.11 2008/06/26 05:42:10 ray Exp $ */
+/* $OpenBSD: gdt.h,v 1.12 2009/02/03 11:24:19 mikeb Exp $ */
 /* $NetBSD: gdt.h,v 1.7.10.6 2002/08/19 01:22:36 sommerfeld Exp $ */

 /*-
@@ -50,15 +50,8 @@ void setgdt(int, void *, size_t, int, int, int, int);
 #endif

 /*
- * The initial GDT size (as a descriptor count), and the maximum
- * GDT size possible.
- *
- * These are actually not arbitrary.  To start with, they have to be
- * multiples of 512 and at least 512, in order to work with the
- * allocation strategy set forth by gdt_init and gdt_grow.  Then, the
- * max cannot exceed 65536 since the selector field of a descriptor is
- * just 16 bits, and used as free list link.
+ * Maximum GDT size.  It cannot exceed 65536 since the selector field of
+ * a descriptor is just 16 bits, and used as free list link.
  */
-#define MINGDTSIZ 512
-#define MAXGDTSIZ 8192
+#define MAXGDTSIZ 65536
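
The other half of the change is the backing-memory strategy: gdt_init() and gdt_alloc_cpu() now reserve the full MAXGDTSIZ virtual range with uvm_km_valloc() and immediately back every page with uvm_pagealloc()/pmap_kenter_pa(), so the GDT never has to grow (or sleep for memory) afterwards. The sketch below is a rough userland analogue of that "commit the maximum once, never resize" approach, assuming mmap() plus touching every page is an acceptable stand-in for the UVM calls; gdt_region_alloc and MAXREGION are invented names used only for illustration.

/*
 * Userland analogue of the up-front allocation gdt_init()/gdt_alloc_cpu()
 * now perform: reserve the maximum-sized region once and back every page
 * immediately, so no later growth path is needed.  mmap()/memset() stand
 * in for uvm_km_valloc()/uvm_pagealloc()/pmap_kenter_pa().
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define MAXREGION	65536		/* plays the role of MAXGDTSIZ */

static void *
gdt_region_alloc(void)
{
	void *p;

	/* Reserve the whole maximum-sized region in one go. */
	p = mmap(NULL, MAXREGION, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	if (p == MAP_FAILED)
		return (NULL);

	/*
	 * Touch every byte so all pages are populated now, mirroring the
	 * patch's per-page allocation loop.
	 */
	memset(p, 0, MAXREGION);
	return (p);
}

int
main(void)
{
	void *region = gdt_region_alloc();

	if (region == NULL) {
		fprintf(stderr, "gdt_region_alloc failed\n");
		return (1);
	}
	printf("committed %d bytes up front at %p\n", MAXREGION, region);
	munmap(region, MAXREGION);
	return (0);
}

The trade-off is the one the patch accepts in the kernel: a fixed amount of memory is spent per CPU at startup in exchange for a simpler allocator that never sleeps, which in turn allows the plain mutex and the unconditional panic on exhaustion.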