24 files changed, 1227 insertions(+), 723 deletions(-)
diff --git a/sys/arch/alpha/alpha/cpu.c b/sys/arch/alpha/alpha/cpu.c index 83e18399061..ec7156486c3 100644 --- a/sys/arch/alpha/alpha/cpu.c +++ b/sys/arch/alpha/alpha/cpu.c @@ -1,4 +1,4 @@ -/* $OpenBSD: cpu.c,v 1.31 2014/01/19 12:45:35 deraadt Exp $ */ +/* $OpenBSD: cpu.c,v 1.32 2014/01/26 17:40:09 miod Exp $ */ /* $NetBSD: cpu.c,v 1.44 2000/05/23 05:12:53 thorpej Exp $ */ /*- @@ -74,22 +74,24 @@ #include <machine/rpb.h> #include <machine/prom.h> +struct cpu_info cpu_info_primary; +struct cpu_info *cpu_info_list = &cpu_info_primary; + #if defined(MULTIPROCESSOR) #include <sys/malloc.h> -#include <sys/kthread.h> /* * Array of CPU info structures. Must be statically-allocated because * curproc, etc. are used early. */ -struct cpu_info cpu_info[ALPHA_MAXPROCS]; +struct cpu_info *cpu_info[ALPHA_MAXPROCS]; -/* Bitmask of CPUs currently running. */ +/* Bitmask of CPUs booted, currently running, and paused. */ +__volatile u_long cpus_booted; __volatile u_long cpus_running; +__volatile u_long cpus_paused; void cpu_boot_secondary(struct cpu_info *); -#else /* MULTIPROCESSOR */ -struct cpu_info cpu_info_store; #endif /* MULTIPROCESSOR */ /* @@ -114,6 +116,8 @@ struct cfdriver cpu_cd = { NULL, "cpu", DV_DULL }; +void cpu_announce_extensions(struct cpu_info *); + static const char *ev4minor[] = { "pass 2 or 2.1", "pass 3", 0 }, *lcaminor[] = { @@ -146,7 +150,6 @@ static const char *ev4minor[] = { "pass 4.0", 0 }; - struct cputable_struct { int cpu_major_code; const char *cpu_major_name; @@ -195,10 +198,7 @@ struct cputable_struct { */ int -cpumatch(parent, cfdata, aux) - struct device *parent; - void *cfdata; - void *aux; +cpumatch(struct device *parent, void *cfdata, void *aux) { struct mainbus_attach_args *ma = aux; @@ -216,12 +216,10 @@ cpumatch(parent, cfdata, aux) } void -cpuattach(parent, dev, aux) - struct device *parent; - struct device *dev; - void *aux; +cpuattach(struct device *parent, struct device *dev, void *aux) { struct mainbus_attach_args *ma = aux; + struct cpu_info *ci; int i; const char **s; struct pcs *p; @@ -232,7 +230,6 @@ cpuattach(parent, dev, aux) #if defined(MULTIPROCESSOR) extern paddr_t avail_start, avail_end; struct pcb *pcb; - struct cpu_info *ci; struct pglist mlist; int error; #endif @@ -263,17 +260,6 @@ cpuattach(parent, dev, aux) recognized: printf("\n"); - if (ma->ma_slot == hwrpb->rpb_primary_cpu_id) { - cpu_implver = alpha_implver(); - if (cpu_implver >= ALPHA_IMPLVER_EV5) - cpu_amask = - (~alpha_amask(ALPHA_AMASK_ALL)) & ALPHA_AMASK_ALL; - if (cpu_amask) { - printf("%s: architecture extensions: %b\n", - dev->dv_xname, cpu_amask, ALPHA_AMASK_BITS); - } - } - #ifdef DEBUG if (p->pcs_proc_var != 0) { printf("%s: ", dev->dv_xname); @@ -303,19 +289,21 @@ recognized: printf("%s: processor ID too large, ignoring\n", dev->dv_xname); return; } +#endif /* MULTIPROCESSOR */ - ci = &cpu_info[ma->ma_slot]; +#if defined(MULTIPROCESSOR) + if (ma->ma_slot == hwrpb->rpb_primary_cpu_id) + ci = &cpu_info_primary; + else + ci = malloc(sizeof(*ci), M_DEVBUF, M_WAITOK | M_ZERO); + + cpu_info[ma->ma_slot] = ci; simple_lock_init(&ci->ci_slock); +#else + ci = &cpu_info_primary; +#endif ci->ci_cpuid = ma->ma_slot; ci->ci_dev = dev; -#endif /* MULTIPROCESSOR */ - - /* - * Though we could (should?) attach the LCA cpus' PCI - * bus here there is no good reason to do so, and - * the bus attachment code is easier to understand - * and more compact if done the 'normal' way. 
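A note on the hunk above: cpu_info[] becomes an array of pointers, so only the primary's cpu_info needs static storage (curproc and friends are used before any allocator is up), while secondaries are malloc'd at attach time. A minimal user-space sketch of that split, with all names hypothetical:

```c
#include <stdio.h>
#include <stdlib.h>

#define MAXCPUS	64

struct cpu_state {
	unsigned long id;
};

static struct cpu_state primary;		/* usable before malloc works */
static struct cpu_state *all_cpus[MAXCPUS];	/* pointers, mostly NULL */

static struct cpu_state *
attach_cpu(unsigned long slot, unsigned long primary_slot)
{
	struct cpu_state *cs;

	if (slot == primary_slot)
		cs = &primary;		/* the boot CPU: static storage */
	else {
		/* Secondaries attach late enough to heap-allocate. */
		cs = calloc(1, sizeof(*cs));
		if (cs == NULL)
			return NULL;
	}
	cs->id = slot;
	all_cpus[slot] = cs;
	return cs;
}

int
main(void)
{
	attach_cpu(0, 0);	/* primary */
	attach_cpu(1, 0);	/* secondary, heap-allocated */
	printf("cpu1 at %p\n", (void *)all_cpus[1]);
	return 0;
}
```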
- */ #if defined(MULTIPROCESSOR) /* @@ -360,56 +348,104 @@ recognized: * Initialize the idle stack pointer, reserving space for an * (empty) trapframe (XXX is the trapframe really necessary?) */ - pcb->pcb_hw.apcb_ksp = + pcb->pcb_hw.apcb_ksp = pcb->pcb_hw.apcb_backup_ksp = (u_int64_t)pcb + USPACE - sizeof(struct trapframe); /* * Initialize the idle PCB. */ - pcb->pcb_hw.apcb_backup_ksp = pcb->pcb_hw.apcb_ksp; pcb->pcb_hw.apcb_asn = proc0.p_addr->u_pcb.pcb_hw.apcb_asn; pcb->pcb_hw.apcb_ptbr = proc0.p_addr->u_pcb.pcb_hw.apcb_ptbr; #if 0 - printf("%s: hwpcb ksp = 0x%lx\n", sc->sc_dev.dv_xname, + printf("%s: hwpcb ksp = 0x%lx\n", dev->dv_xname, pcb->pcb_hw.apcb_ksp); - printf("%s: hwpcb ptbr = 0x%lx\n", sc->sc_dev.dv_xname, + printf("%s: hwpcb ptbr = 0x%lx\n", dev->dv_xname, pcb->pcb_hw.apcb_ptbr); #endif +#endif /* MULTIPROCESSOR */ /* * If we're the primary CPU, no more work to do; we're already * running! */ if (ma->ma_slot == hwrpb->rpb_primary_cpu_id) { - ci->ci_flags |= CPUF_PRIMARY; + cpu_announce_extensions(ci); +#if defined(MULTIPROCESSOR) + ci->ci_flags |= CPUF_PRIMARY | CPUF_RUNNING; + atomic_setbits_ulong(&cpus_booted, (1UL << ma->ma_slot)); atomic_setbits_ulong(&cpus_running, (1UL << ma->ma_slot)); - } #endif /* MULTIPROCESSOR */ + } else { +#if defined(MULTIPROCESSOR) + /* + * Boot the secondary processor. It will announce its + * extensions, and then spin up until we tell it to go + * on its merry way. + */ + cpu_boot_secondary(ci); +#endif /* MULTIPROCESSOR */ + } +} + +void +cpu_announce_extensions(struct cpu_info *ci) +{ + u_long implver, amask = 0; + + implver = alpha_implver(); + if (implver >= ALPHA_IMPLVER_EV5) + amask = (~alpha_amask(ALPHA_AMASK_ALL)) & ALPHA_AMASK_ALL; + + if (ci->ci_cpuid == hwrpb->rpb_primary_cpu_id) { + cpu_implver = implver; + cpu_amask = amask; + } else { + if (implver < cpu_implver) + printf("%s: WARNING: IMPLVER %lu < %lu\n", + ci->ci_dev->dv_xname, implver, cpu_implver); + + /* + * Cap the system architecture mask to the intersection + * of features supported by all processors in the system. + */ + cpu_amask &= amask; + } + + if (amask) { + printf("%s: architecture extensions: %b\n", + ci->ci_dev->dv_xname, amask, ALPHA_AMASK_BITS); + } } #if defined(MULTIPROCESSOR) void -cpu_boot_secondary_processors() +cpu_boot_secondary_processors(void) { struct cpu_info *ci; u_long i; for (i = 0; i < ALPHA_MAXPROCS; i++) { - ci = &cpu_info[i]; - if (ci->ci_idle_pcb == NULL) + ci = cpu_info[i]; + if (ci == NULL || ci->ci_idle_pcb == NULL) continue; if (ci->ci_flags & CPUF_PRIMARY) continue; - ci->ci_randseed = (arc4random() & 0x7fffffff) + 1; + if ((cpus_booted & (1UL << i)) == 0) + continue; - /* This processor is all set up; boot it! */ - cpu_boot_secondary(ci); + /* + * Link the processor into the list, and launch it. + */ + ci->ci_next = cpu_info_list->ci_next; + cpu_info_list->ci_next = ci; + atomic_setbits_ulong(&ci->ci_flags, CPUF_RUNNING); + atomic_setbits_ulong(&cpus_running, (1UL << i)); + ncpus++; } } void -cpu_boot_secondary(ci) - struct cpu_info *ci; +cpu_boot_secondary(struct cpu_info *ci) { long timeout; struct pcs *pcsp, *primary_pcsp; @@ -439,10 +475,8 @@ cpu_boot_secondary(ci) * the primary CPU's PALcode revision info to the secondary * CPUs PCS. 
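cpu_announce_extensions() above reduces the system mask with `cpu_amask &= amask`, so the usable extensions are the intersection of what every CPU implements. A small stand-alone sketch of that reduction; the feature bits are made-up stand-ins for the ALPHA_AMASK_* values:

```c
#include <stdio.h>

/* Hypothetical feature bits, mirroring the ALPHA_AMASK_* idea. */
#define FEAT_BWX	0x01	/* byte/word extension */
#define FEAT_FIX	0x02	/* sqrt and FP convert extension */
#define FEAT_CIX	0x04	/* count extension */
#define FEAT_MVI	0x08	/* multimedia extension */

/*
 * Cap the system-wide feature mask to the intersection of what every
 * CPU reports, as cpu_announce_extensions() does per CPU with
 * `cpu_amask &= amask'.
 */
static unsigned long
system_amask(const unsigned long *per_cpu_amask, int ncpus)
{
	unsigned long mask = ~0UL;
	int i;

	for (i = 0; i < ncpus; i++)
		mask &= per_cpu_amask[i];
	return mask;
}

int
main(void)
{
	unsigned long cpus[2] = {
		FEAT_BWX | FEAT_FIX | FEAT_MVI,	/* CPU 0 */
		FEAT_BWX | FEAT_FIX		/* CPU 1: no MVI */
	};

	/* Only BWX|FIX survive: kernel code may not rely on MVI. */
	printf("usable extensions: %#lx\n", system_amask(cpus, 2));
	return 0;
}
```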
*/ - memcpy(&pcsp->pcs_pal_rev, &primary_pcsp->pcs_pal_rev, sizeof(pcsp->pcs_pal_rev)); - pcsp->pcs_flags |= (PCS_CV|PCS_RC); pcsp->pcs_flags &= ~PCS_BIP; @@ -472,7 +506,7 @@ cpu_boot_secondary(ci) */ for (timeout = 10000; timeout != 0; timeout--) { alpha_mb(); - if (cpus_running & cpumask) + if (cpus_booted & cpumask) break; delay(1000); } @@ -481,48 +515,59 @@ cpu_boot_secondary(ci) } void -cpu_halt_secondary(cpu_id) - u_long cpu_id; +cpu_pause_resume(u_long cpu_id, int pause) { - long timeout; - u_long cpumask = (1UL << cpu_id); + u_long cpu_mask = (1UL << cpu_id); -#ifdef DIAGNOSTIC - if (cpu_id >= hwrpb->rpb_pcs_cnt || - cpu_info[cpu_id].ci_dev == NULL) - panic("cpu_halt_secondary: bogus cpu_id"); -#endif + if (pause) { + atomic_setbits_ulong(&cpus_paused, cpu_mask); + alpha_send_ipi(cpu_id, ALPHA_IPI_PAUSE); + } else + atomic_clearbits_ulong(&cpus_paused, cpu_mask); +} - alpha_mb(); - if ((cpus_running & cpumask) == 0) { - /* Processor not running. */ - return; +void +cpu_pause_resume_all(int pause) +{ + struct cpu_info *ci, *self = curcpu(); + CPU_INFO_ITERATOR cii; + + CPU_INFO_FOREACH(cii, ci) { + if (ci == self) + continue; + cpu_pause_resume(ci->ci_cpuid, pause); } +} - /* Send the HALT IPI to the secondary. */ - alpha_send_ipi(cpu_id, ALPHA_IPI_HALT); +void +cpu_halt(void) +{ +#if 0 + struct cpu_info *ci = curcpu(); +#endif + u_long cpu_id = cpu_number(); + struct pcs *pcsp = LOCATE_PCS(hwrpb, cpu_id); - /* ...and wait for it to shut down. */ - for (timeout = 10000; timeout != 0; timeout--) { - alpha_mb(); - if ((cpus_running & cpumask) == 0) - return; - delay(1000); - } +#if 0 + printf("%s: shutting down...\n", ci->ci_dev->dv_xname); +#endif + + pcsp->pcs_flags &= ~(PCS_RC | PCS_HALT_REQ); + pcsp->pcs_flags |= PCS_HALT_STAY_HALTED; + + atomic_clearbits_ulong(&cpus_running, (1UL << cpu_id)); + atomic_clearbits_ulong(&cpus_booted, (1U << cpu_id)); - /* Erk, secondary failed to halt. */ - printf("WARNING: %s (ID %lu) failed to halt\n", - cpu_info[cpu_id].ci_dev->dv_xname, cpu_id); + alpha_pal_halt(); + /* NOTREACHED */ } void -cpu_hatch(ci) - struct cpu_info *ci; +cpu_hatch(struct cpu_info *ci) { - u_long cpumask = (1UL << ci->ci_cpuid); - - /* Set our `curpcb' to reflect our context. */ - curpcb = ci->ci_idle_pcb_paddr; + u_long cpu_id = cpu_number(); + u_long cpumask = (1UL << cpu_id); + int s; /* Mark the kernel pmap active on this processor. */ atomic_setbits_ulong(&pmap_kernel()->pm_cpus, cpumask); @@ -531,27 +576,38 @@ cpu_hatch(ci) trap_init(); /* Yahoo! We're running kernel code! Announce it! */ - printf("%s: processor ID %lu running\n", ci->ci_dev->dv_xname, - alpha_pal_whami()); - atomic_setbits_ulong(&cpus_running, cpumask); + cpu_announce_extensions(ci); + + atomic_setbits_ulong(&cpus_booted, cpumask); /* - * Lower interrupt level so that we can get IPIs. Don't use - * spl0() because we don't want to hassle w/ software interrupts - * right now. Note that interrupt() prevents the secondaries - * from servicing DEVICE and CLOCK interrupts. + * Spin here until we're told we can start. */ - (void) alpha_pal_swpipl(ALPHA_PSL_IPL_0); + while ((cpus_running & cpumask) == 0) + /* spin */ ; - /* Ok, so all we do is spin for now... */ - for (;;) - /* nothing */ ; + /* + * Invalidate the TLB and sync the I-stream before we + * jump into the kernel proper. We have to do this + * because we haven't been getting IPIs while we've + * been spinning. 
+ */ + ALPHA_TBIA(); + alpha_pal_imb(); + + sched_init_cpu(ci); + nanouptime(&ci->ci_schedstate.spc_runtime); + ci->ci_curproc = ci->ci_fpcurproc = NULL; + ci->ci_randseed = (arc4random() & 0x7fffffff) + 1; + + (void) alpha_pal_swpipl(ALPHA_PSL_IPL_0); + SCHED_LOCK(s); + cpu_switchto(NULL, sched_chooseproc()); + /* NOTREACHED */ } int -cpu_iccb_send(cpu_id, msg) - cpuid_t cpu_id; - const char *msg; +cpu_iccb_send(cpuid_t cpu_id, const char *msg) { struct pcs *pcsp = LOCATE_PCS(hwrpb, cpu_id); int timeout; @@ -576,6 +632,7 @@ cpu_iccb_send(cpu_id, msg) pcsp->pcs_iccb.iccb_rxlen = strlen(msg); /* XXX cast to __volatile */ atomic_setbits_ulong((__volatile u_long *)&hwrpb->rpb_rxrdy, cpumask); + alpha_mb(); /* Wait for the message to be received. */ for (timeout = 10000; timeout != 0; timeout--) { @@ -591,7 +648,7 @@ cpu_iccb_send(cpu_id, msg) } void -cpu_iccb_receive() +cpu_iccb_receive(void) { #if 0 /* Don't bother... we don't get any important messages anyhow. */ u_int64_t txrdy; @@ -629,4 +686,11 @@ cpu_iccb_receive() hwrpb->rpb_txrdy = 0; alpha_mb(); } + +void +cpu_unidle(struct cpu_info *ci) +{ + if (ci != curcpu()) + alpha_send_ipi(ci->ci_cpuid, ALPHA_IPI_AST); +} #endif /* MULTIPROCESSOR */ diff --git a/sys/arch/alpha/alpha/db_interface.c b/sys/arch/alpha/alpha/db_interface.c index 911621aaf31..121a8865cc8 100644 --- a/sys/arch/alpha/alpha/db_interface.c +++ b/sys/arch/alpha/alpha/db_interface.c @@ -1,4 +1,4 @@ -/* $OpenBSD: db_interface.c,v 1.17 2010/11/27 19:57:23 miod Exp $ */ +/* $OpenBSD: db_interface.c,v 1.18 2014/01/26 17:40:09 miod Exp $ */ /* $NetBSD: db_interface.c,v 1.8 1999/10/12 17:08:57 jdolecek Exp $ */ /* @@ -81,6 +81,17 @@ extern int trap_types; db_regs_t ddb_regs; +#if defined(MULTIPROCESSOR) +void db_mach_cpu(db_expr_t, int, db_expr_t, char *); +#endif + +struct db_command db_machine_command_table[] = { +#if defined(MULTIPROCESSOR) + { "ddbcpu", db_mach_cpu, 0, NULL }, +#endif + { NULL, NULL, 0, NULL } +}; + int db_active = 0; struct db_variable db_regs[] = { @@ -130,6 +141,7 @@ ddb_trap(a0, a1, a2, entry, regs) unsigned long a0, a1, a2, entry; db_regs_t *regs; { + struct cpu_info *ci = curcpu(); int s; if (entry != ALPHA_KENTRY_IF || @@ -151,6 +163,7 @@ ddb_trap(a0, a1, a2, entry, regs) * alpha_debug() switches us to the debugger stack. 
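The cpu_hatch()/cpu_boot_secondary_processors() pair above forms a two-phase handshake: the secondary sets its bit in cpus_booted and spins; the primary later links it into cpu_info_list and sets its bit in cpus_running to release it. A hedged pthreads-and-C11-atomics sketch of the same shape (the kernel uses atomic_setbits_ulong() and real spin loops, not pthreads):

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_ulong cpus_booted, cpus_running;

static void *
secondary(void *arg)
{
	unsigned long mask = 1UL << (uintptr_t)arg;

	/* Phase 1: announce we reached kernel code (cpu_hatch()). */
	atomic_fetch_or(&cpus_booted, mask);

	/* Phase 2: spin until the primary releases us. */
	while ((atomic_load(&cpus_running) & mask) == 0)
		;	/* spin, as cpu_hatch() does */

	printf("cpu mask %#lx released\n", mask);
	return NULL;
}

int
main(void)
{
	pthread_t t;
	unsigned long mask = 1UL << 1;

	pthread_create(&t, NULL, secondary, (void *)(uintptr_t)1);

	/* Primary: wait for the secondary to check in... */
	while ((atomic_load(&cpus_booted) & mask) == 0)
		;
	/* ...then release it, as cpu_boot_secondary_processors() does. */
	atomic_fetch_or(&cpus_running, mask);

	pthread_join(t, NULL);
	return 0;
}
```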
*/ + ci->ci_db_regs = regs; ddb_regs = *regs; s = splhigh(); @@ -485,3 +498,55 @@ next_instr_address(pc, branch) return (pc + sizeof(int)); return (branch_taken(*(u_int *)pc, pc, getreg_val, DDB_REGS)); } + +#if defined(MULTIPROCESSOR) +void +db_mach_cpu(db_expr_t addr, int have_addr, db_expr_t count, char *modif) +{ + struct cpu_info *ci; + CPU_INFO_ITERATOR cii; + + if (have_addr == 0) { + db_printf("addr dev id flags ipis " + "curproc fpcurproc\n"); + CPU_INFO_FOREACH(cii, ci) + db_printf("%p %-5s %02lu %08lx %04lx %p %p\n", + ci, ci->ci_dev->dv_xname, ci->ci_cpuid, + ci->ci_flags, ci->ci_ipis, ci->ci_curproc, + ci->ci_fpcurproc); + return; + } + + if (addr < 0 || addr >= ALPHA_MAXPROCS) { + db_printf("CPU %ld out of range\n", addr); + return; + } + + ci = cpu_info[addr]; + if (ci == NULL) { + db_printf("CPU %ld is not configured\n", addr); + return; + } + + if (ci != curcpu()) { + if ((ci->ci_flags & CPUF_PAUSED) == 0) { + db_printf("CPU %ld not paused\n", addr); + return; + } + } + + if (ci->ci_db_regs == NULL) { + db_printf("CPU %ld has no register state\n", addr); + return; + } + + db_printf("Using CPU %ld\n", addr); + ddb_regs = *ci->ci_db_regs; /* struct copy */ +} +#endif /* MULTIPROCESSOR */ + +void +db_machine_init() +{ + db_machine_commands_install(db_machine_command_table); +} diff --git a/sys/arch/alpha/alpha/debug.s b/sys/arch/alpha/alpha/debug.s index dacd24d3f2e..b0ff7f4407b 100644 --- a/sys/arch/alpha/alpha/debug.s +++ b/sys/arch/alpha/alpha/debug.s @@ -1,4 +1,4 @@ -/* $OpenBSD: debug.s,v 1.6 2010/06/06 11:25:37 miod Exp $ */ +/* $OpenBSD: debug.s,v 1.7 2014/01/26 17:40:09 miod Exp $ */ /* $NetBSD: debug.s,v 1.5 1999/06/18 18:11:56 thorpej Exp $ */ /*- @@ -80,9 +80,9 @@ NESTED_NOPROFILE(alpha_debug, 5, 32, ra, IM_RA|IM_S0, 0) mov sp, s0 #if defined(MULTIPROCESSOR) - /* - * XXX PAUSE ALL OTHER CPUs. - */ + /* Pause all other CPUs. */ + ldiq a0, 1 + CALL(cpu_pause_resume_all) #endif /* @@ -105,9 +105,13 @@ NESTED_NOPROFILE(alpha_debug, 5, 32, ra, IM_RA|IM_S0, 0) mov s0, sp #if defined(MULTIPROCESSOR) - /* - * XXX RESUME ALL OTHER CPUs. - */ + mov v0, s0 + + /* Resume all other CPUs. */ + mov zero, a0 + CALL(cpu_pause_resume_all) + + mov s0, v0 #endif ldq ra, (32-8)(sp) /* restore ra */ diff --git a/sys/arch/alpha/alpha/interrupt.c b/sys/arch/alpha/alpha/interrupt.c index 4fff2042e1e..9507cfe3ffb 100644 --- a/sys/arch/alpha/alpha/interrupt.c +++ b/sys/arch/alpha/alpha/interrupt.c @@ -1,4 +1,4 @@ -/* $OpenBSD: interrupt.c,v 1.31 2011/04/15 20:40:03 deraadt Exp $ */ +/* $OpenBSD: interrupt.c,v 1.32 2014/01/26 17:40:09 miod Exp $ */ /* $NetBSD: interrupt.c,v 1.46 2000/06/03 20:47:36 thorpej Exp $ */ /*- @@ -84,17 +84,11 @@ #include <machine/frame.h> #include <machine/cpuconf.h> -#if defined(MULTIPROCESSOR) -#include <sys/device.h> -#endif - #include "apecs.h" #include "cia.h" #include "lca.h" #include "tcasic.h" -static u_int schedclk2; - extern struct evcount clk_count; struct scbvec scb_iovectab[SCB_VECTOIDX(SCB_SIZE - SCB_IOVECBASE)]; @@ -204,32 +198,15 @@ void interrupt(unsigned long a0, unsigned long a1, unsigned long a2, struct trapframe *framep) { - struct proc *p; struct cpu_info *ci = curcpu(); extern int schedhz; switch (a0) { case ALPHA_INTR_XPROC: /* interprocessor interrupt */ #if defined(MULTIPROCESSOR) - { - u_long pending_ipis, bit; - -#if 0 - printf("CPU %lu got IPI\n", cpu_id); -#endif + atomic_add_ulong(&ci->ci_intrdepth, 1); -#ifdef DIAGNOSTIC - if (ci->ci_dev == NULL) { - /* XXX panic? 
*/ - printf("WARNING: no device for ID %lu\n", ci->ci_cpuid); - return; - } -#endif - - pending_ipis = atomic_loadlatch_ulong(&ci->ci_ipis, 0); - for (bit = 0; bit < ALPHA_NIPIS; bit++) - if (pending_ipis & (1UL << bit)) - (*ipifuncs[bit])(); + alpha_ipi_process(ci, framep); /* * Handle inter-console messages if we're the primary @@ -238,20 +215,17 @@ interrupt(unsigned long a0, unsigned long a1, unsigned long a2, if (ci->ci_cpuid == hwrpb->rpb_primary_cpu_id && hwrpb->rpb_txrdy != 0) cpu_iccb_receive(); - } + + atomic_sub_ulong(&ci->ci_intrdepth, 1); #else printf("WARNING: received interprocessor interrupt!\n"); #endif /* MULTIPROCESSOR */ break; case ALPHA_INTR_CLOCK: /* clock interrupt */ -#if defined(MULTIPROCESSOR) - /* XXX XXX XXX */ - if (CPU_IS_PRIMARY(ci) == 0) - return; -#endif - uvmexp.intrs++; - clk_count.ec_count++; + atomic_add_int(&uvmexp.intrs, 1); + if (CPU_IS_PRIMARY(ci)) + clk_count.ec_count++; if (platform.clockintr) { /* * Call hardclock(). This will also call @@ -264,18 +238,20 @@ interrupt(unsigned long a0, unsigned long a1, unsigned long a2, * If it's time to call the scheduler clock, * do so. */ - if ((++schedclk2 & 0x3f) == 0 && - (p = ci->ci_curproc) != NULL && schedhz != 0) - schedclock(p); + if ((++ci->ci_schedstate.spc_schedticks & 0x3f) == 0 && + schedhz != 0) + schedclock(ci->ci_curproc); } break; case ALPHA_INTR_ERROR: /* Machine Check or Correctable Error */ + atomic_add_ulong(&ci->ci_intrdepth, 1); a0 = alpha_pal_rdmces(); if (platform.mcheck_handler) (*platform.mcheck_handler)(a0, framep, a1, a2); else machine_check(a0, framep, a1, a2); + atomic_sub_ulong(&ci->ci_intrdepth, 1); break; case ALPHA_INTR_DEVICE: /* I/O device interrupt */ @@ -284,15 +260,24 @@ interrupt(unsigned long a0, unsigned long a1, unsigned long a2, KDASSERT(a1 >= SCB_IOVECBASE && a1 < SCB_SIZE); + atomic_add_ulong(&ci->ci_intrdepth, 1); #if defined(MULTIPROCESSOR) - /* XXX XXX XXX */ - if (CPU_IS_PRIMARY(ci) == 0) - return; + /* + * XXX Need to support IPL_MPSAFE eventually. Acquiring the + * XXX kernel lock could be done deeper, as most of the + * XXX scb handlers end up invoking + * XXX alpha_shared_intr_dispatch(). 
+ */ + __mp_lock(&kernel_lock); #endif - uvmexp.intrs++; + atomic_add_int(&uvmexp.intrs, 1); scb = &scb_iovectab[SCB_VECTOIDX(a1 - SCB_IOVECBASE)]; (*scb->scb_func)(scb->scb_arg, a1); +#if defined(MULTIPROCESSOR) + __mp_unlock(&kernel_lock); +#endif + atomic_sub_ulong(&ci->ci_intrdepth, 1); break; } @@ -497,6 +482,10 @@ softintr_dispatch() struct alpha_soft_intrhand *sih; u_int64_t n, i; +#if defined(MULTIPROCESSOR) + __mp_lock(&kernel_lock); +#endif + while ((n = atomic_loadlatch_ulong(&ssir, 0)) != 0) { for (i = 0; i < SI_NSOFT; i++) { if ((n & (1 << i)) == 0) @@ -515,7 +504,7 @@ softintr_dispatch() TAILQ_REMOVE(&asi->softintr_q, sih, sih_q); sih->sih_pending = 0; - uvmexp.softs++; + atomic_add_int(&uvmexp.softs, 1); mtx_leave(&asi->softintr_mtx); @@ -523,6 +512,10 @@ softintr_dispatch() } } } + +#if defined(MULTIPROCESSOR) + __mp_unlock(&kernel_lock); +#endif } static int diff --git a/sys/arch/alpha/alpha/ipifuncs.c b/sys/arch/alpha/alpha/ipifuncs.c index e1622f8c4b2..75f1b55a166 100644 --- a/sys/arch/alpha/alpha/ipifuncs.c +++ b/sys/arch/alpha/alpha/ipifuncs.c @@ -1,4 +1,4 @@ -/* $OpenBSD: ipifuncs.c,v 1.3 2012/11/01 21:09:17 miod Exp $ */ +/* $OpenBSD: ipifuncs.c,v 1.4 2014/01/26 17:40:09 miod Exp $ */ /* $NetBSD: ipifuncs.c,v 1.9 1999/12/02 01:09:11 thorpej Exp $ */ /*- @@ -38,6 +38,7 @@ #include <sys/param.h> #include <sys/device.h> #include <sys/systm.h> +#include <sys/reboot.h> #include <uvm/uvm_extern.h> @@ -45,41 +46,71 @@ #include <machine/alpha_cpu.h> #include <machine/cpu.h> #include <machine/intr.h> +#include <machine/prom.h> #include <machine/rpb.h> -void alpha_ipi_halt(void); -void alpha_ipi_tbia(void); -void alpha_ipi_tbiap(void); -void alpha_ipi_imb(void); -void alpha_ipi_ast(void); +typedef void (*ipifunc_t)(struct cpu_info *, struct trapframe *); + +void alpha_ipi_halt(struct cpu_info *, struct trapframe *); +void alpha_ipi_imb(struct cpu_info *, struct trapframe *); +void alpha_ipi_ast(struct cpu_info *, struct trapframe *); +void alpha_ipi_synch_fpu(struct cpu_info *, struct trapframe *); +void alpha_ipi_discard_fpu(struct cpu_info *, struct trapframe *); +void alpha_ipi_pause(struct cpu_info *, struct trapframe *); /* * NOTE: This table must be kept in order with the bit definitions * in <machine/intr.h>. */ -ipifunc_t ipifuncs[ALPHA_NIPIS] = { +const ipifunc_t ipifuncs[ALPHA_NIPIS] = { alpha_ipi_halt, - alpha_ipi_tbia, - alpha_ipi_tbiap, + pmap_do_tlb_shootdown, alpha_ipi_imb, alpha_ipi_ast, + alpha_ipi_synch_fpu, + alpha_ipi_discard_fpu, + alpha_ipi_pause }; /* + * Process IPIs for a CPU. + */ +void +alpha_ipi_process(struct cpu_info *ci, struct trapframe *framep) +{ + u_long pending_ipis, bit; + + for (;;) { + pending_ipis = ci->ci_ipis; + if (pending_ipis == 0) + break; + + atomic_clearbits_ulong(&ci->ci_ipis, pending_ipis); + + for (bit = 0; bit < ALPHA_NIPIS; bit++) { + if (pending_ipis & (1UL << bit)) { + (*ipifuncs[bit])(ci, framep); + } + } + } +} + +/* * Send an interprocessor interrupt. 
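alpha_ipi_process() above snapshots ci_ipis, atomically clears exactly the bits it saw, then dispatches them, looping until no bits remain so IPIs posted mid-processing are not lost. A minimal C11-atomics sketch of that loop:

```c
#include <stdatomic.h>
#include <stdio.h>

#define NIPIS	3

static atomic_ulong pending_ipis;

static void ipi_halt(void) { printf("halt\n"); }
static void ipi_shootdown(void) { printf("tlb shootdown\n"); }
static void ipi_imb(void) { printf("i-stream sync\n"); }

static void (*const ipifuncs[NIPIS])(void) = {
	ipi_halt, ipi_shootdown, ipi_imb
};

static void
ipi_process(void)
{
	unsigned long pending;
	int bit;

	for (;;) {
		pending = atomic_load(&pending_ipis);
		if (pending == 0)
			break;
		/* Clear exactly the bits we are about to service; bits
		 * posted after the snapshot survive for the next pass. */
		atomic_fetch_and(&pending_ipis, ~pending);
		for (bit = 0; bit < NIPIS; bit++)
			if (pending & (1UL << bit))
				(*ipifuncs[bit])();
	}
}

int
main(void)
{
	atomic_fetch_or(&pending_ipis, (1UL << 1) | (1UL << 2));
	ipi_process();
	return 0;
}
```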
*/ void -alpha_send_ipi(cpu_id, ipimask) - u_long cpu_id, ipimask; +alpha_send_ipi(u_long cpu_id, u_long ipimask) { #ifdef DIAGNOSTIC if (cpu_id >= hwrpb->rpb_pcs_cnt || - cpu_info[cpu_id].ci_dev == NULL) - panic("alpha_sched_ipi: bogus cpu_id"); + cpu_info[cpu_id] == NULL) + panic("alpha_send_ipi: bogus cpu_id"); + if (((1UL << cpu_id) & cpus_running) == 0) + panic("alpha_send_ipi: CPU %ld not running", cpu_id); #endif - atomic_setbits_ulong(&cpu_info[cpu_id].ci_ipis, ipimask); + atomic_setbits_ulong(&cpu_info[cpu_id]->ci_ipis, ipimask); alpha_pal_wripir(cpu_id); } @@ -87,15 +118,21 @@ alpha_send_ipi(cpu_id, ipimask) * Broadcast an IPI to all but ourselves. */ void -alpha_broadcast_ipi(ipimask) - u_long ipimask; +alpha_broadcast_ipi(u_long ipimask) { - u_long i; + struct cpu_info *ci; + CPU_INFO_ITERATOR cii; + u_long cpu_id = cpu_number(); + u_long cpumask; - for (i = 0; i < hwrpb->rpb_pcs_cnt; i++) { - if (cpu_info[i].ci_dev == NULL) + cpumask = cpus_running & ~(1UL << cpu_id); + if (cpumask == 0) + return; + + CPU_INFO_FOREACH(cii, ci) { + if ((cpumask & (1UL << ci->ci_cpuid)) == 0) continue; - alpha_send_ipi(i, ipimask); + alpha_send_ipi(ci->ci_cpuid, ipimask); } } @@ -103,71 +140,88 @@ alpha_broadcast_ipi(ipimask) * Send an IPI to all in the list but ourselves. */ void -alpha_multicast_ipi(cpumask, ipimask) - u_long cpumask, ipimask; +alpha_multicast_ipi(u_long cpumask, u_long ipimask) { - u_long i; + struct cpu_info *ci; + CPU_INFO_ITERATOR cii; + u_long cpu_id = cpu_number(); cpumask &= cpus_running; - cpumask &= ~(1UL << cpu_number()); + cpumask &= ~(1UL << cpu_id); if (cpumask == 0) return; - for (i = 0; i < hwrpb->rpb_pcs_cnt; i++) { - if ((cpumask & (1UL << i)) == 0) + CPU_INFO_FOREACH(cii, ci) { + if ((cpumask & (1UL << ci->ci_cpuid)) == 0) continue; - alpha_send_ipi(i, ipimask); + alpha_send_ipi(ci->ci_cpuid, ipimask); } } void -alpha_ipi_halt() +alpha_ipi_halt(struct cpu_info *ci, struct trapframe *framep) { - u_long cpu_id = alpha_pal_whami(); - struct pcs *pcsp = LOCATE_PCS(hwrpb, cpu_id); - /* Disable interrupts. */ (void) splhigh(); - printf("%s: shutting down...\n", cpu_info[cpu_id].ci_dev->dv_xname); - atomic_clearbits_ulong(&cpus_running, (1UL << cpu_id)); - - pcsp->pcs_flags &= ~(PCS_RC | PCS_HALT_REQ); - pcsp->pcs_flags |= PCS_HALT_STAY_HALTED; - alpha_pal_halt(); + cpu_halt(); /* NOTREACHED */ } void -alpha_ipi_tbia() +alpha_ipi_imb(struct cpu_info *ci, struct trapframe *framep) { - u_long cpu_id = alpha_pal_whami(); - - /* If we're doing a TBIA, we don't need to do a TBIAP or a SHOOTDOWN. */ - atomic_clearbits_ulong(&cpu_info[cpu_id].ci_ipis, - ALPHA_IPI_TBIAP|ALPHA_IPI_SHOOTDOWN); - - ALPHA_TBIA(); + alpha_pal_imb(); } void -alpha_ipi_tbiap() +alpha_ipi_ast(struct cpu_info *ci, struct trapframe *framep) { - - /* Can't clear SHOOTDOWN here; might have PG_ASM mappings. */ - - ALPHA_TBIAP(); +#if 0 /* useless */ + cpu_unidle(ci); +#endif } void -alpha_ipi_imb() +alpha_ipi_synch_fpu(struct cpu_info *ci, struct trapframe *framep) { + if (ci->ci_flags & CPUF_FPUSAVE) + return; + fpusave_cpu(ci, 1); +} - alpha_pal_imb(); +void +alpha_ipi_discard_fpu(struct cpu_info *ci, struct trapframe *framep) +{ + if (ci->ci_flags & CPUF_FPUSAVE) + return; + fpusave_cpu(ci, 0); } void -alpha_ipi_ast() +alpha_ipi_pause(struct cpu_info *ci, struct trapframe *framep) { - cpu_unidle(curcpu()); + u_long cpumask = (1UL << ci->ci_cpuid); + int s; + + s = splhigh(); + + /* Point debuggers at our trapframe for register state. 
*/ + ci->ci_db_regs = framep; + + atomic_setbits_ulong(&ci->ci_flags, CPUF_PAUSED); + + /* Spin with interrupts disabled until we're resumed. */ + do { + alpha_mb(); + } while (cpus_paused & cpumask); + + atomic_clearbits_ulong(&ci->ci_flags, CPUF_PAUSED); + + ci->ci_db_regs = NULL; + + splx(s); + + /* Do an IMB on the way out, in case the kernel text was changed. */ + alpha_pal_imb(); } diff --git a/sys/arch/alpha/alpha/lock_machdep.c b/sys/arch/alpha/alpha/lock_machdep.c new file mode 100644 index 00000000000..2b449b15ac3 --- /dev/null +++ b/sys/arch/alpha/alpha/lock_machdep.c @@ -0,0 +1,173 @@ +/* $OpenBSD: lock_machdep.c,v 1.1 2014/01/26 17:40:09 miod Exp $ */ + +/* + * Copyright (c) 2007 Artur Grabowski <art@openbsd.org> + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + +#include <sys/param.h> +#include <sys/lock.h> +#include <sys/systm.h> + +#include <machine/atomic.h> +#include <machine/cpu.h> +#include <machine/lock.h> + +#include <ddb/db_output.h> + +void +__mp_lock_init(struct __mp_lock *lock) +{ + lock->mpl_cpu = NULL; + lock->mpl_count = 0; +} + +#if defined(MP_LOCKDEBUG) +#ifndef DDB +#error "MP_LOCKDEBUG requires DDB" +#endif + +/* CPU-dependent timing, needs this to be settable from ddb. */ +extern int __mp_lock_spinout; +#endif + +static __inline void +__mp_lock_spin(struct __mp_lock *mpl) +{ +#ifndef MP_LOCKDEBUG + while (mpl->mpl_count != 0) + SPINLOCK_SPIN_HOOK; +#else + int ticks = __mp_lock_spinout; + if (!CPU_IS_PRIMARY(curcpu())) /* XXX */ + ticks += ticks; /* XXX */ + + while (mpl->mpl_count != 0 && --ticks > 0) + SPINLOCK_SPIN_HOOK; + + if (ticks == 0) { + db_printf("__mp_lock(%p): lock spun out", mpl); + Debugger(); + } +#endif +} + +void +__mp_lock(struct __mp_lock *mpl) +{ + int s; + struct cpu_info *ci = curcpu(); + + /* + * Please notice that mpl_count gets incremented twice for the + * first lock. This is on purpose. The way we release the lock + * in mp_unlock is to decrement the mpl_count and then check if + * the lock should be released. Since mpl_count is what we're + * spinning on, decrementing it in mpl_unlock to 0 means that + * we can't clear mpl_cpu, because we're no longer holding the + * lock. In theory mpl_cpu doesn't need to be cleared, but it's + * safer to clear it and besides, setting mpl_count to 2 on the + * first lock makes most of this code much simpler. 
+ */ + while (1) { + s = splhigh(); + if (__cpu_cas(&mpl->mpl_count, 0, 1) == 0) { + alpha_mb(); + mpl->mpl_cpu = ci; + } + + if (mpl->mpl_cpu == ci) { + mpl->mpl_count++; + splx(s); + break; + } + splx(s); + + __mp_lock_spin(mpl); + } +} + +void +__mp_unlock(struct __mp_lock *mpl) +{ + int s; + +#ifdef MP_LOCKDEBUG + if (mpl->mpl_cpu != curcpu()) { + db_printf("__mp_unlock(%p): not held lock\n", mpl); + Debugger(); + } +#endif + + s = splhigh(); + if (--mpl->mpl_count == 1) { + mpl->mpl_cpu = NULL; + alpha_mb(); + mpl->mpl_count = 0; + } + + splx(s); +} + +int +__mp_release_all(struct __mp_lock *mpl) +{ + int rv = mpl->mpl_count - 1; + int s; + +#ifdef MP_LOCKDEBUG + if (mpl->mpl_cpu != curcpu()) { + db_printf("__mp_release_all(%p): not held lock\n", mpl); + Debugger(); + } +#endif + + s = splhigh(); + mpl->mpl_cpu = NULL; + alpha_mb(); + mpl->mpl_count = 0; + splx(s); + + return (rv); +} + +int +__mp_release_all_but_one(struct __mp_lock *mpl) +{ + int rv = mpl->mpl_count - 2; +#ifdef MP_LOCKDEBUG + if (mpl->mpl_cpu != curcpu()) { + db_printf("__mp_release_all_but_one(%p): not held lock\n", mpl); + Debugger(); + } +#endif + + mpl->mpl_count = 2; + + return (rv); +} + +void +__mp_acquire_count(struct __mp_lock *mpl, int count) +{ + while (count--) + __mp_lock(mpl); +} + +int +__mp_lock_held(struct __mp_lock *mpl) +{ + return mpl->mpl_cpu == curcpu(); +} diff --git a/sys/arch/alpha/alpha/locore.s b/sys/arch/alpha/alpha/locore.s index 201e8186e50..08359644b5b 100644 --- a/sys/arch/alpha/alpha/locore.s +++ b/sys/arch/alpha/alpha/locore.s @@ -1,4 +1,4 @@ -/* $OpenBSD: locore.s,v 1.37 2013/06/13 02:27:23 deraadt Exp $ */ +/* $OpenBSD: locore.s,v 1.38 2014/01/26 17:40:09 miod Exp $ */ /* $NetBSD: locore.s,v 1.94 2001/04/26 03:10:44 ross Exp $ */ /*- @@ -92,18 +92,18 @@ #else /* if not MULTIPROCESSOR... */ -IMPORT(cpu_info_store, CPU_INFO_SIZEOF) +IMPORT(cpu_info_primary, CPU_INFO_SIZEOF) -#define GET_CPUINFO lda v0, cpu_info_store +#define GET_CPUINFO lda v0, cpu_info_primary -#define GET_CURPROC lda v0, cpu_info_store + CPU_INFO_CURPROC +#define GET_CURPROC lda v0, cpu_info_primary + CPU_INFO_CURPROC -#define GET_FPCURPROC lda v0, cpu_info_store + CPU_INFO_FPCURPROC +#define GET_FPCURPROC lda v0, cpu_info_primary + CPU_INFO_FPCURPROC -#define GET_CURPCB lda v0, cpu_info_store + CPU_INFO_CURPCB +#define GET_CURPCB lda v0, cpu_info_primary + CPU_INFO_CURPCB #define GET_IDLE_PCB(reg) \ - lda reg, cpu_info_store ; \ + lda reg, cpu_info_primary ; \ ldq reg, CPU_INFO_IDLE_PCB_PADDR(reg) #endif @@ -290,20 +290,6 @@ LEAF(exception_return, 1) /* XXX should be NESTED */ br pv, 1f 1: LDGP(pv) -#if defined(MULTIPROCESSOR) - /* XXX XXX XXX */ - /* - * Check the current processor ID. If we're not the primary - * CPU, then just restore registers and bail out. 
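Returning to the mpl_count convention explained in the comment above: the first acquisition leaves mpl_count at 2, so __mp_unlock() can decrement to 1, clear mpl_cpu, and only then publish 0 to the spinners. A single-threaded sketch of just the counting; the real lock additionally needs the __cpu_cas() and alpha_mb() barriers shown in the diff:

```c
#include <assert.h>
#include <stddef.h>

struct cpu;				/* stand-in for struct cpu_info */

struct mp_lock {
	struct cpu *mpl_cpu;
	int mpl_count;
};

static void
lock(struct mp_lock *mpl, struct cpu *self)
{
	if (mpl->mpl_cpu == NULL) {
		/* The CAS in __mp_lock(): 0 -> 1, then claim it. */
		mpl->mpl_count = 1;
		mpl->mpl_cpu = self;
	}
	assert(mpl->mpl_cpu == self);	/* anyone else would spin here */
	mpl->mpl_count++;		/* 2 on the first acquisition */
}

static void
unlock(struct mp_lock *mpl)
{
	/*
	 * Decrement first; the count only drops to 0 after mpl_cpu is
	 * cleared, so spinners never observe a free lock that still
	 * appears owned.
	 */
	if (--mpl->mpl_count == 1) {
		mpl->mpl_cpu = NULL;
		mpl->mpl_count = 0;
	}
}

int
main(void)
{
	struct cpu *me = (struct cpu *)&me;	/* any unique pointer */
	struct mp_lock l = { NULL, 0 };

	lock(&l, me);			/* mpl_count == 2 */
	lock(&l, me);			/* recursive: mpl_count == 3 */
	unlock(&l);			/* back to 2 */
	unlock(&l);			/* fully released: 0 */
	assert(l.mpl_cpu == NULL && l.mpl_count == 0);
	return 0;
}
```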
- */ - call_pal PAL_OSF1_whami - lda t0, hwrpb - ldq t0, 0(t0) - ldq t1, RPB_PRIMARY_CPU_ID(t0) - cmpeq t1, v0, t0 - beq t0, 4f /* == 0: bail out now */ -#endif - ldq s1, (FRAME_PS * 8)(sp) /* get the saved PS */ and s1, ALPHA_PSL_IPL_MASK, t0 /* look at the saved IPL */ bne t0, 4f /* != 0: can't do AST or SIR */ diff --git a/sys/arch/alpha/alpha/machdep.c b/sys/arch/alpha/alpha/machdep.c index b4767440dad..515bcef6383 100644 --- a/sys/arch/alpha/alpha/machdep.c +++ b/sys/arch/alpha/alpha/machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: machdep.c,v 1.145 2013/12/22 18:52:34 miod Exp $ */ +/* $OpenBSD: machdep.c,v 1.146 2014/01/26 17:40:09 miod Exp $ */ /* $NetBSD: machdep.c,v 1.210 2000/06/01 17:12:38 thorpej Exp $ */ /*- @@ -110,9 +110,8 @@ #ifdef DDB #include <machine/db_machdep.h> -#include <ddb/db_access.h> -#include <ddb/db_sym.h> #include <ddb/db_extern.h> +#include <ddb/db_interface.h> #endif #include "ioasic.h" @@ -254,7 +253,8 @@ alpha_init(unused, ptb, bim, bip, biv) * Set our SysValue to the address of our cpu_info structure. * Secondary processors do this in their spinup trampoline. */ - alpha_pal_wrval((u_long)&cpu_info[cpu_id]); + alpha_pal_wrval((u_long)&cpu_info_primary); + cpu_info[cpu_id] = &cpu_info_primary; #endif ci = curcpu(); @@ -772,6 +772,7 @@ nobootinfo: * Initialize debuggers, and break into them if appropriate. */ #ifdef DDB + db_machine_init(); ddb_init(); if (boothowto & RB_KDB) @@ -981,15 +982,8 @@ boot(howto) int howto; { #if defined(MULTIPROCESSOR) -#if 0 /* XXX See below. */ - u_long cpu_id; -#endif -#endif - -#if defined(MULTIPROCESSOR) - /* We must be running on the primary CPU. */ - if (alpha_pal_whami() != hwrpb->rpb_primary_cpu_id) - panic("cpu_reboot: not on primary CPU!"); + u_long wait_mask; + int i; #endif /* If system is cold, just halt. */ @@ -1024,6 +1018,28 @@ boot(howto) uvm_shutdown(); splhigh(); /* Disable interrupts. */ +#if defined(MULTIPROCESSOR) + /* + * Halt all other CPUs. + */ + wait_mask = (1UL << hwrpb->rpb_primary_cpu_id); + alpha_broadcast_ipi(ALPHA_IPI_HALT); + + /* Ensure any CPUs paused by DDB resume execution so they can halt */ + cpus_paused = 0; + + for (i = 0; i < 10000; i++) { + alpha_mb(); + if (cpus_running == wait_mask) + break; + delay(1000); + } + alpha_mb(); + if (cpus_running != wait_mask) + printf("WARNING: Unable to halt secondary CPUs (0x%lx)\n", + cpus_running); +#endif + /* If rebooting and a dump is requested do it. */ if (howto & RB_DUMP) dumpsys(); @@ -1037,18 +1053,6 @@ haltsys: sio_intr_shutdown(NULL); #endif -#if defined(MULTIPROCESSOR) -#if 0 /* XXX doesn't work when called from here?! */ - /* Kill off any secondary CPUs. */ - for (cpu_id = 0; cpu_id < hwrpb->rpb_pcs_cnt; cpu_id++) { - if (cpu_id == hwrpb->rpb_primary_cpu_id || - cpu_info[cpu_id].ci_softc == NULL) - continue; - cpu_halt_secondary(cpu_id); - } -#endif -#endif - #ifdef BOOTKEY printf("hit any key to %s...\n", howto & RB_HALT ? 
"halt" : "reboot"); cnpollc(1); /* for proper keyboard command handling */ @@ -1754,10 +1758,15 @@ void fpusave_cpu(struct cpu_info *ci, int save) { struct proc *p; +#if defined(MULTIPROCESSOR) + int s; +#endif KDASSERT(ci == curcpu()); #if defined(MULTIPROCESSOR) + /* Need to block IPIs */ + s = splhigh(); atomic_setbits_ulong(&ci->ci_flags, CPUF_FPUSAVE); #endif @@ -1778,6 +1787,7 @@ fpusave_cpu(struct cpu_info *ci, int save) out: #if defined(MULTIPROCESSOR) atomic_clearbits_ulong(&ci->ci_flags, CPUF_FPUSAVE); + splx(s); #endif return; } @@ -1792,25 +1802,35 @@ fpusave_proc(struct proc *p, int save) struct cpu_info *oci; #if defined(MULTIPROCESSOR) u_long ipi = save ? ALPHA_IPI_SYNCH_FPU : ALPHA_IPI_DISCARD_FPU; - int spincount; + int s, spincount; #endif KDASSERT(p->p_addr != NULL); +#if defined(MULTIPROCESSOR) + /* Need to block IPIs */ + s = splhigh(); +#endif + oci = p->p_addr->u_pcb.pcb_fpcpu; if (oci == NULL) { +#if defined(MULTIPROCESSOR) + splx(s); +#endif return; } #if defined(MULTIPROCESSOR) if (oci == ci) { KASSERT(ci->ci_fpcurproc == p); + splx(s); fpusave_cpu(ci, save); return; } KASSERT(oci->ci_fpcurproc == p); alpha_send_ipi(oci->ci_cpuid, ipi); + splx(s); spincount = 0; while (p->p_addr->u_pcb.pcb_fpcpu != NULL) { @@ -1871,7 +1891,7 @@ delay(n) * the usec counter. */ cycles += curcycle; - while (cycles > cycles_per_usec) { + while (cycles >= cycles_per_usec) { usec++; cycles -= cycles_per_usec; } diff --git a/sys/arch/alpha/alpha/multiproc.s b/sys/arch/alpha/alpha/multiproc.s index 917d212ff6c..053f9007773 100644 --- a/sys/arch/alpha/alpha/multiproc.s +++ b/sys/arch/alpha/alpha/multiproc.s @@ -1,4 +1,4 @@ -/* $OpenBSD: multiproc.s,v 1.4 2008/06/26 05:42:08 ray Exp $ */ +/* $OpenBSD: multiproc.s,v 1.5 2014/01/26 17:40:09 miod Exp $ */ /* $NetBSD: multiproc.s,v 1.5 1999/12/16 20:17:23 thorpej Exp $ */ /*- @@ -55,19 +55,27 @@ NESTED_NOPROFILE(cpu_spinup_trampoline,0,0,ra,0,0) br pv, 1f /* compute new GP */ 1: LDGP(pv) - /* Invalidate TLB and I-stream. */ - ldiq a0, -2 /* TBIA */ - call_pal PAL_OSF1_tbi - call_pal PAL_imb - - /* Load KGP with current GP. */ + /* Write new KGP. */ mov gp, a0 call_pal PAL_OSF1_wrkgp /* clobbers a0, t0, t8-t11 */ - /* Restore argument and write it in SysValue. */ + /* Store our CPU info in SysValue. */ mov s0, a0 call_pal PAL_OSF1_wrval + /* Switch to this CPU's idle thread. */ + ldq a0, CPU_INFO_IDLE_PCB_PADDR(s0) + SWITCH_CONTEXT + + /* Invalidate TLB and I-stream. */ + ldiq a0, -2 /* TBIA */ + call_pal PAL_OSF1_tbi + call_pal PAL_imb + + /* Make sure the FPU is turned off. 
*/ + mov zero, a0 + call_pal PAL_OSF1_wrfen + /* Restore argument and call cpu_hatch() */ mov s0, a0 CALL(cpu_hatch) diff --git a/sys/arch/alpha/alpha/mutex.c b/sys/arch/alpha/alpha/mutex.c index 1ced8104d95..57d07f5ac52 100644 --- a/sys/arch/alpha/alpha/mutex.c +++ b/sys/arch/alpha/alpha/mutex.c @@ -1,4 +1,4 @@ -/* $OpenBSD: mutex.c,v 1.7 2011/04/21 04:34:12 miod Exp $ */ +/* $OpenBSD: mutex.c,v 1.8 2014/01/26 17:40:09 miod Exp $ */ /* * Copyright (c) 2004 Artur Grabowski <art@openbsd.org> @@ -31,18 +31,40 @@ #include <machine/intr.h> +static inline int +try_lock(struct mutex *mtx) +{ #ifdef MULTIPROCESSOR -#error This code needs more work + unsigned long t0, v0; + + __asm __volatile( + "1: ldl_l %0, %3 \n" /* t0 = mtx->mtx_lock */ + " bne %0, 2f \n" + " bis $31, 1, %0 \n" /* t0 = 1 */ + " stl_c %0, %2 \n" /* mtx->mtx_lock = 1 */ + " beq %0, 3f \n" + " mb \n" + " bis $31, 1, %1 \n" /* v0 = 1 */ + " br 4f \n" + "2: bis $31, $31, %1 \n" /* v0 = 0 */ + " br 4f \n" + "3: br 1b \n" /* update failed */ + "4: \n" + : "=&r" (t0), "=r" (v0), "=m" (mtx->mtx_lock) + : "m" (mtx->mtx_lock) + : "memory"); + + return (v0 != 0); +#else + mtx->mtx_lock = 1; + return 1; #endif +} -/* - * Single processor systems don't need any mutexes, but they need the spl - * raising semantics of the mutexes. - */ void mtx_init(struct mutex *mtx, int wantipl) { - mtx->mtx_oldipl = 0; + mtx->mtx_oldipl = IPL_NONE; mtx->mtx_wantipl = wantipl; mtx->mtx_lock = 0; } @@ -50,37 +72,61 @@ mtx_init(struct mutex *mtx, int wantipl) void mtx_enter(struct mutex *mtx) { - if (mtx->mtx_wantipl != IPL_NONE) - mtx->mtx_oldipl = _splraise(mtx->mtx_wantipl); - MUTEX_ASSERT_UNLOCKED(mtx); - mtx->mtx_lock = 1; + int s; + + for (;;) { + if (mtx->mtx_wantipl != IPL_NONE) + s = _splraise(mtx->mtx_wantipl); + if (try_lock(mtx)) { + if (mtx->mtx_wantipl != IPL_NONE) + mtx->mtx_oldipl = s; + mtx->mtx_owner = curcpu(); #ifdef DIAGNOSTIC - curcpu()->ci_mutex_level++; + curcpu()->ci_mutex_level++; #endif + return; + } + if (mtx->mtx_wantipl != IPL_NONE) + splx(s); + } } int mtx_enter_try(struct mutex *mtx) { + int s; + if (mtx->mtx_wantipl != IPL_NONE) - mtx->mtx_oldipl = _splraise(mtx->mtx_wantipl); - MUTEX_ASSERT_UNLOCKED(mtx); - mtx->mtx_lock = 1; + s = _splraise(mtx->mtx_wantipl); + if (try_lock(mtx)) { + if (mtx->mtx_wantipl != IPL_NONE) + mtx->mtx_oldipl = s; + mtx->mtx_owner = curcpu(); #ifdef DIAGNOSTIC - curcpu()->ci_mutex_level++; + curcpu()->ci_mutex_level++; #endif - - return 1; + return 1; + } + if (mtx->mtx_wantipl != IPL_NONE) + splx(s); + return 0; } void mtx_leave(struct mutex *mtx) { + int s; + MUTEX_ASSERT_LOCKED(mtx); - mtx->mtx_lock = 0; #ifdef DIAGNOSTIC curcpu()->ci_mutex_level--; #endif + s = mtx->mtx_oldipl; + mtx->mtx_owner = NULL; + mtx->mtx_lock = 0; +#ifdef MULTIPROCESSOR + alpha_wmb(); +#endif if (mtx->mtx_wantipl != IPL_NONE) - splx(mtx->mtx_oldipl); + splx(s); } diff --git a/sys/arch/alpha/alpha/pmap.c b/sys/arch/alpha/alpha/pmap.c index cc7c6f82a4e..c64cc367e10 100644 --- a/sys/arch/alpha/alpha/pmap.c +++ b/sys/arch/alpha/alpha/pmap.c @@ -1,4 +1,4 @@ -/* $OpenBSD: pmap.c,v 1.69 2014/01/06 20:27:44 miod Exp $ */ +/* $OpenBSD: pmap.c,v 1.70 2014/01/26 17:40:09 miod Exp $ */ /* $NetBSD: pmap.c,v 1.154 2000/12/07 22:18:55 thorpej Exp $ */ /*- @@ -140,6 +140,7 @@ #include <sys/pool.h> #include <sys/user.h> #include <sys/buf.h> +#include <sys/mutex.h> #ifdef SYSVSHM #include <sys/shm.h> #endif @@ -210,9 +211,9 @@ pt_entry_t *kernel_lev1map; */ pt_entry_t *VPT; -struct pmap kernel_pmap_store; -u_int 
kernel_pmap_asn_store[ALPHA_MAXPROCS]; -u_long kernel_pmap_asngen_store[ALPHA_MAXPROCS]; +struct pmap kernel_pmap_store + [(PMAP_SIZEOF(ALPHA_MAXPROCS) + sizeof(struct pmap) - 1) + / sizeof(struct pmap)]; paddr_t avail_start; /* PA of first available physical page */ paddr_t avail_end; /* PA of last available physical page */ @@ -246,8 +247,6 @@ TAILQ_HEAD(, pmap) pmap_all_pmaps; */ struct pool pmap_pmap_pool; struct pool pmap_l1pt_pool; -struct pool pmap_asn_pool; -struct pool pmap_asngen_pool; struct pool pmap_pv_pool; /* @@ -307,9 +306,9 @@ struct pool pmap_pv_pool; * the ASN generation in this particular case) to keep the logic sane * in other parts of the code. */ -u_int pmap_max_asn; /* max ASN supported by the system */ -u_int pmap_next_asn[ALPHA_MAXPROCS]; /* next free ASN to use */ -u_long pmap_asn_generation[ALPHA_MAXPROCS]; /* current ASN generation */ +u_int pmap_max_asn; /* max ASN supported by the system */ + /* next ASN and current ASN generation */ +struct pmap_asn_info pmap_asn_info[ALPHA_MAXPROCS]; /* * Locking: @@ -328,13 +327,25 @@ u_long pmap_asn_generation[ALPHA_MAXPROCS]; /* current ASN generation */ * memory allocation *must* be blocked while this lock is * asserted. * - * * pmap_all_pmaps_slock - This lock protects the global list of + * * pmap_all_pmaps_mtx - This lock protects the global list of * all pmaps. Note that a pm_slock must never be held while this * lock is held. * - * * pmap_growkernel_slock - This lock protects pmap_growkernel() + * * pmap_growkernel_mtx - This lock protects pmap_growkernel() * and the pmap_maxkvaddr variable. * + * There is a lock ordering constraint for pmap_growkernel_mtx. + * pmap_growkernel() acquires the locks in the following order: + * + * pmap_growkernel_mtx -> pmap_all_pmaps_mtx -> + * pmap->pm_slock + * + * But pmap_lev1map_create() is called with pmap->pm_slock held, + * and also needs to acquire the pmap_growkernel_mtx. So, + * we require that the caller of pmap_lev1map_create() (currently, + * the only caller is pmap_enter()) acquire pmap_growkernel_mtx + * before acquiring pmap->pm_slock. + * * Address space number management (global ASN counters and per-pmap * ASN state) are not locked; they use arrays of values indexed * per-processor. @@ -343,8 +354,8 @@ u_long pmap_asn_generation[ALPHA_MAXPROCS]; /* current ASN generation */ * with the pmap already locked by the caller (which will be * an interface function). */ -struct simplelock pmap_all_pmaps_slock; -struct simplelock pmap_growkernel_slock; +struct mutex pmap_all_pmaps_mtx; +struct mutex pmap_growkernel_mtx; #define PMAP_MAP_TO_HEAD_LOCK() /* nothing */ #define PMAP_MAP_TO_HEAD_UNLOCK() /* nothing */ @@ -378,26 +389,19 @@ struct pmap_tlb_shootdown_q { TAILQ_HEAD(, pmap_tlb_shootdown_job) pq_head; int pq_pte; /* aggregate PTE bits */ int pq_count; /* number of pending requests */ - struct simplelock pq_slock; /* spin lock on queue */ + int pq_tbia; /* pending global flush */ + struct mutex pq_mtx; /* queue lock */ } pmap_tlb_shootdown_q[ALPHA_MAXPROCS]; -#define PSJQ_LOCK(pq, s) \ -do { \ - s = splvm(); \ - simple_lock(&(pq)->pq_slock); \ -} while (0) - -#define PSJQ_UNLOCK(pq, s) \ -do { \ - simple_unlock(&(pq)->pq_slock); \ - splx(s); \ -} while (0) +#define PSJQ_LOCK(pq, s) mtx_enter(&(pq)->pq_mtx) +#define PSJQ_UNLOCK(pq, s) mtx_leave(&(pq)->pq_mtx) /* If we have more pending jobs than this, we just nail the whole TLB. 
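The shootdown queue above caps pending per-page jobs: once more would be queued than PMAP_TLB_SHOOTDOWN_MAXJOBS allows, it is cheaper to flush the whole TLB than to invalidate entries one by one (hence the new pq_tbia field). A sketch of that collapse policy, with structure and names hypothetical:

```c
/* Sketch of the MAXJOBS policy above: queue per-page invalidations,
 * but collapse to one full flush once the queue grows too long. */
#include <stdio.h>

#define MAXJOBS	6

struct shootdown_q {
	unsigned long va[MAXJOBS];	/* pending per-page jobs */
	int count;
	int tbia;			/* pending global flush */
};

static void
enqueue(struct shootdown_q *pq, unsigned long va)
{
	if (pq->tbia)
		return;			/* already flushing everything */
	if (pq->count == MAXJOBS) {
		pq->count = 0;		/* drop the per-page jobs... */
		pq->tbia = 1;		/* ...and nail the whole TLB */
		return;
	}
	pq->va[pq->count++] = va;
}

static void
process(struct shootdown_q *pq)
{
	int i;

	if (pq->tbia)
		printf("TBIA: flush entire TLB\n");
	else
		for (i = 0; i < pq->count; i++)
			printf("TBIS: flush va %#lx\n", pq->va[i]);
	pq->count = pq->tbia = 0;
}

int
main(void)
{
	struct shootdown_q q = { .count = 0, .tbia = 0 };
	unsigned long va;

	for (va = 0x1000; va <= 0x8000; va += 0x1000)
		enqueue(&q, va);	/* 8 jobs: overflows into TBIA */
	process(&q);
	return 0;
}
```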
*/ #define PMAP_TLB_SHOOTDOWN_MAXJOBS 6 struct pool pmap_tlb_shootdown_job_pool; +void pmap_tlb_shootdown_q_drain(struct pmap_tlb_shootdown_q *); struct pmap_tlb_shootdown_job *pmap_tlb_shootdown_job_get (struct pmap_tlb_shootdown_q *); void pmap_tlb_shootdown_job_put(struct pmap_tlb_shootdown_q *, @@ -419,12 +423,12 @@ void pmap_changebit(struct vm_page *, pt_entry_t, pt_entry_t, cpuid_t); * PT page management functions. */ int pmap_lev1map_create(pmap_t, cpuid_t); -void pmap_lev1map_destroy(pmap_t, cpuid_t); +void pmap_lev1map_destroy(pmap_t); int pmap_ptpage_alloc(pmap_t, pt_entry_t *, int); void pmap_ptpage_free(pmap_t, pt_entry_t *); void pmap_l3pt_delref(pmap_t, vaddr_t, pt_entry_t *, cpuid_t); -void pmap_l2pt_delref(pmap_t, pt_entry_t *, pt_entry_t *, cpuid_t); -void pmap_l1pt_delref(pmap_t, pt_entry_t *, cpuid_t); +void pmap_l2pt_delref(pmap_t, pt_entry_t *, pt_entry_t *); +void pmap_l1pt_delref(pmap_t, pt_entry_t *); void *pmap_l1pt_alloc(struct pool *, int, int *); void pmap_l1pt_free(struct pool *, void *); @@ -443,9 +447,11 @@ int pmap_pv_enter(pmap_t, struct vm_page *, vaddr_t, pt_entry_t *, void pmap_pv_remove(pmap_t, struct vm_page *, vaddr_t, boolean_t); void *pmap_pv_page_alloc(struct pool *, int, int *); void pmap_pv_page_free(struct pool *, void *); -struct pool_allocator pmap_pv_allocator = { + +struct pool_allocator pmap_pv_page_allocator = { pmap_pv_page_alloc, pmap_pv_page_free, 0, }; + #ifdef DEBUG void pmap_pv_dump(paddr_t); #endif @@ -501,6 +507,9 @@ int pmap_physpage_delref(void *); #ifdef DEBUG #define PMAP_ACTIVATE_ASN_SANITY(pmap, cpu_id) \ do { \ + struct pmap_asn_info *__pma = &(pmap)->pm_asni[(cpu_id)]; \ + struct pmap_asn_info *__cpma = &pmap_asn_info[(cpu_id)]; \ + \ if ((pmap)->pm_lev1map == kernel_lev1map) { \ /* \ * This pmap implementation also ensures that pmaps \ @@ -508,25 +517,24 @@ do { \ * ASN to prevent the PALcode from servicing a TLB \ * miss with the wrong PTE. \ */ \ - if ((pmap)->pm_asn[(cpu_id)] != PMAP_ASN_RESERVED) { \ + if (__pma->pma_asn != PMAP_ASN_RESERVED) { \ printf("kernel_lev1map with non-reserved ASN " \ "(line %d)\n", __LINE__); \ panic("PMAP_ACTIVATE_ASN_SANITY"); \ } \ } else { \ - if ((pmap)->pm_asngen[(cpu_id)] != \ - pmap_asn_generation[(cpu_id)]) { \ + if (__pma->pma_asngen != __cpma->pma_asngen) { \ /* \ * ASN generation number isn't valid! \ */ \ printf("pmap asngen %lu, current %lu " \ "(line %d)\n", \ - (pmap)->pm_asngen[(cpu_id)], \ - pmap_asn_generation[(cpu_id)], \ + __pma->pma_asngen, \ + __cpma->pma_asngen, \ __LINE__); \ panic("PMAP_ACTIVATE_ASN_SANITY"); \ } \ - if ((pmap)->pm_asn[(cpu_id)] == PMAP_ASN_RESERVED) { \ + if (__pma->pma_asn == PMAP_ASN_RESERVED) { \ /* \ * DANGER WILL ROBINSON! We're going to \ * pollute the VPT TLB entries! \ @@ -557,7 +565,8 @@ do { \ \ (p)->p_addr->u_pcb.pcb_hw.apcb_ptbr = \ ALPHA_K0SEG_TO_PHYS((vaddr_t)(pmap)->pm_lev1map) >> PGSHIFT; \ - (p)->p_addr->u_pcb.pcb_hw.apcb_asn = (pmap)->pm_asn[(cpu_id)]; \ + (p)->p_addr->u_pcb.pcb_hw.apcb_asn = \ + (pmap)->pm_asni[(cpu_id)].pma_asn; \ \ if ((p) == curproc) { \ /* \ @@ -623,7 +632,7 @@ do { \ */ #define PMAP_INVALIDATE_ASN(pmap, cpu_id) \ do { \ - (pmap)->pm_asn[(cpu_id)] = PMAP_ASN_RESERVED; \ + (pmap)->pm_asni[(cpu_id)].pma_asn = PMAP_ASN_RESERVED; \ } while (0) /* @@ -639,8 +648,8 @@ do { \ * works in this case. 
\ */ \ ALPHA_TBIS((va)); \ - } else if ((pmap)->pm_asngen[(cpu_id)] == \ - pmap_asn_generation[(cpu_id)]) { \ + } else if ((pmap)->pm_asni[(cpu_id)].pma_asngen == \ + pmap_asn_info[(cpu_id)].pma_asngen) { \ /* \ * We can't directly invalidate the TLB entry \ * in this case, so we have to force allocation \ @@ -816,8 +825,8 @@ pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids) (i*PAGE_SIZE*NPTEPG))] = pte; } - /* Initialize the pmap_growkernel_slock. */ - simple_lock_init(&pmap_growkernel_slock); + /* Initialize the pmap_growkernel_mtx. */ + mtx_init(&pmap_growkernel_mtx, IPL_NONE); /* * Set up level three page table (lev3map) @@ -843,16 +852,12 @@ pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids) * Initialize the pmap pools and list. */ pmap_ncpuids = ncpuids; - pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl", + pool_init(&pmap_pmap_pool, PMAP_SIZEOF(pmap_ncpuids), 0, 0, 0, "pmappl", &pool_allocator_nointr); pool_init(&pmap_l1pt_pool, PAGE_SIZE, 0, 0, 0, "l1ptpl", &pmap_l1pt_allocator); - pool_init(&pmap_asn_pool, pmap_ncpuids * sizeof(u_int), 0, 0, 0, - "pmasnpl", &pool_allocator_nointr); - pool_init(&pmap_asngen_pool, pmap_ncpuids * sizeof(u_long), 0, 0, 0, - "pmasngenpl", &pool_allocator_nointr); pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl", - &pmap_pv_allocator); + &pmap_pv_page_allocator); TAILQ_INIT(&pmap_all_pmaps); @@ -861,14 +866,14 @@ pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids) */ pmap_max_asn = maxasn; for (i = 0; i < ALPHA_MAXPROCS; i++) { - pmap_next_asn[i] = 1; - pmap_asn_generation[i] = 0; + pmap_asn_info[i].pma_asn = 1; + pmap_asn_info[i].pma_asngen = 0; } /* * Initialize the locks. */ - simple_lock_init(&pmap_all_pmaps_slock); + mtx_init(&pmap_all_pmaps_mtx, IPL_NONE); /* * Initialize kernel pmap. Note that all kernel mappings @@ -877,14 +882,13 @@ pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids) * references kernel_lev1map, it always has an invalid ASN * generation. */ - memset(pmap_kernel(), 0, sizeof(struct pmap)); + memset(pmap_kernel(), 0, sizeof(pmap_kernel())); pmap_kernel()->pm_lev1map = kernel_lev1map; pmap_kernel()->pm_count = 1; - pmap_kernel()->pm_asn = kernel_pmap_asn_store; - pmap_kernel()->pm_asngen = kernel_pmap_asngen_store; for (i = 0; i < ALPHA_MAXPROCS; i++) { - pmap_kernel()->pm_asn[i] = PMAP_ASN_RESERVED; - pmap_kernel()->pm_asngen[i] = pmap_asn_generation[i]; + pmap_kernel()->pm_asni[i].pma_asn = PMAP_ASN_RESERVED; + pmap_kernel()->pm_asni[i].pma_asngen = + pmap_asn_info[i].pma_asngen; } simple_lock_init(&pmap_kernel()->pm_slock); TAILQ_INSERT_TAIL(&pmap_all_pmaps, pmap_kernel(), pm_list); @@ -894,11 +898,11 @@ pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids) * Initialize the TLB shootdown queues. 
*/ pool_init(&pmap_tlb_shootdown_job_pool, - sizeof(struct pmap_tlb_shootdown_job), 0, 0, 0, "pmaptlbpl", - NULL); + sizeof(struct pmap_tlb_shootdown_job), 0, 0, 0, "pmaptlbpl", NULL); + pool_setipl(&pmap_tlb_shootdown_job_pool, IPL_IPI); for (i = 0; i < ALPHA_MAXPROCS; i++) { TAILQ_INIT(&pmap_tlb_shootdown_q[i].pq_head); - simple_lock_init(&pmap_tlb_shootdown_q[i].pq_slock); + mtx_init(&pmap_tlb_shootdown_q[i].pq_mtx, IPL_IPI); } #endif @@ -909,7 +913,7 @@ pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids) proc0.p_addr->u_pcb.pcb_hw.apcb_ptbr = ALPHA_K0SEG_TO_PHYS((vaddr_t)kernel_lev1map) >> PGSHIFT; proc0.p_addr->u_pcb.pcb_hw.apcb_asn = - pmap_kernel()->pm_asn[cpu_number()]; + pmap_kernel()->pm_asni[cpu_number()].pma_asn; /* * Mark the kernel pmap `active' on this processor. @@ -971,7 +975,6 @@ pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp) if (uvm.page_init_done == TRUE) panic("pmap_steal_memory: called _after_ bootstrap"); - #if 0 printf(" bank %d: avail_start 0x%lx, start 0x%lx, " "avail_end 0x%lx\n", bank, vm_physmem[bank].avail_start, @@ -1099,27 +1102,26 @@ pmap_create(void) pmap = pool_get(&pmap_pmap_pool, PR_WAITOK|PR_ZERO); - pmap->pm_asn = pool_get(&pmap_asn_pool, PR_WAITOK); - pmap->pm_asngen = pool_get(&pmap_asngen_pool, PR_WAITOK); - - /* - * Defer allocation of a new level 1 page table until - * the first new mapping is entered; just take a reference - * to the kernel kernel_lev1map. - */ - pmap->pm_lev1map = kernel_lev1map; - pmap->pm_count = 1; for (i = 0; i < pmap_ncpuids; i++) { - pmap->pm_asn[i] = PMAP_ASN_RESERVED; + pmap->pm_asni[i].pma_asn = PMAP_ASN_RESERVED; /* XXX Locking? */ - pmap->pm_asngen[i] = pmap_asn_generation[i]; + pmap->pm_asni[i].pma_asngen = pmap_asn_info[i].pma_asngen; } simple_lock_init(&pmap->pm_slock); - simple_lock(&pmap_all_pmaps_slock); + for (;;) { + mtx_enter(&pmap_growkernel_mtx); + i = pmap_lev1map_create(pmap, cpu_number()); + mtx_leave(&pmap_growkernel_mtx); + if (i == 0) + break; + uvm_wait(__func__); + } + + mtx_enter(&pmap_all_pmaps_mtx); TAILQ_INSERT_TAIL(&pmap_all_pmaps, pmap, pm_list); - simple_unlock(&pmap_all_pmaps_slock); + mtx_leave(&pmap_all_pmaps_mtx); return (pmap); } @@ -1139,8 +1141,6 @@ pmap_destroy(pmap_t pmap) if (pmapdebug & PDB_FOLLOW) printf("pmap_destroy(%p)\n", pmap); #endif - if (pmap == NULL) - return; PMAP_LOCK(pmap); refs = --pmap->pm_count; @@ -1152,32 +1152,14 @@ pmap_destroy(pmap_t pmap) /* * Remove it from the global list of all pmaps. */ - simple_lock(&pmap_all_pmaps_slock); + mtx_enter(&pmap_all_pmaps_mtx); TAILQ_REMOVE(&pmap_all_pmaps, pmap, pm_list); - simple_unlock(&pmap_all_pmaps_slock); + mtx_leave(&pmap_all_pmaps_mtx); -#ifdef DIAGNOSTIC - /* - * Since the pmap is supposed to contain no valid - * mappings at this point, this should never happen. 
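Both kernel_pmap_store (a static array sized in whole struct pmap units) and the pmappl pool above are sized with PMAP_SIZEOF(), because struct pmap now ends in a per-CPU pmap_asn_info array instead of pointing at the removed pmap_asn_pool/pmap_asngen_pool allocations. A hedged sketch of that trailing-array sizing idiom; the macro here is an assumption modeled on the diff, not the header's exact definition:

```c
#include <stdio.h>
#include <stdlib.h>

struct asn_info {
	unsigned int asn;
	unsigned long asngen;
};

struct pmap_sketch {
	long count;			/* ...other pmap fields... */
	struct asn_info asni[1];	/* really ncpuids entries long */
};

#define SKETCH_ALIGN(x)	(((unsigned long)(x) + 7UL) & ~7UL)

/* One allocation sized for the pmap plus (ncpus - 1) extra slots. */
#define PMAP_SKETCH_SIZEOF(ncpus)					\
	SKETCH_ALIGN(sizeof(struct pmap_sketch) +			\
	    sizeof(struct asn_info) * ((ncpus) - 1))

int
main(void)
{
	int ncpus = 4;
	struct pmap_sketch *pm = calloc(1, PMAP_SKETCH_SIZEOF(ncpus));

	if (pm == NULL)
		return 1;
	pm->asni[ncpus - 1].asngen = 1;	/* last per-CPU slot is valid */
	printf("pmap size for %d cpus: %lu bytes\n",
	    ncpus, PMAP_SKETCH_SIZEOF(ncpus));
	free(pm);
	return 0;
}
```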
- */ - if (pmap->pm_lev1map != kernel_lev1map) { - printf("pmap_destroy: pmap still contains valid mappings!\n"); - if (pmap->pm_nlev2) - printf("pmap_destroy: %ld level 2 tables left\n", - pmap->pm_nlev2); - if (pmap->pm_nlev3) - printf("pmap_destroy: %ld level 3 tables left\n", - pmap->pm_nlev3); - pmap_remove(pmap, VM_MIN_ADDRESS, VM_MAX_ADDRESS); - pmap_update(pmap); - if (pmap->pm_lev1map != kernel_lev1map) - panic("pmap_destroy: pmap_remove() didn't"); - } -#endif + mtx_enter(&pmap_growkernel_mtx); + pmap_lev1map_destroy(pmap); + mtx_leave(&pmap_growkernel_mtx); - pool_put(&pmap_asn_pool, pmap->pm_asn); - pool_put(&pmap_asngen_pool, pmap->pm_asngen); pool_put(&pmap_pmap_pool, pmap); } @@ -1194,11 +1176,10 @@ pmap_reference(pmap_t pmap) if (pmapdebug & PDB_FOLLOW) printf("pmap_reference(%p)\n", pmap); #endif - if (pmap != NULL) { - PMAP_LOCK(pmap); - pmap->pm_count++; - PMAP_UNLOCK(pmap); - } + + PMAP_LOCK(pmap); + pmap->pm_count++; + PMAP_UNLOCK(pmap); } /* @@ -1243,9 +1224,6 @@ pmap_do_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva, boolean_t dowired) printf("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva); #endif - if (pmap == NULL) - return; - /* * If this is the kernel pmap, we can use a faster method * for accessing the PTEs (since the PT pages are always @@ -1366,7 +1344,7 @@ pmap_do_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva, boolean_t dowired) * Remove the reference to the L2 table that we * added above. This may free the L2 table. */ - pmap_l2pt_delref(pmap, l1pte, saved_l2pte, cpu_id); + pmap_l2pt_delref(pmap, l1pte, saved_l2pte); } } @@ -1374,7 +1352,7 @@ pmap_do_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva, boolean_t dowired) * Remove the reference to the L1 table that we added above. * This may free the L1 table. */ - pmap_l1pt_delref(pmap, saved_l1pte, cpu_id); + pmap_l1pt_delref(pmap, saved_l1pte); if (needisync) PMAP_SYNC_ISTREAM_USER(pmap); @@ -1397,6 +1375,7 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot) pv_entry_t pv, nextpv; boolean_t needkisync = FALSE; cpuid_t cpu_id = cpu_number(); + PMAP_TLB_SHOOTDOWN_CPUSET_DECL #ifdef DEBUG if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) || @@ -1413,8 +1392,20 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot) case VM_PROT_READ|VM_PROT_EXECUTE: case VM_PROT_READ: PMAP_HEAD_TO_MAP_LOCK(); -/* XXX */ pmap_changebit(pg, 0, ~(PG_KWE | PG_UWE), cpu_id); + for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next) { + PMAP_LOCK(pv->pv_pmap); + if (*pv->pv_pte & (PG_KWE | PG_UWE)) { + *pv->pv_pte &= ~(PG_KWE | PG_UWE); + PMAP_INVALIDATE_TLB(pv->pv_pmap, pv->pv_va, + pmap_pte_asm(pv->pv_pte), + PMAP_ISACTIVE(pv->pv_pmap, cpu_id), cpu_id); + PMAP_TLB_SHOOTDOWN(pv->pv_pmap, pv->pv_va, + pmap_pte_asm(pv->pv_pte)); + } + PMAP_UNLOCK(pv->pv_pmap); + } PMAP_HEAD_TO_MAP_UNLOCK(); + PMAP_TLB_SHOOTNOW(); return; /* remove_all */ @@ -1433,25 +1424,13 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot) pmap_pte_pa(pv->pv_pte) != pa) panic("pmap_page_protect: bad mapping"); #endif - if (pmap_pte_w(pv->pv_pte) == 0) { - if (pmap_remove_mapping(pmap, pv->pv_va, pv->pv_pte, - FALSE, cpu_id) == TRUE) { - if (pmap == pmap_kernel()) - needkisync |= TRUE; - else - PMAP_SYNC_ISTREAM_USER(pmap); - } - } -#ifdef DEBUG - else { - if (pmapdebug & PDB_PARANOIA) { - printf("%s wired mapping for %lx not removed\n", - "pmap_page_protect:", pa); - printf("vm wire count %d\n", - PHYS_TO_VM_PAGE(pa)->wire_count); - } + if (pmap_remove_mapping(pmap, pv->pv_va, pv->pv_pte, + FALSE, cpu_id) == TRUE) { + if (pmap == pmap_kernel()) + needkisync |= 
TRUE; + else + PMAP_SYNC_ISTREAM_USER(pmap); } -#endif PMAP_UNLOCK(pmap); } @@ -1475,6 +1454,7 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot) boolean_t hadasm; vaddr_t l1eva, l2eva; cpuid_t cpu_id = cpu_number(); + PMAP_TLB_SHOOTDOWN_CPUSET_DECL #ifdef DEBUG if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) @@ -1482,17 +1462,11 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot) pmap, sva, eva, prot); #endif - if (pmap == NULL) - return; - if ((prot & VM_PROT_READ) == VM_PROT_NONE) { pmap_remove(pmap, sva, eva); return; } - if (prot & VM_PROT_WRITE) - return; - PMAP_LOCK(pmap); bits = pte_prot(pmap, prot); @@ -1501,36 +1475,35 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot) l1pte = pmap_l1pte(pmap, sva); for (; sva < eva; sva = l1eva, l1pte++) { l1eva = alpha_trunc_l1seg(sva) + ALPHA_L1SEG_SIZE; - if (pmap_pte_v(l1pte)) { - l2pte = pmap_l2pte(pmap, sva, l1pte); - for (; sva < l1eva && sva < eva; sva = l2eva, l2pte++) { - l2eva = - alpha_trunc_l2seg(sva) + ALPHA_L2SEG_SIZE; - if (pmap_pte_v(l2pte)) { - l3pte = pmap_l3pte(pmap, sva, l2pte); - for (; sva < l2eva && sva < eva; - sva += PAGE_SIZE, l3pte++) { - if (pmap_pte_v(l3pte) && - pmap_pte_prot_chg(l3pte, - bits)) { - hadasm = - (pmap_pte_asm(l3pte) - != 0); - pmap_pte_set_prot(l3pte, - bits); - PMAP_INVALIDATE_TLB( - pmap, sva, hadasm, - isactive, cpu_id); - PMAP_TLB_SHOOTDOWN( - pmap, sva, - hadasm ? PG_ASM : 0); - } - } + if (!pmap_pte_v(l1pte)) + continue; + + l2pte = pmap_l2pte(pmap, sva, l1pte); + for (; sva < l1eva && sva < eva; sva = l2eva, l2pte++) { + l2eva = alpha_trunc_l2seg(sva) + ALPHA_L2SEG_SIZE; + if (!pmap_pte_v(l2pte)) + continue; + + l3pte = pmap_l3pte(pmap, sva, l2pte); + for (; sva < l2eva && sva < eva; + sva += PAGE_SIZE, l3pte++) { + if (!pmap_pte_v(l3pte)) + continue; + + if (pmap_pte_prot_chg(l3pte, bits)) { + hadasm = (pmap_pte_asm(l3pte) != 0); + pmap_pte_set_prot(l3pte, bits); + PMAP_INVALIDATE_TLB(pmap, sva, hadasm, + isactive, cpu_id); + PMAP_TLB_SHOOTDOWN(pmap, sva, + hadasm ? PG_ASM : 0); } } } } + PMAP_TLB_SHOOTNOW(); + if (prot & VM_PROT_EXECUTE) PMAP_SYNC_ISTREAM(pmap); @@ -1565,6 +1538,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags) boolean_t wired; cpuid_t cpu_id = cpu_number(); int error = 0; + PMAP_TLB_SHOOTDOWN_CPUSET_DECL #ifdef DEBUG if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) @@ -1613,20 +1587,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags) panic("pmap_enter: user pmap, invalid va 0x%lx", va); #endif - /* - * If we're still referencing the kernel kernel_lev1map, - * create a new level 1 page table. A reference will be - * added to the level 1 table when the level 2 table is - * created. 
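pmap_protect() and pmap_page_protect() above batch remote invalidations: PMAP_TLB_SHOOTDOWN() only queues (va, PG_ASM) jobs and records which CPUs need interrupting, and a single PMAP_TLB_SHOOTNOW() after the PTE walk sends the IPIs. A minimal sketch of that batch-then-kick shape, with all names hypothetical:

```c
/* Batch-then-kick shape of the PMAP_TLB_SHOOTDOWN()/PMAP_TLB_SHOOTNOW()
 * pairing above: collect targets while walking PTEs, send IPIs once. */
#include <stdio.h>

struct shoot_batch {
	unsigned long cpuset;	/* CPUs that must be interrupted */
	int jobs;
};

static void
shootdown(struct shoot_batch *b, unsigned long va, unsigned long active_cpus)
{
	/* Queue the job; remember every CPU the pmap is active on. */
	b->cpuset |= active_cpus;
	b->jobs++;
	printf("queued invalidation of va %#lx\n", va);
}

static void
shootnow(struct shoot_batch *b)
{
	unsigned long cpu;

	if (b->jobs == 0)
		return;
	for (cpu = 0; cpu < 8 * sizeof(b->cpuset); cpu++)
		if (b->cpuset & (1UL << cpu))
			printf("IPI cpu %lu: drain %d jobs\n", cpu, b->jobs);
	b->cpuset = 0;
	b->jobs = 0;
}

int
main(void)
{
	struct shoot_batch b = { 0, 0 };

	/* Walk a range, batching; one IPI round at the end. */
	shootdown(&b, 0x2000, 0x6);	/* active on CPUs 1 and 2 */
	shootdown(&b, 0x3000, 0x6);
	shootnow(&b);
	return 0;
}
```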
-	 */
-	if (pmap->pm_lev1map == kernel_lev1map) {
-		error = pmap_lev1map_create(pmap, cpu_id);
-		if (error) {
-			if (flags & PMAP_CANFAIL)
-				goto out;
-			panic("pmap_enter: unable to create lev1map");
-		}
-	}
+	KASSERT(pmap->pm_lev1map != kernel_lev1map);
 
 	/*
 	 * Check to see if the level 1 PTE is valid, and
@@ -1639,13 +1600,12 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
 		pmap_physpage_addref(l1pte);
 		error = pmap_ptpage_alloc(pmap, l1pte, PGU_L2PT);
 		if (error) {
-			pmap_l1pt_delref(pmap, l1pte, cpu_id);
+			pmap_l1pt_delref(pmap, l1pte);
 			if (flags & PMAP_CANFAIL)
 				goto out;
 			panic("pmap_enter: unable to create L2 PT "
 			    "page");
 		}
-		pmap->pm_nlev2++;
 #ifdef DEBUG
 		if (pmapdebug & PDB_PTPAGE)
 			printf("pmap_enter: new level 2 table at "
@@ -1664,13 +1624,12 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
 		pmap_physpage_addref(l2pte);
 		error = pmap_ptpage_alloc(pmap, l2pte, PGU_L3PT);
 		if (error) {
-			pmap_l2pt_delref(pmap, l1pte, l2pte, cpu_id);
+			pmap_l2pt_delref(pmap, l1pte, l2pte);
 			if (flags & PMAP_CANFAIL)
 				goto out;
 			panic("pmap_enter: unable to create L3 PT "
 			    "page");
 		}
-		pmap->pm_nlev3++;
 #ifdef DEBUG
 		if (pmapdebug & PDB_PTPAGE)
 			printf("pmap_enter: new level 3 table at "
@@ -1842,6 +1801,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
 	if (tflush) {
 		PMAP_INVALIDATE_TLB(pmap, va, hadasm, isactive, cpu_id);
 		PMAP_TLB_SHOOTDOWN(pmap, va, hadasm ? PG_ASM : 0);
+		PMAP_TLB_SHOOTNOW();
 	}
 	if (setisync)
 		PMAP_SET_NEEDISYNC(pmap);
@@ -1870,6 +1830,7 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
 	cpuid_t cpu_id = cpu_number();
 	boolean_t needisync = FALSE;
 	pmap_t pmap = pmap_kernel();
+	PMAP_TLB_SHOOTDOWN_CPUSET_DECL
 
 #ifdef DEBUG
 	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
@@ -1915,6 +1876,7 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
 	 */
 	PMAP_INVALIDATE_TLB(pmap, va, TRUE, TRUE, cpu_id);
 	PMAP_TLB_SHOOTDOWN(pmap, va, PG_ASM);
+	PMAP_TLB_SHOOTNOW();
 
 	if (needisync)
 		PMAP_SYNC_ISTREAM_KERNEL();
@@ -1923,8 +1885,8 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
 /*
  * pmap_kremove:	[ INTERFACE ]
  *
- *	Remove a mapping entered with pmap_kenter_pa()
- *	starting at va, for size bytes (assumed to be page rounded).
+ *	Remove a mapping entered with pmap_kenter_pa() starting at va,
+ *	for size bytes (assumed to be page rounded).
  */
 void
 pmap_kremove(vaddr_t va, vsize_t size)
@@ -1933,6 +1895,7 @@ pmap_kremove(vaddr_t va, vsize_t size)
 	boolean_t needisync = FALSE;
 	cpuid_t cpu_id = cpu_number();
 	pmap_t pmap = pmap_kernel();
+	PMAP_TLB_SHOOTDOWN_CPUSET_DECL
 
 #ifdef DEBUG
 	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
@@ -1970,6 +1933,8 @@ pmap_kremove(vaddr_t va, vsize_t size)
 		}
 	}
 
+	PMAP_TLB_SHOOTNOW();
+
 	if (needisync)
 		PMAP_SYNC_ISTREAM_KERNEL();
 }
@@ -1990,8 +1955,6 @@ pmap_unwire(pmap_t pmap, vaddr_t va)
 	if (pmapdebug & PDB_FOLLOW)
 		printf("pmap_unwire(%p, %lx)\n", pmap, va);
 #endif
-	if (pmap == NULL)
-		return;
 
 	PMAP_LOCK(pmap);
 
@@ -2096,16 +2059,7 @@ pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
 *
 *	This routine is only advisory and need not do anything.
 */
-void
-pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
-    vaddr_t src_addr)
-{
-#ifdef DEBUG
-	if (pmapdebug & PDB_FOLLOW)
-		printf("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
-		    dst_pmap, src_pmap, dst_addr, len, src_addr);
-#endif
-}
+/* call deleted in <machine/pmap.h> */
 
 /*
  * pmap_collect:	[ INTERFACE ]
@@ -2164,21 +2118,13 @@ pmap_activate(struct proc *p)
 		printf("pmap_activate(%p)\n", p);
 #endif
 
-	/*
-	 * Mark the pmap in use by this processor.
-	 */
+	/* Mark the pmap in use by this processor. */
 	atomic_setbits_ulong(&pmap->pm_cpus, (1UL << cpu_id));
 
-	PMAP_LOCK(pmap);
-
-	/*
-	 * Allocate an ASN.
-	 */
+	/* Allocate an ASN. */
 	pmap_asn_alloc(pmap, cpu_id);
 
 	PMAP_ACTIVATE(pmap, p, cpu_id);
-
-	PMAP_UNLOCK(pmap);
 }
 
 /*
@@ -2228,6 +2174,7 @@ pmap_zero_page(struct vm_page *pg)
 #endif
 
 	p0 = (u_long *)ALPHA_PHYS_TO_K0SEG(phys);
+	p1 = NULL;
 	pend = (u_long *)((u_long)p0 + PAGE_SIZE);
 
 	/*
@@ -2454,6 +2401,7 @@ pmap_remove_mapping(pmap_t pmap, vaddr_t va, pt_entry_t *pte,
 	boolean_t hadasm;
 	boolean_t isactive;
 	boolean_t needisync = FALSE;
+	PMAP_TLB_SHOOTDOWN_CPUSET_DECL
 
 #ifdef DEBUG
 	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
@@ -2508,6 +2456,7 @@ pmap_remove_mapping(pmap_t pmap, vaddr_t va, pt_entry_t *pte,
 
 	PMAP_INVALIDATE_TLB(pmap, va, hadasm, isactive, cpu_id);
 	PMAP_TLB_SHOOTDOWN(pmap, va, hadasm ? PG_ASM : 0);
+	PMAP_TLB_SHOOTNOW();
 
 	/*
 	 * If we're removing a user mapping, check to see if we
@@ -2555,6 +2504,7 @@ pmap_changebit(struct vm_page *pg, u_long set, u_long mask, cpuid_t cpu_id)
 	pt_entry_t *pte, npte;
 	vaddr_t va;
 	boolean_t hadasm, isactive;
+	PMAP_TLB_SHOOTDOWN_CPUSET_DECL
 
 #ifdef DEBUG
 	if (pmapdebug & PDB_BITS)
@@ -2583,6 +2533,8 @@ pmap_changebit(struct vm_page *pg, u_long set, u_long mask, cpuid_t cpu_id)
 		}
 		PMAP_UNLOCK(pv->pv_pmap);
 	}
+
+	PMAP_TLB_SHOOTNOW();
 }
 
 /*
@@ -2649,11 +2601,6 @@ pmap_emulate_reference(struct proc *p, vaddr_t v, int user, int type)
 #ifdef DEBUG				/* These checks are more expensive */
 	if (!pmap_pte_v(pte))
 		panic("pmap_emulate_reference: invalid pte");
-#if 0
-	/*
-	 * Can't do these, because cpu_fork call pmap_emulate_reference(),
-	 * and the bits aren't guaranteed, for them...
-	 */
 	if (type == ALPHA_MMCSR_FOW) {
 		if (!(*pte & (user ? PG_UWE : PG_UWE | PG_KWE)))
 			panic("pmap_emulate_reference: write but unwritable");
@@ -2665,7 +2612,6 @@ pmap_emulate_reference(struct proc *p, vaddr_t v, int user, int type)
 		if (!(*pte & (PG_FOR | PG_FOE)))
 			panic("pmap_emulate_reference: !write but not FOR|FOE");
 	}
-#endif
 	/* Other diagnostics? */
 #endif
 	pa = pmap_pte_pa(pte);
@@ -2686,7 +2632,8 @@
 #ifdef DIAGNOSTIC
 	if (pg == NULL)
-		panic("pmap_emulate_reference(%p, 0x%lx, %d, %d): pa 0x%lx not managed", p, v, user, type, pa);
+		panic("pmap_emulate_reference(%p, 0x%lx, %d, %d): "
+		    "pa 0x%lx not managed", p, v, user, type, pa);
 #endif
 
 	/*
@@ -2728,9 +2675,6 @@ pmap_pv_dump(paddr_t pa)
 {
 	struct vm_page *pg;
 	pv_entry_t pv;
-	static const char *usage[] = {
-		"normal", "pvent", "l1pt", "l2pt", "l3pt",
-	};
 
 	pg = PHYS_TO_VM_PAGE(pa);
 
@@ -2996,14 +2940,13 @@ pmap_growkernel(vaddr_t maxkvaddr)
 	paddr_t ptaddr;
 	pt_entry_t *l1pte, *l2pte, pte;
 	vaddr_t va;
-	int s, l1idx;
+	int l1idx;
+
+	mtx_enter(&pmap_growkernel_mtx);
 
 	if (maxkvaddr <= pmap_maxkvaddr)
 		goto out;		/* we are OK */
 
-	s = splhigh();			/* to be safe */
-	simple_lock(&pmap_growkernel_slock);
-
 	va = pmap_maxkvaddr;
 
 	while (va < maxkvaddr) {
@@ -3035,7 +2978,7 @@
 		l1idx = l1pte_index(va);
 
 		/* Update all the user pmaps. */
-		simple_lock(&pmap_all_pmaps_slock);
+		mtx_enter(&pmap_all_pmaps_mtx);
 		for (pm = TAILQ_FIRST(&pmap_all_pmaps);
 		    pm != NULL; pm = TAILQ_NEXT(pm, pm_list)) {
 			/* Skip the kernel pmap. */
@@ -3043,14 +2986,11 @@
 				continue;
 
 			PMAP_LOCK(pm);
-			if (pm->pm_lev1map == kernel_lev1map) {
-				PMAP_UNLOCK(pm);
-				continue;
-			}
+			KDASSERT(pm->pm_lev1map != kernel_lev1map);
 			pm->pm_lev1map[l1idx] = pte;
 			PMAP_UNLOCK(pm);
 		}
-		simple_unlock(&pmap_all_pmaps_slock);
+		mtx_leave(&pmap_all_pmaps_mtx);
 	}
 
 	/*
@@ -3078,10 +3018,9 @@ pmap_growkernel(vaddr_t maxkvaddr)
 
 	pmap_maxkvaddr = va;
 
-	simple_unlock(&pmap_growkernel_slock);
-	splx(s);
-
 out:
+	mtx_leave(&pmap_growkernel_mtx);
+
 	return (pmap_maxkvaddr);
 
 die:
@@ -3093,42 +3032,25 @@
 *
 *	Create a new level 1 page table for the specified pmap.
 *
- *	Note: the pmap must already be locked.
+ *	Note: growkernel must already be held and the pmap either
+ *	already locked or unreferenced globally.
 */
 int
 pmap_lev1map_create(pmap_t pmap, cpuid_t cpu_id)
 {
 	pt_entry_t *l1pt;
 
-#ifdef DIAGNOSTIC
-	if (pmap == pmap_kernel())
-		panic("pmap_lev1map_create: got kernel pmap");
-
-	if (pmap->pm_asn[cpu_id] != PMAP_ASN_RESERVED)
-		panic("pmap_lev1map_create: pmap uses non-reserved ASN");
-#endif
-
-	simple_lock(&pmap_growkernel_slock);
+	KASSERT(pmap != pmap_kernel());
+	KASSERT(pmap->pm_asni[cpu_id].pma_asn == PMAP_ASN_RESERVED);
 
+	/* Don't sleep -- we're called with locks held. */
 	l1pt = pool_get(&pmap_l1pt_pool, PR_NOWAIT);
-	if (l1pt == NULL) {
-		simple_unlock(&pmap_growkernel_slock);
+	if (l1pt == NULL)
 		return (ENOMEM);
-	}
 
 	pmap_l1pt_ctor(l1pt);
 	pmap->pm_lev1map = l1pt;
 
-	simple_unlock(&pmap_growkernel_slock);
-
-	/*
-	 * The page table base has changed; if the pmap was active,
-	 * reactivate it.
-	 */
-	if (PMAP_ISACTIVE(pmap, cpu_id)) {
-		pmap_asn_alloc(pmap, cpu_id);
-		PMAP_ACTIVATE(pmap, curproc, cpu_id);
-	}
 
 	return (0);
 }
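The create and destroy sides of the lev1map life cycle now both run under pmap_growkernel_mtx, so pmap_growkernel() can safely patch every user level 1 table. A sketch of the create side as the pmap_destroy() hunk earlier implies it — example_pmap_create() is hypothetical and error handling is elided:

/*
 * Sketch only: serializing lev1map creation against pmap_growkernel().
 */
pmap_t
example_pmap_create(void)
{
	pmap_t pmap;

	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
	/* ... initialize counts, locks, ASN info ... */

	/*
	 * Hold pmap_growkernel_mtx so pmap_growkernel() cannot walk
	 * pmap_all_pmaps while this pmap is still being set up.
	 */
	mtx_enter(&pmap_growkernel_mtx);
	if (pmap_lev1map_create(pmap, cpu_number()) != 0) {
		mtx_leave(&pmap_growkernel_mtx);
		pool_put(&pmap_pmap_pool, pmap);
		return (NULL);
	}
	/* ... link the pmap onto pmap_all_pmaps ... */
	mtx_leave(&pmap_growkernel_mtx);

	return (pmap);
}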
@@ -3137,17 +3059,15 @@ pmap_lev1map_create(pmap_t pmap, cpuid_t cpu_id)
 *
 *	Destroy the level 1 page table for the specified pmap.
 *
- *	Note: the pmap must already be locked.
+ *	Note: growkernel must already be held and the pmap either
+ *	already locked or unreferenced globally.
 */
 void
-pmap_lev1map_destroy(pmap_t pmap, cpuid_t cpu_id)
+pmap_lev1map_destroy(pmap_t pmap)
 {
 	pt_entry_t *l1pt = pmap->pm_lev1map;
 
-#ifdef DIAGNOSTIC
-	if (pmap == pmap_kernel())
-		panic("pmap_lev1map_destroy: got kernel pmap");
-#endif
+	KASSERT(pmap != pmap_kernel());
 
 	/*
 	 * Go back to referencing the global kernel_lev1map.
@@ -3155,28 +3075,6 @@ pmap_lev1map_destroy(pmap_t pmap)
 	pmap->pm_lev1map = kernel_lev1map;
 
 	/*
-	 * The page table base has changed; if the pmap was active,
-	 * reactivate it.  Note that allocation of a new ASN is
-	 * not necessary here:
-	 *
-	 *	(1) We've gotten here because we've deleted all
-	 *	    user mappings in the pmap, invalidating the
-	 *	    TLB entries for them as we go.
-	 *
-	 *	(2) kernel_lev1map contains only kernel mappings, which
-	 *	    were identical in the user pmap, and all of
-	 *	    those mappings have PG_ASM, so the ASN doesn't
-	 *	    matter.
-	 *
-	 * We do, however, ensure that the pmap is using the
-	 * reserved ASN, to ensure that no two pmaps never have
-	 * clashing TLB entries.
-	 */
-	PMAP_INVALIDATE_ASN(pmap, cpu_id);
-	if (PMAP_ISACTIVE(pmap, cpu_id))
-		PMAP_ACTIVATE(pmap, curproc, cpu_id);
-
-	/*
 	 * Free the old level 1 page table page.
 	 */
 	pool_put(&pmap_l1pt_pool, l1pt);
@@ -3217,6 +3115,10 @@ pmap_l1pt_ctor(pt_entry_t *l1pt)
 * pmap_l1pt_alloc:
 *
 *	Page allocator for L1 PT pages.
+ *
+ *	Note: The growkernel lock is held across allocations
+ *	from this pool, so we don't need to acquire it
+ *	ourselves.
 */
 void *
 pmap_l1pt_alloc(struct pool *pp, int flags, int *slowdown)
@@ -3312,6 +3214,7 @@
 void
 pmap_l3pt_delref(pmap_t pmap, vaddr_t va, pt_entry_t *l3pte, cpuid_t cpu_id)
 {
 	pt_entry_t *l1pte, *l2pte;
+	PMAP_TLB_SHOOTDOWN_CPUSET_DECL
 
 	l1pte = pmap_l1pte(pmap, va);
 	l2pte = pmap_l2pte(pmap, va, l1pte);
@@ -3331,7 +3234,6 @@ pmap_l3pt_delref(pmap_t pmap, vaddr_t va, pt_entry_t *l3pte, cpuid_t cpu_id)
 			    "0x%lx\n", pmap_pte_pa(l2pte));
 #endif
 		pmap_ptpage_free(pmap, l2pte);
-		pmap->pm_nlev3--;
 
 		/*
 		 * We've freed a level 3 table, so we must
@@ -3346,12 +3248,13 @@ pmap_l3pt_delref(pmap_t pmap, vaddr_t va, pt_entry_t *l3pte, cpuid_t cpu_id)
 		    PMAP_ISACTIVE(pmap, cpu_id), cpu_id);
 		PMAP_TLB_SHOOTDOWN(pmap,
 		    (vaddr_t)(&VPT[VPT_INDEX(va)]), 0);
+		PMAP_TLB_SHOOTNOW();
 
 		/*
 		 * We've freed a level 3 table, so delete the reference
 		 * on the level 2 table.
 		 */
-		pmap_l2pt_delref(pmap, l1pte, l2pte, cpu_id);
+		pmap_l2pt_delref(pmap, l1pte, l2pte);
 	}
 }
 
@@ -3364,15 +3267,9 @@
 *	Note: the pmap must already be locked.
 */
 void
-pmap_l2pt_delref(pmap_t pmap, pt_entry_t *l1pte, pt_entry_t *l2pte,
-    cpuid_t cpu_id)
+pmap_l2pt_delref(pmap_t pmap, pt_entry_t *l1pte, pt_entry_t *l2pte)
 {
-
-#ifdef DIAGNOSTIC
-	if (pmap == pmap_kernel())
-		panic("pmap_l2pt_delref: kernel pmap");
-#endif
-
+	KASSERT(pmap != pmap_kernel());
 	if (pmap_physpage_delref(l2pte) == 0) {
 		/*
 		 * No more mappings in this segment; we can free the
@@ -3384,13 +3281,12 @@ pmap_l2pt_delref(pmap_t pmap, pt_entry_t *l1pte, pt_entry_t *l2pte,
 			    "0x%lx\n", pmap_pte_pa(l1pte));
 #endif
 		pmap_ptpage_free(pmap, l1pte);
-		pmap->pm_nlev2--;
 
 		/*
 		 * We've freed a level 2 table, so delete the reference
 		 * on the level 1 table.
 		 */
-		pmap_l1pt_delref(pmap, l1pte, cpu_id);
+		pmap_l1pt_delref(pmap, l1pte);
 	}
 }
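The delref functions above form a reference-counting chain: dropping the last reference on an L3 page table page frees it and drops a reference on the L2 page holding its PTE, and so on up to the L1 table. A compact user-space model of the idea — the types and names below are illustrative, not the kernel's:

#include <assert.h>
#include <stddef.h>

/* Toy model: one refcount per page-table page, chained by level. */
struct ptpage {
	int refcnt;		/* live entries in this PT page */
	struct ptpage *parent;	/* PT page holding our PTE, or NULL */
};

/* Drop one reference; "free" the page and recurse when it hits zero. */
static void
ptpage_delref(struct ptpage *pt)
{
	assert(pt->refcnt > 0);
	if (--pt->refcnt == 0 && pt->parent != NULL) {
		/* the page would be returned to the free list here */
		ptpage_delref(pt->parent);
	}
}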
@@ -3403,21 +3299,10 @@ pmap_l2pt_delref(pmap_t pmap, pt_entry_t *l1pte, pt_entry_t *l2pte,
 *	Note: the pmap must already be locked.
 */
 void
-pmap_l1pt_delref(pmap_t pmap, pt_entry_t *l1pte, cpuid_t cpu_id)
+pmap_l1pt_delref(pmap_t pmap, pt_entry_t *l1pte)
 {
-
-#ifdef DIAGNOSTIC
-	if (pmap == pmap_kernel())
-		panic("pmap_l1pt_delref: kernel pmap");
-#endif
-
-	if (pmap_physpage_delref(l1pte) == 0) {
-		/*
-		 * No more level 2 tables left, go back to the global
-		 * kernel_lev1map.
-		 */
-		pmap_lev1map_destroy(pmap, cpu_id);
-	}
+	KASSERT(pmap != pmap_kernel());
+	pmap_physpage_delref(l1pte);
 }
 
 /******************** Address Space Number management ********************/
 
@@ -3427,11 +3312,15 @@
 *
 *	Allocate and assign an ASN to the specified pmap.
 *
- *	Note: the pmap must already be locked.
+ *	Note: the pmap must already be locked.  This may be called from
+ *	an interprocessor interrupt, and in that case, the sender of
+ *	the IPI has the pmap lock.
 */
 void
 pmap_asn_alloc(pmap_t pmap, cpuid_t cpu_id)
 {
+	struct pmap_asn_info *pma = &pmap->pm_asni[cpu_id];
+	struct pmap_asn_info *cpma = &pmap_asn_info[cpu_id];
 
 #ifdef DEBUG
 	if (pmapdebug & (PDB_FOLLOW|PDB_ASN))
@@ -3444,6 +3333,11 @@ pmap_asn_alloc(pmap_t pmap, cpuid_t cpu_id)
 	 * kernel mappings exist in that map, and all kernel mappings
 	 * have PG_ASM set.  If the pmap eventually gets its own
 	 * lev1map, an ASN will be allocated at that time.
+	 *
+	 * Only the kernel pmap will reference kernel_lev1map.  Do the
+	 * same old fixups, but note that we no longer need the pmap
+	 * to be locked if we're in this mode, since pm_lev1map will
+	 * never change.
 	 */
 	if (pmap->pm_lev1map == kernel_lev1map) {
 #ifdef DEBUG
@@ -3451,11 +3345,21 @@ pmap_asn_alloc(pmap_t pmap, cpuid_t cpu_id)
 			printf("pmap_asn_alloc: still references "
 			    "kernel_lev1map\n");
 #endif
-#ifdef DIAGNOSTIC
-		if (pmap->pm_asn[cpu_id] != PMAP_ASN_RESERVED)
-			panic("pmap_asn_alloc: kernel_lev1map without "
-			    "PMAP_ASN_RESERVED");
-#endif
+#if defined(MULTIPROCESSOR)
+		/*
+		 * In a multiprocessor system, it's possible to
+		 * get here without having PMAP_ASN_RESERVED in
+		 * pmap->pm_asni[cpu_id].pma_asn; see pmap_lev1map_destroy().
+		 *
+		 * So what we do here is simply assign the reserved
+		 * ASN for kernel_lev1map users and let things
+		 * continue on.  We do, however, let uniprocessor
+		 * configurations continue to make this assertion.
+		 */
+		pma->pma_asn = PMAP_ASN_RESERVED;
+#else
+		KASSERT(pma->pma_asn == PMAP_ASN_RESERVED);
+#endif /* MULTIPROCESSOR */
 		return;
 	}
 
@@ -3469,11 +3373,11 @@ pmap_asn_alloc(pmap_t pmap, cpuid_t cpu_id)
 		 * Refresh the pmap's generation number, to
 		 * simplify logic elsewhere.
 		 */
-		pmap->pm_asngen[cpu_id] = pmap_asn_generation[cpu_id];
+		pma->pma_asngen = cpma->pma_asngen;
 #ifdef DEBUG
 		if (pmapdebug & PDB_ASN)
 			printf("pmap_asn_alloc: no ASNs, using asngen %lu\n",
-			    pmap->pm_asngen[cpu_id]);
+			    pma->pma_asngen);
 #endif
 		return;
 	}
 
@@ -3481,15 +3385,15 @@
 	/*
 	 * Hopefully, we can continue using the one we have...
 	 */
-	if (pmap->pm_asn[cpu_id] != PMAP_ASN_RESERVED &&
-	    pmap->pm_asngen[cpu_id] == pmap_asn_generation[cpu_id]) {
+	if (pma->pma_asn != PMAP_ASN_RESERVED &&
+	    pma->pma_asngen == cpma->pma_asngen) {
 		/*
 		 * ASN is still in the current generation; keep on using it.
 		 */
 #ifdef DEBUG
 		if (pmapdebug & PDB_ASN)
 			printf("pmap_asn_alloc: same generation, keeping %u\n",
-			    pmap->pm_asn[cpu_id]);
+			    pma->pma_asn);
 #endif
 		return;
 	}
 
 	/*
 	 * Need to assign a new ASN.  Grab the next one, incrementing
 	 * the generation number if we have to.
 	 */
-	if (pmap_next_asn[cpu_id] > pmap_max_asn) {
+	if (cpma->pma_asn > pmap_max_asn) {
 		/*
 		 * Invalidate all non-PG_ASM TLB entries and the
 		 * I-cache, and bump the generation number.
 		 */
 		ALPHA_TBIAP();
 		alpha_pal_imb();
 
-		pmap_next_asn[cpu_id] = 1;
-
-		pmap_asn_generation[cpu_id]++;
+		cpma->pma_asn = 1;
+		cpma->pma_asngen++;
 #ifdef DIAGNOSTIC
-		if (pmap_asn_generation[cpu_id] == 0) {
+		if (cpma->pma_asngen == 0) {
 			/*
 			 * The generation number has wrapped.  We could
 			 * handle this scenario by traversing all of
@@ -3533,20 +3436,20 @@ pmap_asn_alloc(pmap_t pmap, cpuid_t cpu_id)
 #ifdef DEBUG
 		if (pmapdebug & PDB_ASN)
 			printf("pmap_asn_alloc: generation bumped to %lu\n",
-			    pmap_asn_generation[cpu_id]);
+			    cpma->pma_asngen);
 #endif
 	}
 
 	/*
 	 * Assign the new ASN and validate the generation number.
 	 */
-	pmap->pm_asn[cpu_id] = pmap_next_asn[cpu_id]++;
-	pmap->pm_asngen[cpu_id] = pmap_asn_generation[cpu_id];
+	pma->pma_asn = cpma->pma_asn++;
+	pma->pma_asngen = cpma->pma_asngen;
 
 #ifdef DEBUG
 	if (pmapdebug & PDB_ASN)
 		printf("pmap_asn_alloc: assigning %u to pmap %p\n",
-		    pmap->pm_asn[cpu_id], pmap);
+		    pma->pma_asn, pmap);
 #endif
 
 	/*
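pmap_asn_alloc() implements the classic generation-based ASN scheme: ASNs are handed out in sequence, and when they run out the whole TLB is flushed and a generation counter is bumped, invalidating every previously issued ASN at once. A stripped-down model of the algorithm, with simplified types (the real per-CPU state lives in struct pmap_asn_info):

/* Toy model of generation-based ASN allocation; not the kernel code. */
#define MAX_ASN		255	/* illustrative; really hardware-dependent */
#define ASN_RESERVED	0

struct asn_state {
	unsigned int next_asn;	/* next ASN to hand out on this CPU */
	unsigned long gen;	/* current generation on this CPU */
};

/* Return a valid ASN for a pmap, reusing it while its generation lives. */
static unsigned int
asn_alloc(struct asn_state *cpu, unsigned int *pm_asn, unsigned long *pm_gen)
{
	if (*pm_asn != ASN_RESERVED && *pm_gen == cpu->gen)
		return (*pm_asn);	/* still valid: keep it */

	if (cpu->next_asn > MAX_ASN) {
		/* Out of ASNs: flush the TLB and start a new generation. */
		/* (ALPHA_TBIAP(); alpha_pal_imb(); on real hardware.) */
		cpu->next_asn = 1;
		cpu->gen++;
	}
	*pm_asn = cpu->next_asn++;
	*pm_gen = cpu->gen;
	return (*pm_asn);
}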
@@ -3563,48 +3466,97 @@
 * pmap_tlb_shootdown:
 *
 *	Cause the TLB entry for pmap/va to be shot down.
+ *
+ *	NOTE: The pmap must be locked here.
 */
 void
-pmap_tlb_shootdown(pmap_t pmap, vaddr_t va, pt_entry_t pte)
+pmap_tlb_shootdown(pmap_t pmap, vaddr_t va, pt_entry_t pte, u_long *cpumaskp)
 {
-	u_long ipinum;
-	cpuid_t i, cpu_id = cpu_number();
 	struct pmap_tlb_shootdown_q *pq;
 	struct pmap_tlb_shootdown_job *pj;
+	struct cpu_info *ci, *self = curcpu();
+	u_long cpumask;
+	CPU_INFO_ITERATOR cii;
+#if 0
 	int s;
+#endif
+
+	cpumask = 0;
 
-	for (i = 0; i < hwrpb->rpb_pcs_cnt; i++) {
-		if (i == cpu_id || (cpus_running & (1UL << i)) == 0)
+	CPU_INFO_FOREACH(cii, ci) {
+		if (ci == self)
+			continue;
+
+		/*
+		 * The pmap must be locked (unless it's the kernel
+		 * pmap, in which case it is okay for it to be
+		 * unlocked), which prevents it from becoming
+		 * active on any additional processors.  This makes
+		 * it safe to check for activeness.  If it's not
+		 * active on the processor in question, then just
+		 * mark it as needing a new ASN the next time it
+		 * does, saving the IPI.  We always have to send
+		 * the IPI for the kernel pmap.
+		 *
+		 * Note if it's marked active now, and it becomes
+		 * inactive by the time the processor receives
+		 * the IPI, that's okay, because it does the right
+		 * thing with it later.
+		 */
+		if (pmap != pmap_kernel() &&
+		    PMAP_ISACTIVE(pmap, ci->ci_cpuid) == 0) {
+			PMAP_INVALIDATE_ASN(pmap, ci->ci_cpuid);
 			continue;
+		}
 
-		pq = &pmap_tlb_shootdown_q[i];
+		cpumask |= 1UL << ci->ci_cpuid;
+
+		pq = &pmap_tlb_shootdown_q[ci->ci_cpuid];
 		PSJQ_LOCK(pq, s);
 
-		pj = pmap_tlb_shootdown_job_get(pq);
 		pq->pq_pte |= pte;
+
+		/*
+		 * If a global flush is already pending, we
+		 * don't really have to do anything else.
+		 */
+		if (pq->pq_tbia) {
+			PSJQ_UNLOCK(pq, s);
+			continue;
+		}
+
+		pj = pmap_tlb_shootdown_job_get(pq);
 		if (pj == NULL) {
 			/*
-			 * Couldn't allocate a job entry.  Just do a
-			 * TBIA[P].
+			 * Couldn't allocate a job entry.  Just
+			 * tell the processor to kill everything.
 			 */
-			if (pq->pq_pte & PG_ASM)
-				ipinum = ALPHA_IPI_SHOOTDOWN;
-			else
-				ipinum = ALPHA_IPI_IMB;
-
-			alpha_send_ipi(i, ipinum);
+			pq->pq_tbia = 1;
 		} else {
 			pj->pj_pmap = pmap;
 			pj->pj_va = va;
 			pj->pj_pte = pte;
 			TAILQ_INSERT_TAIL(&pq->pq_head, pj, pj_list);
-			ipinum = ALPHA_IPI_SHOOTDOWN;
 		}
-
-		alpha_send_ipi(i, ipinum);
-
 		PSJQ_UNLOCK(pq, s);
 	}
+
+	*cpumaskp |= cpumask;
+}
+
+/*
+ * pmap_tlb_shootnow:
+ *
+ *	Process the TLB shootdowns that we have been accumulating
+ *	for the specified processor set.
+ */
+void
+pmap_tlb_shootnow(u_long cpumask)
+{
+
+	alpha_multicast_ipi(cpumask, ALPHA_IPI_SHOOTDOWN);
 }
 
 /*
@@ -3615,22 +3567,34 @@
 void
 pmap_do_tlb_shootdown(struct cpu_info *ci, struct trapframe *framep)
 {
-	cpuid_t cpu_id = ci->ci_cpuid;
+	u_long cpu_id = ci->ci_cpuid;
 	u_long cpu_mask = (1UL << cpu_id);
 	struct pmap_tlb_shootdown_q *pq = &pmap_tlb_shootdown_q[cpu_id];
 	struct pmap_tlb_shootdown_job *pj;
+#if 0
 	int s;
+#endif
 
 	PSJQ_LOCK(pq, s);
 
-	while ((pj = TAILQ_FIRST(&pq->pq_head)) != NULL) {
-		TAILQ_REMOVE(&pq->pq_head, pj, pj_list);
-		PMAP_INVALIDATE_TLB(pj->pj_pmap, pj->pj_va,
-		    pj->pj_pte & PG_ASM, pj->pj_pmap->pm_cpus & cpu_mask,
-		    cpu_id);
-		pmap_tlb_shootdown_job_put(pq, pj);
+	if (pq->pq_tbia) {
+		if (pq->pq_pte & PG_ASM)
+			ALPHA_TBIA();
+		else
+			ALPHA_TBIAP();
+		pq->pq_tbia = 0;
+		pmap_tlb_shootdown_q_drain(pq);
+	} else {
+		while ((pj = TAILQ_FIRST(&pq->pq_head)) != NULL) {
+			TAILQ_REMOVE(&pq->pq_head, pj, pj_list);
+			PMAP_INVALIDATE_TLB(pj->pj_pmap, pj->pj_va,
			    pj->pj_pte & PG_ASM,
+			    pj->pj_pmap->pm_cpus & cpu_mask, cpu_id);
+			pmap_tlb_shootdown_job_put(pq, pj);
+		}
+
+		pq->pq_pte = 0;
 	}
 
-	pq->pq_pte = 0;
-
 	PSJQ_UNLOCK(pq, s);
 }
 
 /*
@@ -3641,28 +3605,19 @@ pmap_do_tlb_shootdown(struct cpu_info *ci, struct trapframe *framep)
 *	Drain a processor's TLB shootdown queue.  We do not perform
 *	the shootdown operations.  This is merely a convenience
 *	function.
+ *
+ *	Note: We expect the queue to be locked.
 */
 void
-pmap_tlb_shootdown_q_drain(cpuid_t cpu_id, boolean_t all)
+pmap_tlb_shootdown_q_drain(struct pmap_tlb_shootdown_q *pq)
 {
-	struct pmap_tlb_shootdown_q *pq = &pmap_tlb_shootdown_q[cpu_id];
-	struct pmap_tlb_shootdown_job *pj, *npj;
-	pt_entry_t npte = 0;
-	int s;
-
-	PSJQ_LOCK(pq, s);
+	struct pmap_tlb_shootdown_job *pj;
 
-	for (pj = TAILQ_FIRST(&pq->pq_head); pj != NULL; pj = npj) {
-		npj = TAILQ_NEXT(pj, pj_list);
-		if (all || (pj->pj_pte & PG_ASM) == 0) {
-			TAILQ_REMOVE(&pq->pq_head, pj, pj_list);
-			pmap_tlb_shootdown_job_put(pq, pj);
-		} else
-			npte |= pj->pj_pte;
+	while ((pj = TAILQ_FIRST(&pq->pq_head)) != NULL) {
+		TAILQ_REMOVE(&pq->pq_head, pj, pj_list);
+		pmap_tlb_shootdown_job_put(pq, pj);
 	}
 
-	pq->pq_pte = npte;
-
-	PSJQ_UNLOCK(pq, s);
+	pq->pq_pte = 0;
 }
 
 /*
diff --git a/sys/arch/alpha/alpha/process_machdep.c b/sys/arch/alpha/alpha/process_machdep.c
index a385d98c63d..6fe711eb74a 100644
--- a/sys/arch/alpha/alpha/process_machdep.c
+++ b/sys/arch/alpha/alpha/process_machdep.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: process_machdep.c,v 1.11 2005/12/12 19:44:30 miod Exp $	*/
+/*	$OpenBSD: process_machdep.c,v 1.12 2014/01/26 17:40:09 miod Exp $	*/
 /*	$NetBSD: process_machdep.c,v 1.7 1996/07/11 20:14:21 cgd Exp $	*/
 
 /*-
@@ -112,11 +112,8 @@ process_read_fpregs(p, regs)
 	struct fpreg *regs;
 {
 
-	if (p == fpcurproc) {
-		alpha_pal_wrfen(1);
-		savefpstate(process_fpframe(p));
-		alpha_pal_wrfen(0);
-	}
+	if (p->p_addr->u_pcb.pcb_fpcpu != NULL)
+		fpusave_proc(p, 1);
 
 	bcopy(process_fpframe(p), regs, sizeof(struct fpreg));
 	return (0);
@@ -154,7 +151,7 @@ process_write_fpregs(p, regs)
 {
 
 	if (p->p_addr->u_pcb.pcb_fpcpu != NULL)
-		fpusave_proc(p, 1);
+		fpusave_proc(p, 0);
 
 	bcopy(regs, process_fpframe(p), sizeof(struct fpreg));
 	return (0);
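The process_machdep.c hunks track the switch to lazy, per-CPU FPU ownership: a proc's live floating-point state may be sitting in the registers of whichever CPU last ran it (pcb_fpcpu). Reading the registers must write that state back first; overwriting them only needs the owner to drop it. A toy model of the distinction, assuming fpusave_proc()'s second argument means "save the live state" — the helper below is illustrative, not the kernel implementation:

/* Toy model of lazy FPU ownership: at most one owner CPU per proc. */
struct toy_proc {
	struct toy_cpu *fpcpu;		/* CPU holding our live FP state */
	double fpstate[32];		/* saved copy in the "PCB" */
};

static void toy_savefpstate(struct toy_proc *p);	/* stub: spill regs */

static void
toy_fpsync(struct toy_proc *p, int save)
{
	if (p->fpcpu == NULL)
		return;			/* state already in fpstate[] */
	if (save)
		toy_savefpstate(p);	/* needed before reading fpstate[] */
	p->fpcpu = NULL;		/* owner forgets; discard is free */
}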
diff --git a/sys/arch/alpha/alpha/trap.c b/sys/arch/alpha/alpha/trap.c
index 878ca3fb32c..87e3cf22096 100644
--- a/sys/arch/alpha/alpha/trap.c
+++ b/sys/arch/alpha/alpha/trap.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: trap.c,v 1.65 2014/01/06 20:27:44 miod Exp $	*/
+/*	$OpenBSD: trap.c,v 1.66 2014/01/26 17:40:09 miod Exp $	*/
 /*	$NetBSD: trap.c,v 1.52 2000/05/24 16:48:33 thorpej Exp $	*/
 
 /*-
@@ -239,7 +239,7 @@ trap(a0, a1, a2, entry, framep)
 	vm_prot_t ftype;
 	unsigned long onfault;
 
-	uvmexp.traps++;
+	atomic_add_int(&uvmexp.traps, 1);
 	p = curproc;
 	ucode = 0;
 	v = 0;
@@ -256,7 +256,10 @@ trap(a0, a1, a2, entry, framep)
 		 */
 		if (user) {
 #ifndef SMALL_KERNEL
-			if ((i = unaligned_fixup(a0, a1, a2, p)) == 0)
+			KERNEL_LOCK();
+			i = unaligned_fixup(a0, a1, a2, p);
+			KERNEL_UNLOCK();
+			if (i == 0)
 				goto out;
 #endif
 
@@ -343,7 +346,10 @@ trap(a0, a1, a2, entry, framep)
 			break;
 
 		case ALPHA_IF_CODE_OPDEC:
-			if ((i = handle_opdec(p, &ucode)) == 0)
+			KERNEL_LOCK();
+			i = handle_opdec(p, &ucode);
+			KERNEL_UNLOCK();
+			if (i == 0)
 				goto out;
 			break;
 
@@ -364,15 +370,17 @@ trap(a0, a1, a2, entry, framep)
 		case ALPHA_MMCSR_FOR:
 		case ALPHA_MMCSR_FOE:
 		case ALPHA_MMCSR_FOW:
+			KERNEL_LOCK();
 			if (pmap_emulate_reference(p, a0, user, a1)) {
 				ftype = VM_PROT_EXECUTE;
 				goto do_fault;
 			}
+			KERNEL_UNLOCK();
 			goto out;
 
 		case ALPHA_MMCSR_INVALTRANS:
 		case ALPHA_MMCSR_ACCESS:
-		{
+		    {
 			vaddr_t va;
 			struct vmspace *vm = NULL;
 			struct vm_map *map;
@@ -391,6 +399,7 @@ trap(a0, a1, a2, entry, framep)
 				break;
 			}
 
+			KERNEL_LOCK();
 do_fault:
 			/*
 			 * It is only a kernel address space fault iff:
@@ -433,6 +442,7 @@ do_fault:
 				rv = EFAULT;
 			}
 			if (rv == 0) {
+				KERNEL_UNLOCK();
 				goto out;
 			}
 
@@ -443,10 +453,13 @@ do_fault:
 					framep->tf_regs[FRAME_PC] =
 					    p->p_addr->u_pcb.pcb_onfault;
 					p->p_addr->u_pcb.pcb_onfault = 0;
+					KERNEL_UNLOCK();
 					goto out;
 				}
+				KERNEL_UNLOCK();
 				goto dopanic;
 			}
+			KERNEL_UNLOCK();
 			ucode = ftype;
 			v = (caddr_t)a0;
 			typ = SEGV_MAPERR;
@@ -476,7 +489,9 @@ do_fault:
 		printtrap(a0, a1, a2, entry, framep, 1, user);
 #endif
 	sv.sival_ptr = v;
+	KERNEL_LOCK();
 	trapsignal(p, i, ucode, typ, sv);
+	KERNEL_UNLOCK();
 out:
 	if (user) {
 		/* Do any deferred user pmap operations. */
@@ -530,7 +545,7 @@ syscall(code, framep)
 	u_long args[10];					/* XXX */
 	u_int hidden, nargs;
 
-	uvmexp.syscalls++;
+	atomic_add_int(&uvmexp.syscalls, 1);
 	p = curproc;
 	p->p_md.md_tf = framep;
 	opc = framep->tf_regs[FRAME_PC] - 4;
@@ -628,6 +643,8 @@ child_return(arg)
 	framep->tf_regs[FRAME_A4] = 0;
 	framep->tf_regs[FRAME_A3] = 0;
 
+	KERNEL_UNLOCK();
+
 	/* Do any deferred user pmap operations. */
 	PMAP_USERRET(vm_map_pmap(&p->p_vmspace->vm_map));
 
@@ -643,6 +660,9 @@ void
 alpha_enable_fp(struct proc *p, int check)
 {
 	struct cpu_info *ci = curcpu();
+#if defined(MULTIPROCESSOR)
+	int s;
+#endif
 
 	if (check && ci->ci_fpcurproc == p) {
 		alpha_pal_wrfen(1);
@@ -663,9 +683,16 @@ alpha_enable_fp(struct proc *p, int check)
 	KDASSERT(p->p_addr->u_pcb.pcb_fpcpu == NULL);
 #endif
 
+#if defined(MULTIPROCESSOR)
+	/* Need to block IPIs */
+	s = splhigh();
+#endif
 	p->p_addr->u_pcb.pcb_fpcpu = ci;
 	ci->ci_fpcurproc = p;
-	uvmexp.fpswtch++;
+#if defined(MULTIPROCESSOR)
+	splx(s);
+#endif
+	atomic_add_int(&uvmexp.fpswtch, 1);
 
 	p->p_md.md_flags |= MDP_FPUSED;
 	alpha_pal_wrfen(1);
@@ -691,10 +718,12 @@ ast(framep)
 		panic("ast and not user");
 #endif
 
-	uvmexp.softs++;
+	atomic_add_int(&uvmexp.softs, 1);
 	if (p->p_flag & P_OWEUPC) {
+		KERNEL_LOCK();
 		ADDUPROF(p);
+		KERNEL_UNLOCK();
 	}
 
 	if (ci->ci_want_resched)
diff --git a/sys/arch/alpha/compile/.cvsignore b/sys/arch/alpha/compile/.cvsignore
index ea4818e02b3..f9df98b101c 100644
--- a/sys/arch/alpha/compile/.cvsignore
+++ b/sys/arch/alpha/compile/.cvsignore
@@ -1,4 +1,5 @@
 GENERIC
+GENERIC.MP
 RAMDISK
 RAMDISKB
 RAMDISKC
diff --git a/sys/arch/alpha/conf/GENERIC.MP b/sys/arch/alpha/conf/GENERIC.MP
new file mode 100644
index 00000000000..8577c8ebb9f
--- /dev/null
+++ b/sys/arch/alpha/conf/GENERIC.MP
@@ -0,0 +1,8 @@
+#	$OpenBSD: GENERIC.MP,v 1.1 2014/01/26 17:40:11 miod Exp $
+
+include "arch/alpha/conf/GENERIC"
+
+option	MULTIPROCESSOR
+#option	MP_LOCKDEBUG
+
+cpu*	at mainbus?
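The trap.c hunks above show the coarse-grained locking idiom used throughout this commit: cheap statistics counters become lock-free atomics, while code paths that are not yet MP-safe are simply bracketed by the big kernel lock. A minimal sketch of the pattern — example_trap_path() is hypothetical:

/*
 * Sketch of the "big lock" idiom: atomics for counters, one kernel-wide
 * lock for everything not yet made MP-safe.
 */
void
example_trap_path(void)
{
	atomic_add_int(&uvmexp.traps, 1);	/* safe without any lock */

	KERNEL_LOCK();
	/* ... anything touching shared, otherwise unlocked state ... */
	KERNEL_UNLOCK();
}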
diff --git a/sys/arch/alpha/conf/files.alpha b/sys/arch/alpha/conf/files.alpha
index 0f724285b02..7182b685a10 100644
--- a/sys/arch/alpha/conf/files.alpha
+++ b/sys/arch/alpha/conf/files.alpha
@@ -1,4 +1,4 @@
-#	$OpenBSD: files.alpha,v 1.97 2013/11/04 14:07:15 deraadt Exp $
+#	$OpenBSD: files.alpha,v 1.98 2014/01/26 17:40:11 miod Exp $
 #	$NetBSD: files.alpha,v 1.32 1996/11/25 04:03:21 cgd Exp $
 #
 # alpha-specific configuration info
@@ -300,6 +300,7 @@
 file	arch/alpha/alpha/db_interface.c		ddb
 file	arch/alpha/alpha/db_trace.c		ddb
 file	arch/alpha/alpha/interrupt.c
 file	arch/alpha/alpha/ipifuncs.c		multiprocessor
+file	arch/alpha/alpha/lock_machdep.c		multiprocessor
 file	arch/alpha/alpha/machdep.c
 file	arch/alpha/alpha/mainbus.c
 file	arch/alpha/alpha/mem.c
diff --git a/sys/arch/alpha/include/atomic.h b/sys/arch/alpha/include/atomic.h
index df807e4f0c2..7198176e944 100644
--- a/sys/arch/alpha/include/atomic.h
+++ b/sys/arch/alpha/include/atomic.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: atomic.h,v 1.10 2011/11/25 05:25:00 miod Exp $	*/
+/*	$OpenBSD: atomic.h,v 1.11 2014/01/26 17:40:11 miod Exp $	*/
 /*	$NetBSD: atomic.h,v 1.7 2001/12/17 23:34:57 thorpej Exp $	*/
 
 /*-
@@ -224,5 +224,31 @@ atomic_clearbits_int(__volatile unsigned int *uip, unsigned int v)
 	    : "memory");
 }
 
+/*
+ * atomic_add_int:
+ *
+ *	Atomically add a value to an `int'.
+ */
+static __inline void
+atomic_add_int(__volatile int *ulp, int v)
+{
+	unsigned long t0;
+
+	__asm __volatile(
+		"# BEGIN atomic_add_int\n"
+		"1:	ldl_l	%0, %1		\n"
+		"	addl	%0, %2, %0	\n"
+		"	stl_c	%0, %1		\n"
+		"	beq	%0, 2f		\n"
+		"	mb			\n"
+		"	br	3f		\n"
+		"2:	br	1b		\n"
+		"3:				\n"
+		"	# END atomic_add_int"
+		: "=&r" (t0), "=m" (*ulp)
+		: "r" (v)
+		: "memory");
+}
+
 #endif /* defined(_KERNEL) */
 #endif /* _MACHINE_ATOMIC_H_ */
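The ldl_l/stl_c pair in atomic_add_int() is Alpha's load-locked/store-conditional: stl_c overwrites its source register with a success flag, so the beq branches back and retries whenever another CPU touched the line between the load and the store. For illustration only, the same retry loop expressed as portable C11:

#include <stdatomic.h>

/* What the ldl_l/stl_c loop above computes, as a C11 CAS loop. */
static inline void
c11_add_int(_Atomic int *p, int v)
{
	int old = atomic_load_explicit(p, memory_order_relaxed);

	/* A failed CAS here corresponds to a failed stl_c branching
	 * back to the ldl_l. */
	while (!atomic_compare_exchange_weak(p, &old, old + v))
		continue;
}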
diff --git a/sys/arch/alpha/include/cpu.h b/sys/arch/alpha/include/cpu.h
index 8f9e454e4a4..8b2f93fa2b8 100644
--- a/sys/arch/alpha/include/cpu.h
+++ b/sys/arch/alpha/include/cpu.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: cpu.h,v 1.50 2013/05/31 17:00:57 tedu Exp $	*/
+/*	$OpenBSD: cpu.h,v 1.51 2014/01/26 17:40:11 miod Exp $	*/
 /*	$NetBSD: cpu.h,v 1.45 2000/08/21 02:03:12 thorpej Exp $	*/
 
 /*-
@@ -98,6 +98,7 @@ typedef union alpha_t_float {
 #include <machine/frame.h>
 #include <machine/bus.h>
 #include <machine/intr.h>
+#include <sys/cdefs.h>
 #include <sys/device.h>
 #include <sys/sched.h>
 
@@ -155,6 +156,8 @@ struct cpu_info;
 int	cpu_iccb_send(cpuid_t, const char *);
 void	cpu_iccb_receive(void);
 void	cpu_hatch(struct cpu_info *);
+__dead
+void	cpu_halt(void);
 void	cpu_halt_secondary(unsigned long);
 void	cpu_spinup_trampoline(void);	/* MAGIC */
 void	cpu_pause(unsigned long);
@@ -169,7 +172,12 @@ struct mchkinfo {
 };
 
 struct cpu_info {
-	struct device *ci_dev;		/* pointer to our device */
+	/*
+	 * Private members accessed in assembly with 8 bit offsets.
+	 */
+	struct proc *ci_curproc;	/* current owner of the processor */
+	paddr_t ci_curpcb;		/* PA of current HW PCB */
+
 	/*
 	 * Public members.
 	 */
@@ -177,55 +185,54 @@ struct cpu_info {
 #ifdef DIAGNOSTIC
 	int ci_mutex_level;
 #endif
-	struct proc *ci_curproc;	/* current owner of the processor */
 	struct simplelock ci_slock;	/* lock on this data structure */
 	cpuid_t ci_cpuid;		/* our CPU ID */
 	struct cpu_info *ci_next;
+	u_int32_t ci_randseed;
 
 	/*
 	 * Private members.
 	 */
 	struct mchkinfo ci_mcinfo;	/* machine check info */
 	struct proc *ci_fpcurproc;	/* current owner of the FPU */
-	paddr_t ci_curpcb;		/* PA of current HW PCB */
 	struct pcb *ci_idle_pcb;	/* our idle PCB */
 	paddr_t ci_idle_pcb_paddr;	/* PA of idle PCB */
-	struct cpu_softc *ci_softc;	/* pointer to our device */
+	struct device *ci_dev;		/* pointer to our device */
 	u_long ci_want_resched;		/* preempt current process */
 	u_long ci_intrdepth;		/* interrupt trap depth */
 	struct trapframe *ci_db_regs;	/* registers for debuggers */
+
 #if defined(MULTIPROCESSOR)
-	u_long ci_flags;		/* flags; see below */
-	u_long ci_ipis;			/* interprocessor interrupts pending */
+	__volatile u_long ci_flags;	/* flags; see below */
+	__volatile u_long ci_ipis;	/* interprocessor interrupts pending */
 #endif
-	u_int32_t ci_randseed;
 #ifdef GPROF
 	struct gmonparam *ci_gmon;
 #endif
 };
 
 #define	CPUF_PRIMARY	0x01		/* CPU is primary CPU */
-#define	CPUF_PRESENT	0x02		/* CPU is present */
-#define	CPUF_RUNNING	0x04		/* CPU is running */
-#define	CPUF_PAUSED	0x08		/* CPU is paused */
-#define	CPUF_FPUSAVE	0x10		/* CPU is currently in fpusave_cpu() */
+#define	CPUF_RUNNING	0x02		/* CPU is running */
+#define	CPUF_PAUSED	0x04		/* CPU is paused */
+#define	CPUF_FPUSAVE	0x08		/* CPU is currently in fpusave_cpu() */
 
 void	fpusave_cpu(struct cpu_info *, int);
 void	fpusave_proc(struct proc *, int);
 
+extern struct cpu_info cpu_info_primary;
+extern struct cpu_info *cpu_info_list;
+
 #define	CPU_INFO_UNIT(ci)	((ci)->ci_dev ? (ci)->ci_dev->dv_unit : 0)
 #define	CPU_INFO_ITERATOR	int
-#define	CPU_INFO_FOREACH(cii, ci)	for (cii = 0, ci = curcpu(); \
+#define	CPU_INFO_FOREACH(cii, ci)	for (cii = 0, ci = cpu_info_list; \
					    ci != NULL; ci = ci->ci_next)
 
 #define MAXCPUS	ALPHA_MAXPROCS
 
-#define cpu_unidle(ci)
-
 #if defined(MULTIPROCESSOR)
 extern	__volatile u_long cpus_running;
 extern	__volatile u_long cpus_paused;
-extern	struct cpu_info cpu_info[];
+extern	struct cpu_info *cpu_info[];
 
 #define	curcpu()	((struct cpu_info *)alpha_pal_rdval())
 #define	CPU_IS_PRIMARY(ci)	((ci)->ci_flags & CPUF_PRIMARY)
@@ -234,11 +241,14 @@
 void	cpu_boot_secondary_processors(void);
 void	cpu_pause_resume(unsigned long, int);
 void	cpu_pause_resume_all(int);
 
+void	cpu_unidle(struct cpu_info *);
+
 #else /* ! MULTIPROCESSOR */
 
-extern	struct cpu_info cpu_info_store;
-#define	curcpu()	(&cpu_info_store)
-#define	CPU_IS_PRIMARY(ci)	1
+#define	curcpu()	(&cpu_info_primary)
+#define	CPU_IS_PRIMARY(ci)	1
+#define	cpu_unidle(ci)	do { /* nothing */ } while (0)
+
 #endif /* MULTIPROCESSOR */
 
 #define	curproc		curcpu()->ci_curproc
@@ -305,12 +315,6 @@ do {									\
 #define	signotify(p)	aston(p)
 #endif
 
-/*
- * XXXSMP
- * Should we send an AST IPI?  Or just let it handle it next time
- * it sees a normal kernel entry?  I guess letting it happen later
- * follows the `asynchronous' part of the name...
- */
 #define	aston(p)	(p)->p_md.md_astpending = 1
 
 #endif /* _KERNEL */
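cpu_info structures are now chained through ci_next starting at cpu_info_list, so CPU_INFO_FOREACH walks every attached CPU rather than starting from curcpu(). A sketch of the iterator in use — the function is illustrative only, and ci_flags is only present on MULTIPROCESSOR kernels:

/* Sketch: counting running CPUs with the reworked iterator. */
int
example_count_running(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	int n = 0;

	CPU_INFO_FOREACH(cii, ci) {
		if (ci->ci_flags & CPUF_RUNNING)
			n++;
	}
	return (n);
}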
diff --git a/sys/arch/alpha/include/db_machdep.h b/sys/arch/alpha/include/db_machdep.h
index 87942a1e499..7456b8f12be 100644
--- a/sys/arch/alpha/include/db_machdep.h
+++ b/sys/arch/alpha/include/db_machdep.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: db_machdep.h,v 1.21 2011/03/23 16:54:34 pirofti Exp $	*/
+/*	$OpenBSD: db_machdep.h,v 1.22 2014/01/26 17:40:11 miod Exp $	*/
 
 /*
  * Copyright (c) 1997 Niklas Hallqvist.  All rights reserved.
@@ -99,4 +99,6 @@ db_addr_t	next_instr_address(db_addr_t, int);
 #define	inst_call	db_inst_call
 #endif
 
+#define DB_MACHINE_COMMANDS
+
 #endif /* _MACHINE_DB_MACHDEP_H_ */
diff --git a/sys/arch/alpha/include/intr.h b/sys/arch/alpha/include/intr.h
index ad34ba1f2da..eb92b1e81f8 100644
--- a/sys/arch/alpha/include/intr.h
+++ b/sys/arch/alpha/include/intr.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: intr.h,v 1.40 2013/05/17 19:38:51 kettenis Exp $	*/
+/*	$OpenBSD: intr.h,v 1.41 2014/01/26 17:40:11 miod Exp $	*/
 /*	$NetBSD: intr.h,v 1.26 2000/06/03 20:47:41 thorpej Exp $	*/
 
 /*-
@@ -118,6 +118,7 @@ struct scbvec {
 #define	IPL_VM		ALPHA_PSL_IPL_IO
 #define	IPL_CLOCK	ALPHA_PSL_IPL_CLOCK
 #define	IPL_SCHED	ALPHA_PSL_IPL_HIGH
+#define	IPL_IPI		ALPHA_PSL_IPL_HIGH	/* occur on _IO, though */
 #define	IPL_HIGH	ALPHA_PSL_IPL_HIGH
 
 #define	IPL_SOFTSERIAL	0	/* serial software interrupts */
@@ -173,15 +174,16 @@ int _splraise(int);
 #define	splsoftserial()	splsoft()
 #define	splsoftclock()	splsoft()
 #define	splsoftnet()	splsoft()
-#define splnet()	_splraise(IPL_NET)
-#define splbio()	_splraise(IPL_BIO)
-#define spltty()	_splraise(IPL_TTY)
-#define splserial()	_splraise(IPL_SERIAL)
+#define	splnet()	_splraise(IPL_NET)
+#define	splbio()	_splraise(IPL_BIO)
+#define	spltty()	_splraise(IPL_TTY)
+#define	splserial()	_splraise(IPL_SERIAL)
 #define	splaudio()	_splraise(IPL_AUDIO)
 #define	splvm()		_splraise(IPL_VM)
-#define splclock()	_splraise(IPL_CLOCK)
-#define splstatclock()	_splraise(IPL_CLOCK)
-#define splhigh()	_splraise(IPL_HIGH)
+#define	splclock()	_splraise(IPL_CLOCK)
+#define	splstatclock()	_splraise(IPL_CLOCK)
+#define	splipi()	_splraise(IPL_IPI)
+#define	splhigh()	_splraise(IPL_HIGH)
 #define	spllock()	splhigh()
 #define	splsched()	splhigh()
 
@@ -189,21 +191,20 @@ int _splraise(int);
 /*
  * Interprocessor interrupts.  In order how we want them processed.
  */
-#define	ALPHA_IPI_HALT		0x0000000000000001UL
-#define	ALPHA_IPI_TBIA		0x0000000000000002UL
-#define	ALPHA_IPI_TBIAP		0x0000000000000004UL
-#define	ALPHA_IPI_SHOOTDOWN	0x0000000000000008UL
-#define	ALPHA_IPI_IMB		0x0000000000000010UL
-#define	ALPHA_IPI_AST		0x0000000000000020UL
-#define	ALPHA_IPI_SYNCH_FPU	0x0000000000000040UL
-#define	ALPHA_IPI_DISCARD_FPU	0x0000000000000080UL
-#define	ALPHA_IPI_PAUSE		0x0000000000000100UL
+#define	ALPHA_IPI_HALT		(1UL << 0)
+#define	ALPHA_IPI_SHOOTDOWN	(1UL << 1)
+#define	ALPHA_IPI_IMB		(1UL << 2)
+#define	ALPHA_IPI_AST		(1UL << 3)
+#define	ALPHA_IPI_SYNCH_FPU	(1UL << 4)
+#define	ALPHA_IPI_DISCARD_FPU	(1UL << 5)
+#define	ALPHA_IPI_PAUSE		(1UL << 6)
 
-#define	ALPHA_NIPIS		6	/* must not exceed 64 */
+#define	ALPHA_NIPIS		7	/* must not exceed 64 */
 
-typedef void (*ipifunc_t)(void);
-extern	ipifunc_t ipifuncs[ALPHA_NIPIS];
+struct cpu_info;
+struct trapframe;
 
+void	alpha_ipi_process(struct cpu_info *, struct trapframe *);
 void	alpha_send_ipi(unsigned long, unsigned long);
 void	alpha_broadcast_ipi(unsigned long);
 void	alpha_multicast_ipi(unsigned long, unsigned long);
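With the intr.h change, pending IPIs are bits in ci_ipis and are processed in bit order, lowest (highest priority, defined first) to highest. A sketch of what a dispatcher like alpha_ipi_process() has to do — the body is an assumption-laden illustration, and a real implementation must fetch-and-clear ci_ipis atomically rather than in two steps as shown:

/* Sketch only: draining pending IPI bits in priority order. */
void
example_ipi_process(struct cpu_info *ci)
{
	u_long pending = ci->ci_ipis;
	int bit;

	/* Racy as written; the real code needs an atomic load-and-clear. */
	atomic_clearbits_ulong(&ci->ci_ipis, pending);

	for (bit = 0; bit < ALPHA_NIPIS; bit++) {
		if (pending & (1UL << bit)) {
			/* invoke the handler for this IPI, e.g. a
			 * TLB shootdown for ALPHA_IPI_SHOOTDOWN */
		}
	}
}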
Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_MPLOCK_H_
+#define _MACHINE_MPLOCK_H_
+
+/*
+ * Really simple spinlock implementation with recursive capabilities.
+ * Correctness is paramount, no fanciness allowed.
+ */
+
+struct __mp_lock {
+	volatile struct cpu_info *mpl_cpu;
+	volatile long mpl_count;
+};
+
+#ifndef _LOCORE
+
+void	__mp_lock_init(struct __mp_lock *);
+void	__mp_lock(struct __mp_lock *);
+void	__mp_unlock(struct __mp_lock *);
+int	__mp_release_all(struct __mp_lock *);
+int	__mp_release_all_but_one(struct __mp_lock *);
+void	__mp_acquire_count(struct __mp_lock *, int);
+int	__mp_lock_held(struct __mp_lock *);
+
+#endif
+
+#endif /* !_MACHINE_MPLOCK_H_ */
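The __mp_lock declared above is recursive by construction: it records the owning cpu_info and a depth count, so the owner may relock without deadlocking. A toy model of the acquire path under those assumptions — the real implementation lives in lock_machdep.c, and try_set_owner() below stands in for an atomic LL/SC acquire with the proper memory barriers:

/* Toy model of the recursive big lock; not the kernel code. */
void
example_mp_lock(struct __mp_lock *mpl)
{
	struct cpu_info *self = curcpu();

	if (mpl->mpl_cpu == self) {
		/* Recursive entry by the owner: just bump the depth. */
		mpl->mpl_count++;
		return;
	}

	/* try_set_owner() is hypothetical: CAS mpl_cpu from NULL to self. */
	while (!try_set_owner(&mpl->mpl_cpu, self))
		;	/* spin until the current owner releases */

	mpl->mpl_count = 1;
}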
diff --git a/sys/arch/alpha/include/mutex.h b/sys/arch/alpha/include/mutex.h
index 90f216a2c12..0485ccb90ac 100644
--- a/sys/arch/alpha/include/mutex.h
+++ b/sys/arch/alpha/include/mutex.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: mutex.h,v 1.3 2011/03/23 16:54:34 pirofti Exp $	*/
+/*	$OpenBSD: mutex.h,v 1.4 2014/01/26 17:40:11 miod Exp $	*/
 
 /*
  * Copyright (c) 2004 Artur Grabowski <art@openbsd.org>
@@ -28,18 +28,16 @@
 #ifndef _MACHINE_MUTEX_H_
 #define _MACHINE_MUTEX_H_
 
-/*
- * Simple non-mp implementation.
- */
 struct mutex {
 	int mtx_lock;
 	int mtx_wantipl;
 	int mtx_oldipl;
+	void *mtx_owner;
 };
 
 void mtx_init(struct mutex *, int);
 
-#define MUTEX_INITIALIZER(ipl) { 0, (ipl), 0 }
+#define MUTEX_INITIALIZER(ipl) { 0, (ipl), IPL_NONE, NULL }
 
 #ifdef DIAGNOSTIC
 #define MUTEX_ASSERT_LOCKED(mtx) do {					\
diff --git a/sys/arch/alpha/include/pmap.h b/sys/arch/alpha/include/pmap.h
index ecad132d406..4cef887f13e 100644
--- a/sys/arch/alpha/include/pmap.h
+++ b/sys/arch/alpha/include/pmap.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: pmap.h,v 1.30 2014/01/05 14:37:08 miod Exp $	*/
+/*	$OpenBSD: pmap.h,v 1.31 2014/01/26 17:40:11 miod Exp $	*/
 /*	$NetBSD: pmap.h,v 1.37 2000/11/19 03:16:35 thorpej Exp $	*/
 
 /*-
@@ -90,27 +90,37 @@
 * The kernel pmap is a special case; it gets statically-allocated
 * arrays which hold enough for ALPHA_MAXPROCS.
 */
+struct pmap_asn_info {
+	unsigned int		pma_asn;	/* address space number */
+	unsigned long		pma_asngen;	/* ASN generation number */
+};
+
 struct pmap {
 	TAILQ_ENTRY(pmap)	pm_list;	/* list of all pmaps */
 	pt_entry_t		*pm_lev1map;	/* level 1 map */
 	int			pm_count;	/* pmap reference count */
 	struct simplelock	pm_slock;	/* lock on pmap */
 	struct pmap_statistics	pm_stats;	/* pmap statistics */
-	long			pm_nlev2;	/* level 2 pt page count */
-	long			pm_nlev3;	/* level 3 pt page count */
-	unsigned int		*pm_asn;	/* address space number */
-	unsigned long		*pm_asngen;	/* ASN generation number */
 	unsigned long		pm_cpus;	/* mask of CPUs using pmap */
 	unsigned long		pm_needisync;	/* mask of CPUs needing isync */
+	struct pmap_asn_info	pm_asni[1];	/* ASN information */
+			/* variable length */
 };
-
 typedef struct pmap	*pmap_t;
 
+/*
+ * Compute the size of a pmap structure.  Subtract one because one
+ * ASN info structure is already included in the pmap structure itself.
+ */
+#define	PMAP_SIZEOF(x)							\
+	(ALIGN(sizeof(struct pmap) +					\
+	    (sizeof(struct pmap_asn_info) * ((x) - 1))))
+
 #define	PMAP_ASN_RESERVED	0	/* reserved for Lev1map users */
 
-extern struct pmap kernel_pmap_store;
+extern struct pmap kernel_pmap_store[];
 
-#define pmap_kernel()	(&kernel_pmap_store)
+#define pmap_kernel()	kernel_pmap_store
 
 /*
 * For each vm_page_t, there is a list of all currently valid virtual
@@ -150,18 +160,25 @@ typedef struct pv_entry {
 #endif /* NEW_SCC_DRIVER */
 
 #if defined(MULTIPROCESSOR)
-void	pmap_tlb_shootdown(pmap_t, vaddr_t, pt_entry_t);
+void	pmap_tlb_shootdown(pmap_t, vaddr_t, pt_entry_t, u_long *);
+void	pmap_tlb_shootnow(u_long);
 void	pmap_do_tlb_shootdown(struct cpu_info *, struct trapframe *);
-void	pmap_tlb_shootdown_q_drain(u_long, boolean_t);
+#define	PMAP_TLB_SHOOTDOWN_CPUSET_DECL		u_long shootset = 0;
 #define	PMAP_TLB_SHOOTDOWN(pm, va, pte)					\
-	pmap_tlb_shootdown((pm), (va), (pte))
+	pmap_tlb_shootdown((pm), (va), (pte), &shootset)
+#define	PMAP_TLB_SHOOTNOW()						\
+	pmap_tlb_shootnow(shootset)
 #else
+#define	PMAP_TLB_SHOOTDOWN_CPUSET_DECL		/* nothing */
 #define	PMAP_TLB_SHOOTDOWN(pm, va, pte)		/* nothing */
+#define	PMAP_TLB_SHOOTNOW()			/* nothing */
 #endif /* MULTIPROCESSOR */
 #endif /* _LKM */
 
 #define	pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
 #define	pmap_wired_count(pmap)	((pmap)->pm_stats.wired_count)
+
+#define	pmap_copy(dp, sp, da, l, sa)	/* nothing */
 #define	pmap_update(pmap)		/* nothing (yet) */
 
 #define	pmap_proc_iflush(p, va, len)	/* nothing */
diff --git a/sys/arch/alpha/include/pte.h b/sys/arch/alpha/include/pte.h
index a1dbfae0d39..ff3034d5e73 100644
--- a/sys/arch/alpha/include/pte.h
+++ b/sys/arch/alpha/include/pte.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: pte.h,v 1.11 2014/01/06 20:27:44 miod Exp $	*/
+/*	$OpenBSD: pte.h,v 1.12 2014/01/26 17:40:11 miod Exp $	*/
 /*	$NetBSD: pte.h,v 1.26 1999/04/09 00:38:11 thorpej Exp $	*/
 
 /*-
@@ -98,7 +98,7 @@ typedef alpha_pt_entry_t pt_entry_t;
 #define	PG_WIRED	0x0000000000010000	/* Wired. [SOFTWARE] */
 #define	PG_PVLIST	0x0000000000020000	/* on pv list [SOFTWARE] */
 #define	PG_EXEC		0x0000000000040000	/* execute perms [SOFTWARE] */
-#define	PG_FRAME	ALPHA_PTE_FRAME
+#define	PG_FRAME	ALPHA_PTE_PFN
 #define	PG_SHIFT	32
 #define	PG_PFNUM(x)	ALPHA_PTE_TO_PFN(x)
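The pmap now ends in a variable-length array of per-CPU ASN slots, and PMAP_SIZEOF() computes the allocation size for a given CPU count, subtracting one because pm_asni[] already declares a single element. A sketch of how the allocation pool would be sized with it — the setup below is illustrative, assuming the usual pool_init() interface of the era:

/*
 * Sketch only: sizing the pmap pool for one pmap_asn_info slot per
 * possible processor.
 */
void
example_pmap_pool_init(void)
{
	pool_init(&pmap_pmap_pool, PMAP_SIZEOF(ALPHA_MAXPROCS), 0, 0, 0,
	    "pmappl", NULL);
}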