Diffstat (limited to 'sys/arch')
-rw-r--r--  sys/arch/macppc/dev/openpic.c     |  10
-rw-r--r--  sys/arch/macppc/macppc/clock.c    |  89
-rw-r--r--  sys/arch/macppc/macppc/cpu.c      |  12
-rw-r--r--  sys/arch/macppc/macppc/locore.S   |  16
-rw-r--r--  sys/arch/macppc/macppc/machdep.c  |  25
-rw-r--r--  sys/arch/powerpc/include/cpu.h    |  11
-rw-r--r--  sys/arch/powerpc/powerpc/pmap.c   | 103
7 files changed, 197 insertions(+), 69 deletions(-)
diff --git a/sys/arch/macppc/dev/openpic.c b/sys/arch/macppc/dev/openpic.c
index f3be80fe373..5429b905212 100644
--- a/sys/arch/macppc/dev/openpic.c
+++ b/sys/arch/macppc/dev/openpic.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: openpic.c,v 1.41 2007/10/27 22:37:03 kettenis Exp $ */
+/* $OpenBSD: openpic.c,v 1.42 2008/04/26 22:37:41 drahn Exp $ */
/*-
* Copyright (c) 1995 Per Fogelstrom
@@ -484,8 +484,10 @@ openpic_do_pending_int()
while(ih) {
ppc_intr_enable(1);
+ KERNEL_LOCK();
if ((*ih->ih_fun)(ih->ih_arg))
ih->ih_count.ec_count++;
+ KERNEL_UNLOCK();
(void)ppc_intr_disable();
@@ -498,7 +500,9 @@ openpic_do_pending_int()
do {
if((ci->ci_ipending & SINT_CLOCK) & ~pcpl) {
ci->ci_ipending &= ~SINT_CLOCK;
+ KERNEL_LOCK();
softclock();
+ KERNEL_UNLOCK();
}
if((ci->ci_ipending & SINT_NET) & ~pcpl) {
extern int netisr;
@@ -507,12 +511,16 @@ openpic_do_pending_int()
ci->ci_ipending &= ~SINT_NET;
while ((pisr = netisr) != 0) {
atomic_clearbits_int(&netisr, pisr);
+ KERNEL_LOCK();
softnet(pisr);
+ KERNEL_UNLOCK();
}
}
if((ci->ci_ipending & SINT_TTY) & ~pcpl) {
ci->ci_ipending &= ~SINT_TTY;
+ KERNEL_LOCK();
softtty();
+ KERNEL_UNLOCK();
}
} while ((ci->ci_ipending & SINT_MASK) & ~pcpl);
ci->ci_ipending &= pcpl;
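
Note: the openpic.c hunks take the big kernel lock only around the bodies of the soft-interrupt handlers, while the surrounding interrupt-mask manipulation stays lock-free. A minimal sketch of that dispatch pattern follows; it is illustrative only, and the ih_next link and the helper name are assumptions, not code from this commit.

    /*
     * Sketch: run each not-yet-MP-safe soft-interrupt handler under the
     * big kernel lock, with external interrupts re-enabled around the
     * handler body and disabled again before touching dispatch state.
     */
    static void
    run_soft_handlers_sketch(struct intrhand *ih)
    {
            while (ih != NULL) {
                    ppc_intr_enable(1);             /* handler runs with interrupts on */
                    KERNEL_LOCK();                  /* serialize against other CPUs */
                    if ((*ih->ih_fun)(ih->ih_arg))
                            ih->ih_count.ec_count++;
                    KERNEL_UNLOCK();
                    (void)ppc_intr_disable();       /* back to the protected loop */
                    ih = ih->ih_next;               /* ih_next assumed */
            }
    }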
diff --git a/sys/arch/macppc/macppc/clock.c b/sys/arch/macppc/macppc/clock.c
index 98f7a66c4a4..f207f407c4c 100644
--- a/sys/arch/macppc/macppc/clock.c
+++ b/sys/arch/macppc/macppc/clock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: clock.c,v 1.20 2007/11/24 15:24:54 mbalmer Exp $ */
+/* $OpenBSD: clock.c,v 1.21 2008/04/26 22:37:41 drahn Exp $ */
/* $NetBSD: clock.c,v 1.1 1996/09/30 16:34:40 ws Exp $ */
/*
@@ -54,7 +54,6 @@ u_int tb_get_timecount(struct timecounter *);
static u_int32_t ticks_per_sec = 3125000;
static u_int32_t ns_per_tick = 320;
static int32_t ticks_per_intr;
-static volatile u_int64_t lasttb;
static struct timecounter tb_timecounter = {
tb_get_timecount, NULL, 0x7fffffff, 0, "tb", 0, NULL
@@ -69,9 +68,6 @@ extern char *hw_prod;
time_read_t *time_read;
time_write_t *time_write;
-/* event tracking variables, when the next events of each time should occur */
-u_int64_t nexttimerevent, prevtb, nextstatevent;
-
/* vars for stats */
int statint;
u_int32_t statvar;
@@ -82,6 +78,7 @@ static struct evcount stat_count;
static int clk_irq = PPC_CLK_IRQ;
static int stat_irq = PPC_STAT_IRQ;
+
/*
* Set up the system's time, given a `reasonable' time value.
*/
@@ -180,13 +177,13 @@ resettodr(void)
}
}
-volatile int statspending;
void
decr_intr(struct clockframe *frame)
{
u_int64_t tb;
u_int64_t nextevent;
+ struct cpu_info *ci = curcpu();
int nstats;
int s;
@@ -196,33 +193,32 @@ decr_intr(struct clockframe *frame)
if (!ticks_per_intr)
return;
-
/*
* Based on the actual time delay since the last decrementer reload,
* we arrange for earlier interrupt next time.
*/
tb = ppc_mftb();
- while (nexttimerevent <= tb)
- nexttimerevent += ticks_per_intr;
+ while (ci->ci_nexttimerevent <= tb)
+ ci->ci_nexttimerevent += ticks_per_intr;
- prevtb = nexttimerevent - ticks_per_intr;
+ ci->ci_prevtb = ci->ci_nexttimerevent - ticks_per_intr;
- for (nstats = 0; nextstatevent <= tb; nstats++) {
+ for (nstats = 0; ci->ci_nextstatevent <= tb; nstats++) {
int r;
do {
r = random() & (statvar -1);
} while (r == 0); /* random == 0 not allowed */
- nextstatevent += statmin + r;
+ ci->ci_nextstatevent += statmin + r;
}
/* only count timer ticks for CLK_IRQ */
stat_count.ec_count += nstats;
- if (nexttimerevent < nextstatevent)
- nextevent = nexttimerevent;
+ if (ci->ci_nexttimerevent < ci->ci_nextstatevent)
+ nextevent = ci->ci_nexttimerevent;
else
- nextevent = nextstatevent;
+ nextevent = ci->ci_nextstatevent;
/*
* Need to work about the near constant skew this introduces???
@@ -231,10 +227,12 @@ decr_intr(struct clockframe *frame)
ppc_mtdec(nextevent - tb);
if (curcpu()->ci_cpl & SPL_CLOCK) {
- statspending += nstats;
+ ci->ci_statspending += nstats;
} else {
- nstats += statspending;
- statspending = 0;
+ KERNEL_LOCK();
+
+ nstats += ci->ci_statspending;
+ ci->ci_statspending = 0;
s = splclock();
@@ -245,20 +243,10 @@ decr_intr(struct clockframe *frame)
/*
* Do standard timer interrupt stuff.
- * Do softclock stuff only on the last iteration.
*/
- frame->pri = s | SINT_CLOCK;
- while (lasttb < prevtb - ticks_per_intr) {
- /* sync lasttb with hardclock */
- lasttb += ticks_per_intr;
- clk_count.ec_count++;
- hardclock(frame);
- }
-
- frame->pri = s;
- while (lasttb < prevtb) {
+ while (ci->ci_lasttb < ci->ci_prevtb) {
/* sync lasttb with hardclock */
- lasttb += ticks_per_intr;
+ ci->ci_lasttb += ticks_per_intr;
clk_count.ec_count++;
hardclock(frame);
}
@@ -272,16 +260,17 @@ decr_intr(struct clockframe *frame)
/* if a tick has occurred while dealing with these,
* dont service it now, delay until the next tick.
*/
+ KERNEL_UNLOCK();
}
}
+void cpu_startclock(void);
+
void
cpu_initclocks()
{
int intrstate;
- int r;
int minint;
- u_int64_t nextevent;
u_int32_t first_tb, second_tb;
time_t first_sec, sec;
int calibrate = 0, n;
@@ -326,30 +315,36 @@ cpu_initclocks()
minint = statint / 2 + 100;
while (statvar > minint)
statvar >>= 1;
-
statmin = statint - (statvar >> 1);
- lasttb = ppc_mftb();
- nexttimerevent = lasttb + ticks_per_intr;
- do {
- r = random() & (statvar -1);
- } while (r == 0); /* random == 0 not allowed */
- nextstatevent = lasttb + statmin + r;
-
- if (nexttimerevent < nextstatevent)
- nextevent = nexttimerevent;
- else
- nextevent = nextstatevent;
-
evcount_attach(&clk_count, "clock", (void *)&clk_irq, &evcount_intr);
evcount_attach(&stat_count, "stat", (void *)&stat_irq, &evcount_intr);
+ cpu_startclock();
+
tb_timecounter.tc_frequency = ticks_per_sec;
tc_init(&tb_timecounter);
-
- ppc_mtdec(nextevent-lasttb);
ppc_intr_enable(intrstate);
}
+void
+cpu_startclock()
+{
+ struct cpu_info *ci = curcpu();
+ u_int64_t nextevent;
+
+ ci->ci_lasttb = ppc_mftb();
+
+ /*
+ * no point in having random on the first tick,
+ * it just complicates the code.
+ */
+ ci->ci_nexttimerevent = ci->ci_lasttb + ticks_per_intr;
+ nextevent = ci->ci_nextstatevent = ci->ci_nexttimerevent;
+
+ ci->ci_statspending = 0;
+
+ ppc_mtdec(nextevent - ci->ci_lasttb);
+}
void
calc_delayconst(void)
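
Note: the clock.c changes move the decrementer bookkeeping from file-scope globals into struct cpu_info, so each processor tracks its own timebase events; ticks_per_intr stays global because every CPU ticks at the same rate. A condensed illustration of the per-CPU reload logic (not the actual decr_intr(); the function name is hypothetical):

    /*
     * Sketch: catch this CPU's timer event up to the current timebase and
     * arm the decrementer for whichever event (timer or stat) comes first.
     */
    static void
    decr_reload_sketch(struct cpu_info *ci)
    {
            u_int64_t tb = ppc_mftb();      /* this CPU's timebase */
            u_int64_t nextevent;

            while (ci->ci_nexttimerevent <= tb)
                    ci->ci_nexttimerevent += ticks_per_intr;
            ci->ci_prevtb = ci->ci_nexttimerevent - ticks_per_intr;

            nextevent = ci->ci_nexttimerevent < ci->ci_nextstatevent ?
                ci->ci_nexttimerevent : ci->ci_nextstatevent;
            ppc_mtdec(nextevent - tb);
    }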
diff --git a/sys/arch/macppc/macppc/cpu.c b/sys/arch/macppc/macppc/cpu.c
index db275c6ea7a..531a222937f 100644
--- a/sys/arch/macppc/macppc/cpu.c
+++ b/sys/arch/macppc/macppc/cpu.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.c,v 1.47 2008/04/23 15:34:18 drahn Exp $ */
+/* $OpenBSD: cpu.c,v 1.48 2008/04/26 22:37:41 drahn Exp $ */
/*
* Copyright (c) 1997 Per Fogelstrom
@@ -598,10 +598,6 @@ cpu_spinup(struct device *self, struct cpu_info *ci)
h->sdr1 = ppc_mfsdr1();
cpu_hatch_data = h;
-#ifdef notyet
- ci->ci_lasttb = curcpu()->ci_lasttb;
-#endif
-
__asm volatile ("sync; isync");
/* XXX OpenPIC */
@@ -668,10 +664,12 @@ cpu_boot_secondary_processors(void)
__asm volatile ("sync");
}
+void cpu_startclock(void);
void
cpu_hatch(void)
{
volatile struct cpu_hatch_data *h = cpu_hatch_data;
+ int intrstate;
int scratch, i, s;
/* Initialize timebase. */
@@ -760,6 +758,10 @@ cpu_hatch(void)
microuptime(&curcpu()->ci_schedstate.spc_runtime);
splx(s);
+ intrstate = ppc_intr_disable();
+ cpu_startclock();
+ ppc_intr_enable(intrstate);
+
SCHED_LOCK(s);
cpu_switchto(NULL, sched_chooseproc());
}
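
Note: with cpu_startclock() split out of cpu_initclocks(), the boot CPU and the secondary CPUs arm their decrementers through the same code path; the cpu_hatch() hunk masks interrupts first so the initial tick cannot arrive before the per-CPU state exists. The call order below is assumed from the hunks in clock.c and cpu.c:

    /*
     * Assumed startup sequence:
     *
     *   boot CPU:   cpu_initclocks()
     *                 -> calibrates ticks_per_sec / statvar
     *                 -> cpu_startclock()      (interrupts already disabled)
     *
     *   secondary:  cpu_hatch()
     *                 -> ppc_intr_disable()
     *                 -> cpu_startclock()      (sets ci_lasttb, loads DEC)
     *                 -> ppc_intr_enable(intrstate)
     */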
diff --git a/sys/arch/macppc/macppc/locore.S b/sys/arch/macppc/macppc/locore.S
index 04e3fe67283..ab873201e58 100644
--- a/sys/arch/macppc/macppc/locore.S
+++ b/sys/arch/macppc/macppc/locore.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.S,v 1.36 2008/02/15 17:33:51 drahn Exp $ */
+/* $OpenBSD: locore.S,v 1.37 2008/04/26 22:37:41 drahn Exp $ */
/* $NetBSD: locore.S,v 1.2 1996/10/16 19:33:09 ws Exp $ */
/*
@@ -142,7 +142,7 @@ _ENTRY(_C_LABEL(cpu_spinup_trampoline))
* void cpu_switchto(struct proc *old, struct proc *new)
* Switch from "old" proc to "new".
*/
-_ENTRY(_C_LABEL(cpu_switchto))
+_ENTRY(_C_LABEL(cpu_switchto_asm))
mflr %r0 /* save lr */
stw %r0,4(%r1)
stwu %r1,-16(%r1)
@@ -159,12 +159,6 @@ _ENTRY(_C_LABEL(cpu_switchto))
/* just did this resched thing, clear resched */
stw %r31,CI_WANT_RESCHED(%r5)
-#ifdef MULTIPROCESSOR
- stw %r5,P_CPU(%r4)
-#endif
-
- stw %r4,CI_CURPROC(%r5) /* record new process */
-
li %r31,SONPROC
stb %r31,P_STAT(%r4)
@@ -185,6 +179,12 @@ switch_exited:
andi. %r30,%r30,~PSL_EE@l
mtmsr %r30
+ stw %r4,CI_CURPROC(%r5) /* record new process */
+
+#ifdef MULTIPROCESSOR
+ stw %r5,P_CPU(%r4)
+#endif
+
lwz %r31,P_ADDR(%r4)
stw %r31,CI_CURPCB(%r5) /* indicate new pcb */
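
Note: the locore.S hunk renames the assembly routine to cpu_switchto_asm (its C wrapper is added in machdep.c below) and moves the curproc/p_cpu stores below the mtmsr that clears PSL_EE. A C-level sketch of why that ordering matters; ppc_mfmsr()/ppc_mtmsr() are hypothetical helpers standing in for the mfmsr/mtmsr instructions in the real assembly.

    /*
     * Sketch: publish the new curproc only after external interrupts are
     * blocked, so an interrupt taken mid-switch can never observe the new
     * proc paired with the old CPU context.
     */
    static void
    publish_new_proc_sketch(struct cpu_info *ci, struct proc *new)
    {
            u_int32_t msr = ppc_mfmsr();            /* hypothetical helper */

            ppc_mtmsr(msr & ~PSL_EE);               /* 1: block external interrupts */
            ci->ci_curproc = new;                   /* 2: only now record new proc  */
    #ifdef MULTIPROCESSOR
            new->p_cpu = ci;                        /* 3: bind proc to this CPU     */
    #endif
            /* ... restore the new proc's registers before re-enabling ... */
    }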
diff --git a/sys/arch/macppc/macppc/machdep.c b/sys/arch/macppc/macppc/machdep.c
index 21e03c7cfbf..b25537a5b16 100644
--- a/sys/arch/macppc/macppc/machdep.c
+++ b/sys/arch/macppc/macppc/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.96 2008/04/09 16:58:10 deraadt Exp $ */
+/* $OpenBSD: machdep.c,v 1.97 2008/04/26 22:37:41 drahn Exp $ */
/* $NetBSD: machdep.c,v 1.4 1996/10/16 19:33:11 ws Exp $ */
/*
@@ -87,6 +87,9 @@
#include <ddb/db_extern.h>
#endif
+#include <powerpc/reg.h>
+#include <powerpc/fpu.h>
+
/*
* Global variables used here and there
*/
@@ -1436,3 +1439,23 @@ kcopy(const void *from, void *to, size_t size)
return 0;
}
+
+/* prototype for locore function */
+void cpu_switchto_asm(struct proc *oldproc, struct proc *newproc);
+
+void cpu_switchto( struct proc *oldproc, struct proc *newproc)
+{
+ /*
+ * if this CPU is running a new process, flush the
+ * FPU/Altivec context to avoid an IPI.
+ */
+#ifdef MULTIPROCESSOR
+ struct cpu_info *ci = curcpu();
+ if (ci->ci_fpuproc)
+ save_fpu();
+ if (ci->ci_vecproc)
+ save_vec(ci->ci_vecproc);
+#endif
+
+ cpu_switchto_asm(oldproc, newproc);
+}
diff --git a/sys/arch/powerpc/include/cpu.h b/sys/arch/powerpc/include/cpu.h
index de9c477b74a..5a270eca04f 100644
--- a/sys/arch/powerpc/include/cpu.h
+++ b/sys/arch/powerpc/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.32 2007/12/04 22:36:39 kettenis Exp $ */
+/* $OpenBSD: cpu.h,v 1.33 2008/04/26 22:37:41 drahn Exp $ */
/* $NetBSD: cpu.h,v 1.1 1996/09/30 16:34:21 ws Exp $ */
/*
@@ -57,6 +57,7 @@ struct cpu_info {
volatile int ci_cpl;
volatile int ci_iactive;
volatile int ci_ipending;
+
int ci_intrdepth;
char *ci_intstk;
#define CPUSAVE_LEN 8
@@ -64,6 +65,14 @@ struct cpu_info {
register_t ci_ddbsave[CPUSAVE_LEN];
#define DISISAVE_LEN 4
register_t ci_disisave[DISISAVE_LEN];
+
+ volatile u_int64_t ci_nexttimerevent;
+ volatile u_int64_t ci_prevtb;
+ volatile u_int64_t ci_lasttb;
+ volatile u_int64_t ci_nextstatevent;
+ int ci_statspending;
+
+ u_long ci_randseed;
};
static __inline struct cpu_info *
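
Note: the new struct cpu_info members correspond one-for-one to the clock.c globals removed by the earlier hunks; each CPU now carries its own copy. The mapping below is inferred from those hunks:

    /*
     * Old clock.c global  ->  new per-CPU field
     *
     *   lasttb            ->  ci->ci_lasttb
     *   prevtb            ->  ci->ci_prevtb
     *   nexttimerevent    ->  ci->ci_nexttimerevent
     *   nextstatevent     ->  ci->ci_nextstatevent
     *   statspending      ->  ci->ci_statspending
     *
     * ci_randseed has no former global and is not referenced by these hunks.
     */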
diff --git a/sys/arch/powerpc/powerpc/pmap.c b/sys/arch/powerpc/powerpc/pmap.c
index fbd61b827fe..e2cedd6a540 100644
--- a/sys/arch/powerpc/powerpc/pmap.c
+++ b/sys/arch/powerpc/powerpc/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.103 2007/11/04 13:43:39 martin Exp $ */
+/* $OpenBSD: pmap.c,v 1.104 2008/04/26 22:37:41 drahn Exp $ */
/*
* Copyright (c) 2001, 2002, 2007 Dale Rahn.
@@ -48,6 +48,8 @@
#include <ddb/db_extern.h>
#include <ddb/db_output.h>
+#include <powerpc/lock.h>
+
struct pmap kernel_pmap_;
static struct mem_region *pmap_mem, *pmap_avail;
struct mem_region pmap_allocated[10];
@@ -154,6 +156,64 @@ int pmap_initialized = 0;
int physmem;
int physmaxaddr;
+void pmap_hash_lock_init(void);
+void pmap_hash_lock(int entry);
+void pmap_hash_unlock(int entry);
+int pmap_hash_lock_try(int entry);
+
+volatile unsigned int pmap_hash_lock_word = 0;
+
+void
+pmap_hash_lock_init()
+{
+ pmap_hash_lock_word = 0;
+}
+
+int
+pmap_hash_lock_try(int entry)
+{
+ int val = 1 << entry;
+ int success, tmp;
+ __asm volatile (
+ "1: lwarx %0, 0, %3 \n"
+ " and. %1, %2, %0 \n"
+ " li %1, 0 \n"
+ " bne 2f \n"
+ " or %0, %2, %0 \n"
+ " stwcx. %0, 0, %3 \n"
+ " li %1, 1 \n"
+ " bne- 1b \n"
+ "2: \n"
+ : "=&r" (tmp), "=&r" (success)
+ : "r" (val), "r" (&pmap_hash_lock_word)
+ : "memory");
+ return success;
+}
+
+
+void
+pmap_hash_lock(int entry)
+{
+ int attempt = 0;
+ int locked = 0;
+ do {
+ if (pmap_hash_lock_word & (1 << entry)) {
+ attempt++;
+ if(attempt >0x20000000)
+ panic("unable to obtain lock on entry %d\n",
+ entry);
+ continue;
+ }
+ locked = pmap_hash_lock_try(entry);
+ } while (locked == 0);
+}
+
+void
+pmap_hash_unlock(int entry)
+{
+ atomic_clearbits_int(&pmap_hash_lock_word, 1 << entry);
+}
+
/* virtual to physical helpers */
static inline int
VP_SR(vaddr_t va)
@@ -828,8 +888,10 @@ pmap_hash_remove(struct pte_desc *pted)
/* determine which pteg mapping is present in */
if (ppc_proc_is_64b) {
+ int entry = PTED_PTEGIDX(pted);
ptp64 = pmap_ptable64 + (idx * 8);
- ptp64 += PTED_PTEGIDX(pted); /* increment by index into pteg */
+ ptp64 += entry; /* increment by entry into pteg */
+ pmap_hash_lock(entry);
/*
* We now have the pointer to where it will be, if it is
* currently mapped. If the mapping was thrown away in
@@ -840,9 +902,12 @@ pmap_hash_remove(struct pte_desc *pted)
(PTED_HID(pted) ? PTE_HID_64 : 0)) == ptp64->pte_hi) {
pte_zap((void*)ptp64, pted);
}
+ pmap_hash_unlock(entry);
} else {
+ int entry = PTED_PTEGIDX(pted);
ptp32 = pmap_ptable32 + (idx * 8);
- ptp32 += PTED_PTEGIDX(pted); /* increment by index into pteg */
+ ptp32 += entry; /* increment by entry into pteg */
+ pmap_hash_lock(entry);
/*
* We now have the pointer to where it will be, if it is
* currently mapped. If the mapping was thrown away in
@@ -853,6 +918,7 @@ pmap_hash_remove(struct pte_desc *pted)
(PTED_HID(pted) ? PTE_HID_32 : 0)) == ptp32->pte_hi) {
pte_zap((void*)ptp32, pted);
}
+ pmap_hash_unlock(entry);
}
}
@@ -2267,7 +2333,6 @@ pte_insert64(struct pte_desc *pted)
int sr, idx;
int i;
- /* HASH lock? */
sr = ptesr(pted->pted_pmap->pm_sr, pted->pted_va);
idx = pteidx(sr, pted->pted_va);
@@ -2293,6 +2358,8 @@ pte_insert64(struct pte_desc *pted)
for (i = 0; i < 8; i++) {
if (ptp64[i].pte_hi & PTE_VALID_64)
continue;
+ if (pmap_hash_lock_try(i) == 0)
+ continue;
/* not valid, just load */
pted->pted_va |= i;
@@ -2302,6 +2369,8 @@ pte_insert64(struct pte_desc *pted)
__asm__ volatile ("sync");
ptp64[i].pte_hi |= PTE_VALID_64;
__asm volatile ("sync");
+
+ pmap_hash_unlock(i);
return;
}
/* try fill of secondary hash */
@@ -2309,6 +2378,8 @@ pte_insert64(struct pte_desc *pted)
for (i = 0; i < 8; i++) {
if (ptp64[i].pte_hi & PTE_VALID_64)
continue;
+ if (pmap_hash_lock_try(i) == 0)
+ continue;
pted->pted_va |= (i | PTED_VA_HID_M);
ptp64[i].pte_hi =
@@ -2317,12 +2388,19 @@ pte_insert64(struct pte_desc *pted)
__asm__ volatile ("sync");
ptp64[i].pte_hi |= PTE_VALID_64;
__asm volatile ("sync");
+
+ pmap_hash_unlock(i);
return;
}
/* need decent replacement algorithm */
+busy:
__asm__ volatile ("mftb %0" : "=r"(off));
secondary = off & 8;
+
+ if (pmap_hash_lock_try(off & 7) == 0)
+ goto busy;
+
pted->pted_va |= off & (PTED_VA_PTEGIDX_M|PTED_VA_HID_M);
idx = (idx ^ (PTED_HID(pted) ? pmap_ptab_mask : 0));
@@ -2362,6 +2440,8 @@ pte_insert64(struct pte_desc *pted)
ptp64->pte_lo = pted->p.pted_pte64.pte_lo;
__asm__ volatile ("sync");
ptp64->pte_hi |= PTE_VALID_64;
+
+ pmap_hash_unlock(off & 7);
}
void
@@ -2373,8 +2453,6 @@ pte_insert32(struct pte_desc *pted)
int sr, idx;
int i;
- /* HASH lock? */
-
sr = ptesr(pted->pted_pmap->pm_sr, pted->pted_va);
idx = pteidx(sr, pted->pted_va);
@@ -2401,6 +2479,8 @@ pte_insert32(struct pte_desc *pted)
for (i = 0; i < 8; i++) {
if (ptp32[i].pte_hi & PTE_VALID_32)
continue;
+ if (pmap_hash_lock_try(i) == 0)
+ continue;
/* not valid, just load */
pted->pted_va |= i;
@@ -2409,6 +2489,8 @@ pte_insert32(struct pte_desc *pted)
__asm__ volatile ("sync");
ptp32[i].pte_hi |= PTE_VALID_32;
__asm volatile ("sync");
+
+ pmap_hash_unlock(i);
return;
}
/* try fill of secondary hash */
@@ -2416,6 +2498,8 @@ pte_insert32(struct pte_desc *pted)
for (i = 0; i < 8; i++) {
if (ptp32[i].pte_hi & PTE_VALID_32)
continue;
+ if (pmap_hash_lock_try(i) == 0)
+ continue;
pted->pted_va |= (i | PTED_VA_HID_M);
ptp32[i].pte_hi =
@@ -2424,12 +2508,18 @@ pte_insert32(struct pte_desc *pted)
__asm__ volatile ("sync");
ptp32[i].pte_hi |= PTE_VALID_32;
__asm volatile ("sync");
+
+ pmap_hash_unlock(i);
return;
}
/* need decent replacement algorithm */
+busy:
__asm__ volatile ("mftb %0" : "=r"(off));
secondary = off & 8;
+ if (pmap_hash_lock_try(off & 7) == 0)
+ goto busy;
+
pted->pted_va |= off & (PTED_VA_PTEGIDX_M|PTED_VA_HID_M);
idx = (idx ^ (PTED_HID(pted) ? pmap_ptab_mask : 0));
@@ -2460,6 +2550,7 @@ pte_insert32(struct pte_desc *pted)
__asm__ volatile ("sync");
ptp32->pte_hi |= PTE_VALID_32;
+ pmap_hash_unlock(off & 7);
}
#ifdef DEBUG_PMAP
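
Note: pmap_hash_lock_try() above builds a bit-granular test-and-set lock from lwarx/stwcx.: the single global pmap_hash_lock_word holds one lock bit per PTEG slot index (0-7), and a failed store-conditional means another CPU raced in. For readers less familiar with PowerPC load-reserve/store-conditional, a user-space equivalent using C11 atomics (illustrative only, not the kernel's code):

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned int hash_lock_word = 0;

    /* Try to take the lock bit for one slot index; returns 1 on success. */
    static int
    hash_lock_try(int entry)
    {
            unsigned int bit = 1U << entry;
            unsigned int old = atomic_load(&hash_lock_word);

            do {
                    if (old & bit)
                            return 0;               /* bit already held */
            } while (!atomic_compare_exchange_weak(&hash_lock_word, &old,
                old | bit));                        /* retry if another CPU raced */
            return 1;
    }

    static void
    hash_unlock(int entry)
    {
            atomic_fetch_and(&hash_lock_word, ~(1U << entry));
    }

    int
    main(void)
    {
            if (hash_lock_try(3))
                    printf("slot 3 locked: word = %#x\n",
                        atomic_load(&hash_lock_word));
            hash_unlock(3);
            printf("after unlock:  word = %#x\n", atomic_load(&hash_lock_word));
            return 0;
    }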