author | Miod Vallat <miod@cvs.openbsd.org> | 2003-08-11 20:45:18 +0000
committer | Miod Vallat <miod@cvs.openbsd.org> | 2003-08-11 20:45:18 +0000
commit | ba29d03891af8f040301c2d85702dda393c1f77a (patch)
tree | d4c3b0e3c0e121a83a2b4a66b36247d81201054d /sys/arch/mvme88k
parent | f34975471d43e3c76a7316627f99efa7a86bb592 (diff)
Sprinkle proper use of _C_LABEL and _ASM_LABEL in the .S files (except for
the _fp.S which are too scary at the moment). This will be necessary to
move to ELF in the future.
Use local symbols whenever possible.
Attempt to use delayed branches whenever possible.
Remove stupid or straightforward comments, some hardcoded values, and a
few unused variables or routines.
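The macros at the heart of this change come from <machine/asm.h>. As a rough sketch of the usual BSD convention (the exact mvme88k definitions may differ, so treat this as an illustration rather than the port's actual header), _C_LABEL() decorates a symbol the way the C compiler emits it -- with a leading underscore on a.out, with no prefix on ELF -- while _ASM_LABEL() names symbols that exist only on the assembly side:

```c
/*
 * Illustrative sketch only -- not the actual sys/arch/mvme88k header.
 * On a.out, C symbols carry a leading underscore, so assembly must write
 * _panic to reach the C function panic(); on ELF there is no prefix.
 * Wrapping every cross-language reference lets the same .S source
 * assemble for either object format.
 */
#ifdef __ELF__
#define _C_LABEL(name)    name          /* C-visible symbol, no prefix on ELF */
#else
#define _C_LABEL(name)    _ ## name     /* a.out prepends an underscore */
#endif
#define _ASM_LABEL(name)  name          /* assembly-only symbol, never prefixed */

/* usage in a .S file:
 *     bsr  _C_LABEL(panic)             -- call the C function panic()
 *     br   _ASM_LABEL(pickup_stack)    -- branch to an assembler-local label
 */
```

Replacing hardcoded names such as _panic with these wrappers is what lets the .S sources assemble unchanged once the port moves from a.out to ELF.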
Diffstat (limited to 'sys/arch/mvme88k')
-rw-r--r-- | sys/arch/mvme88k/mvme88k/eh.S | 522
-rw-r--r-- | sys/arch/mvme88k/mvme88k/locore.S | 253
-rw-r--r-- | sys/arch/mvme88k/mvme88k/locore_asm_routines.S | 725
-rw-r--r-- | sys/arch/mvme88k/mvme88k/m88100_fp.S | 13
-rw-r--r-- | sys/arch/mvme88k/mvme88k/m88110_fp.S | 5
-rw-r--r-- | sys/arch/mvme88k/mvme88k/m88110_mmu.S | 86
-rw-r--r-- | sys/arch/mvme88k/mvme88k/machdep.c | 8
-rw-r--r-- | sys/arch/mvme88k/mvme88k/process.S | 204
8 files changed, 839 insertions, 977 deletions
diff --git a/sys/arch/mvme88k/mvme88k/eh.S b/sys/arch/mvme88k/mvme88k/eh.S index eea70201cc5..f0637af7eb6 100644 --- a/sys/arch/mvme88k/mvme88k/eh.S +++ b/sys/arch/mvme88k/mvme88k/eh.S @@ -1,4 +1,4 @@ -/* $OpenBSD: eh.S,v 1.30 2003/08/03 23:34:09 miod Exp $ */ +/* $OpenBSD: eh.S,v 1.31 2003/08/11 20:45:17 miod Exp $ */ /* * Mach Operating System * Copyright (c) 1993-1991 Carnegie Mellon University @@ -31,11 +31,10 @@ /* * HISTORY * 1. Should get rid of SR0 reference for thread stuff. - * 2. Make up my mind what is _kstack. I think it should be p->p_addr+UPAGES. + * 2. Make up my mind what is kstack. I think it should be p->p_addr+UPAGES. * (p_addr is pointing to user struct and swapin is making sure it is * updated) - * Whatever is _kstack, its usage in this file should be - * revisited. + * Whatever is kstack, its usage in this file should be revisited. */ /* @@ -214,7 +213,6 @@ #include <machine/board.h> #include <machine/param.h> /* CPU_ and BRD_ defines */ #include <machine/trap.h> /* for T_ defines */ -#include <machine/board.h> /* * The exception frame as defined in "machine/pcb.h" (among other places) is @@ -239,16 +237,16 @@ data align 4 -sbadcpupanic: - string "eh.S: bad cpu number in FLAGS" +ASLOCAL(sbadcpupanic) + string "eh.S: bad cpu number in FLAGS\000" text align 8 -Lbadcpupanic: - or.u r2, r0, hi16(sbadcpupanic) - or r2, r2, lo16(sbadcpupanic) - bsr _C_LABEL(panic) +ASLOCAL(Lbadcpupanic) + or.u r2, r0, hi16(_ASM_LABEL(sbadcpupanic)) + bsr.n _C_LABEL(panic) + or r2, r2, lo16(_ASM_LABEL(sbadcpupanic)) align 8 @@ -267,8 +265,8 @@ Lbadcpupanic: #define SAVE_CTX \ stcr r31, SRX ; \ - or.u r31, r0, hi16(_save_frame) ; \ - or r31, r31, lo16(_save_frame) ; \ + or.u r31, r0, hi16(_ASM_LABEL(save_frame)) ; \ + or r31, r31, lo16(_ASM_LABEL(save_frame)) ; \ /* save old R31 and other R registers */; \ st.d r0 , r31, GENREG_OFF(0) ; \ st.d r2 , r31, GENREG_OFF(2) ; \ @@ -356,28 +354,28 @@ Lbadcpupanic: #define PREP(NAME, NUM, BIT, SSBR_STUFF, FLAG_PRECHECK) \ xcr FLAGS, FLAGS, SR1 ; \ FLAG_PRECHECK ; \ - /* the bsr later clobbers r1, so save now */ ; \ + /* the bsr later clobbers r1, so save now */ \ stcr r1, SR2 /* r1 now free */ ; \ - /* set or clear the FLAG_FROM_KERNEL bit */ ; \ + /* set or clear the FLAG_FROM_KERNEL bit */ \ ldcr r1, EPSR ; \ bb0.n PSR_SUPERVISOR_MODE_BIT, r1, 1f ; \ clr FLAGS, FLAGS, 1<FLAG_FROM_KERNEL> ; \ set FLAGS, FLAGS, 1<FLAG_FROM_KERNEL> ; \ - /* get a stack (exception frame) */ ; \ -1: bsr setup_phase_one ; \ - /* TMP2 now free -- use to set EF_VECTOR */ ; \ + /* get a stack (exception frame) */ \ +1: bsr _ASM_LABEL(setup_phase_one) ; \ + /* TMP2 now free -- use to set EF_VECTOR */ \ or TMP2, r0, NUM ; \ st TMP2, r31, REG_OFF(EF_VECTOR) ; \ - /* TMP3 also free -- use to set last_vector */ ; \ - or.u TMP3, r0, hi16(_last_vector) ; \ - st TMP2, TMP3, lo16(_last_vector) ; \ - /* Clear any bits in the SSBR (held in TMP) */ ; \ - /* SSBR_STUFF may be empty, though. */ ; \ + /* TMP3 also free -- use to set last_vector */ \ + or.u TMP3, r0, hi16(_C_LABEL(last_vector)) ; \ + st TMP2, TMP3, lo16(_C_LABEL(last_vector)) ; \ + /* Clear any bits in the SSBR (held in TMP) */ \ + /* SSBR_STUFF may be empty, though. */ \ SSBR_STUFF ; \ - /* call setup_phase_two to restart the FPU */ ; \ - /* and to save all general registers. */ ; \ - bsr setup_phase_two ; \ - /* All general regs free -- do any debugging */ ; \ + /* call setup_phase_two to restart the FPU */ \ + /* and to save all general registers. 
*/ \ + bsr _ASM_LABEL(setup_phase_two) ; \ + /* All general regs free -- do any debugging */ \ PREP_DEBUG(BIT, NAME) #endif @@ -394,13 +392,13 @@ Lbadcpupanic: clr FLAGS, FLAGS, 1<FLAG_FROM_KERNEL> ; \ set FLAGS, FLAGS, 1<FLAG_FROM_KERNEL> ; \ /* get a stack (exception frame) */ ; \ -1: bsr m88110_setup_phase_one ; \ +1: bsr _ASM_LABEL(m88110_setup_phase_one) ; \ /* TMP2 now free -- use to set EF_VECTOR */ ; \ or TMP2, r0, NUM ; \ st TMP2, r31, REG_OFF(EF_VECTOR) ; \ /* call setup_phase_two to restart the FPU */ ; \ /* and to save all general registers. */ ; \ - bsr m88110_setup_phase_two ; \ + bsr _ASM_LABEL(m88110_setup_phase_two) ; \ /* All general regs free -- do any debugging */ ; \ PREP_DEBUG(BIT, NAME) #endif @@ -408,12 +406,14 @@ Lbadcpupanic: /* Some defines for use with PREP() */ #define No_SSBR_Stuff /* empty */ #define Clear_SSBR_Dest \ - bsr clear_dest_ssbr_bit + bsr _ASM_LABEL(clear_dest_ssbr_bit) #define No_Precheck /* empty */ #define Data_Precheck \ - bb1.n FLAG_IGNORE_DATA_EXCEPTION, FLAGS, ignore_data_exception + bb1.n FLAG_IGNORE_DATA_EXCEPTION, FLAGS, \ + _ASM_LABEL(ignore_data_exception) #define M88110_Data_Precheck \ - bb1.n FLAG_IGNORE_DATA_EXCEPTION, FLAGS, m88110_ignore_data_exception + bb1.n FLAG_IGNORE_DATA_EXCEPTION, FLAGS, \ + _ASM_LABEL(m88110_ignore_data_exception) #ifdef EH_DEBUG /* @@ -425,7 +425,7 @@ Lbadcpupanic: * The bits are defined in "asm.h" */ -GLOBAL(eh_debug) +ASGLOBAL(eh_debug) word 0x00000000 /* @@ -434,24 +434,24 @@ GLOBAL(eh_debug) * need be. */ #define PREP_DEBUG(DebugNumber, Name) \ - or.u r2, r0, hi16(_eh_debug) ; \ - ld r3, r2, lo16(_eh_debug) ; \ - bb0 DebugNumber, r3, 4f ; \ - /* call MY_info(ef,thread,flags,kind)*/ ; \ - or r2, r30, r0 ; \ - ldcr r3, SR0 ; \ - ldcr r4, SR1 ; \ - or.u r5, r0, hi16(2f) ; \ - or r5, r5, lo16(2f) ; \ - bsr.n _MY_info ; \ - subu r31, r31, 40 ; \ - br.n 4f ; \ - addu r31, r31, 40 ; \ - data ; \ -2: string Name ; \ - byte 0 ; \ - align 4 ; \ - text ; \ + or.u r2, r0, hi16(_ASM_LABEL(eh_debug)) ; \ + ld r3, r2, lo16(_ASM_LABEL(eh_debug)) ; \ + bb0 DebugNumber, r3, 4f ; \ + /* call MY_info(ef,thread,flags,kind)*/ \ + or r2, r30, r0 ; \ + ldcr r3, SR0 ; \ + ldcr r4, SR1 ; \ + or.u r5, r0, hi16(2f) ; \ + or r5, r5, lo16(2f) ; \ + bsr.n _C_LABEL(MY_info) ; \ + subu r31, r31, 40 ; \ + br.n 4f ; \ + addu r31, r31, 40 ; \ + data ; \ +2: string Name ; \ + byte 0 ; \ + align 4 ; \ + text ; \ 4: @@ -463,12 +463,12 @@ GLOBAL(eh_debug) * Then, return from the interrupt handler. */ #define DONE(DebugNumber) \ - or.u r2, r0, hi16(_eh_debug) ; \ - ld r3, r2, lo16(_eh_debug) ; \ - bb0 DebugNumber, r3, 2f ; \ - ldcr r4, SR1 ; \ - CALL(_MY_info_done, r31, r4) ; \ -2: br return_from_exception_handler + or.u r2, r0, hi16(_ASM_LABEL(eh_debug)) ; \ + ld r3, r2, lo16(_ASM_LABEL(eh_debug)) ; \ + bb0 DebugNumber, r3, 2f ; \ + ldcr r4, SR1 ; \ + CALL(_C_LABEL(MY_info_done), r31, r4) ; \ +2: br _ASM_LABEL(return_from_exception_handler) #else /* * If not debugging, then no debug-prep to do. 
@@ -476,7 +476,7 @@ GLOBAL(eh_debug) */ #define PREP_DEBUG(bit, name) #define DONE(num) \ - br return_from_exception_handler + br _ASM_LABEL(return_from_exception_handler) #endif /* EH_DEBUG */ #ifdef M88100 @@ -495,7 +495,6 @@ GLOBAL(interrupt_handler) PREP("interrupt", 1, DEBUG_INTERRUPT_BIT, No_SSBR_Stuff, No_Precheck) /* interrupt_func is set in mvme_bootstrap() */ CALL(_C_LABEL(m88100_trap), T_INT, r30) - /*CALLP(_interrupt_func, 1, r30) */ DONE(DEBUG_INTERRUPT_BIT) /* instruction access exception handler */ @@ -562,32 +561,32 @@ GLOBAL(overflow_handler) /* Floating-point precise handler */ #define FPp_SSBR_STUFF \ - bsr clear_FPp_ssbr_bit + bsr _ASM_LABEL(clear_FPp_ssbr_bit) GLOBAL(fp_precise_handler) PREP("FPU precise", 114, DEBUG_FPp_BIT, FPp_SSBR_STUFF, No_Precheck) - CALL(_m88100_Xfp_precise, r0, r30) + CALL(_ASM_LABEL(m88100_Xfp_precise), r0, r30) DONE(DEBUG_FPp_BIT) /* Floating-point imprecise handler */ #define FPi_SSBR_STUFF \ - bsr clear_FPi_ssbr_bit + bsr _ASM_LABEL(clear_FPi_ssbr_bit) GLOBAL(fp_imprecise_handler) PREP("FPU imprecise", 115, DEBUG_FPi_BIT, FPi_SSBR_STUFF, No_Precheck) - CALL(_Xfp_imprecise, r0, r30) + CALL(_ASM_LABEL(Xfp_imprecise), r0, r30) DONE(DEBUG_FPi_BIT) /* All standard system calls. */ GLOBAL(syscall_handler) PREP("syscall", 128, DEBUG_SYSCALL_BIT, No_SSBR_Stuff, No_Precheck) ld r13, r30, GENREG_OFF(13) - CALL(_m88100_syscall, r13, r30) /* system call no. is in r13 */ + CALL(_C_LABEL(m88100_syscall), r13, r30) DONE(DEBUG_SYSCALL_BIT) /* trap 496 comes here */ GLOBAL(bugtrap) PREP("bugsyscall", 496, DEBUG_BUGCALL_BIT, No_SSBR_Stuff, No_Precheck) ld r9, r30, GENREG_OFF(9) - CALL(_bugsyscall, r9, r30) /* system call no. is in r9 */ + CALL(_C_LABEL(bugsyscall), r9, r30) DONE(DEBUG_BUGCALL_BIT) GLOBAL(sigsys) @@ -656,12 +655,12 @@ GLOBAL(entry) GLOBAL(error_handler) /* pick up the slavestack */ or r26, r0, r31 /* save old stack */ - or.u r31, r0, hi16(_intstack_end) - or r31, r31, lo16(_intstack_end) + or.u r31, r0, hi16(_ASM_LABEL(intstack_end)) + or r31, r31, lo16(_ASM_LABEL(intstack_end)) /* zero the stack, so we'll know what we're lookin' at */ - or.u r27, r0, hi16(_intstack) - or r27, r27, lo16(_intstack) + or.u r27, r0, hi16(_C_LABEL(intstack)) + or r27, r27, lo16(_C_LABEL(intstack)) 1: cmp r28, r27, r31 bb1 ge, r28, 2f /* branch if at the end of the stack */ st r0, r0, r27 @@ -742,8 +741,8 @@ GLOBAL(error_handler) #if 0 /* MVME188 */ #define IST_REG 0xfff84040 /* interrupt status addr */ /* check if it's a mvme188 */ - or.u r10, r0, hi16(_brdtyp) - ld r11, r10, lo16(_brdtyp) + or.u r10, r0, hi16(_C_LABEL(brdtyp)) + ld r11, r10, lo16(_C_LABEL(brdtyp)) cmp r10, r11, BRD_188 bb1 ne, r10, 3f or.u r10, r0, hi16(IST_REG) /* interrupt status register */ @@ -778,8 +777,8 @@ GLOBAL(error_handler) stcr r1, PSR FLUSH_PIPELINE -GLOBAL(error_loop) - bsr _error_loop +ASLOCAL(error_loop) + bsr _ASM_LABEL(error_loop) /* never returns*/ /* @@ -804,12 +803,12 @@ GLOBAL(error_loop) GLOBAL(reset_handler) /* pick up the slavestack */ or r26, r0, r31 /* save old stack */ - or.u r31, r0, hi16(_intstack_end) - or r31, r31, lo16(_intstack_end) + or.u r31, r0, hi16(_ASM_LABEL(intstack_end)) + or r31, r31, lo16(_ASM_LABEL(intstack_end)) /* zero the stack, so we'll know what we're lookin' at */ - or.u r27, r0, hi16(_intstack) - or r27, r27, lo16(_intstack) + or.u r27, r0, hi16(_C_LABEL(intstack)) + or r27, r27, lo16(_C_LABEL(intstack)) 1: cmp r28, r27, r31 bb1 ge, r28, 2f /* branch if at the end of the stack */ st r0, r0, r27 @@ -917,14 +916,14 @@ GLOBAL(reset_handler) stcr 
r1, PSR FLUSH_PIPELINE -GLOBAL(error_loop2) - bsr _error_loop2 +ASLOCAL(error_loop2) + bsr _ASM_LABEL(error_loop2) /* never returns*/ /* * This is part of baddadr (below). */ -ASGLOBAL(ignore_data_exception) +ASLOCAL(ignore_data_exception) /* * SR0: pointer to the current thread structure * SR1: previous FLAGS reg @@ -945,8 +944,8 @@ ASGLOBAL(ignore_data_exception) */ /* the "+2" below is to set the VALID bit. */ - or.u r2, r0, hi16(badaddr__return_nonzero + 2) - or r2, r2, lo16(badaddr__return_nonzero + 2) + or.u r2, r0, hi16(_ASM_LABEL(badaddr__return_nonzero) + 2) + or r2, r2, lo16(_ASM_LABEL(badaddr__return_nonzero) + 2) stcr r2, SNIP /* Make it the next instruction to execute */ addu r2, r2, 4 @@ -961,7 +960,7 @@ ASGLOBAL(ignore_data_exception) /* * This is part of baddadr (below). */ -ASGLOBAL(m88110_ignore_data_exception) +ASLOCAL(m88110_ignore_data_exception) /* * SR0: pointer to the current thread structure * SR1: previous FLAGS reg @@ -981,8 +980,8 @@ ASGLOBAL(m88110_ignore_data_exception) * to blast r2..r9 as we see fit. */ - or.u r2, r0, hi16(m88110_badaddr__return_nonzero) - or r2, r2, lo16(m88110_badaddr__return_nonzero) + or.u r2, r0, hi16(_ASM_LABEL(m88110_badaddr__return_nonzero)) + or r2, r2, lo16(_ASM_LABEL(m88110_badaddr__return_nonzero)) stcr r2, EXIP /* Make it the next instruction to execute */ /* the following jumps to "m88110_badaddr__return_nonzero" below */ @@ -1030,15 +1029,15 @@ GLOBAL(badaddr) * see if it's a halfword..... */ sub r6, r3, 4 - bcnd.n ne0, r6, badaddr__maybe_halfword + bcnd.n ne0, r6, _ASM_LABEL(badaddr__maybe_halfword) stcr r5, SR1 FLUSH_PIPELINE /* * It's a bad address if it's misaligned. */ - bb1 0, r2, badaddr__return_nonzero - bb1 1, r2, badaddr__return_nonzero + bb1 0, r2, _ASM_LABEL(badaddr__return_nonzero) + bb1 1, r2, _ASM_LABEL(badaddr__return_nonzero) /* * The next line will either fault or not. 
If it faults, execution * will go to: data_access_handler (see above) @@ -1050,44 +1049,41 @@ GLOBAL(badaddr) */ ld r5, r2, 0 FLUSH_PIPELINE - br.n badaddr__return + br.n _ASM_LABEL(badaddr__return) or r2, r0, r0 /* indicate a zero (address not bad) return.*/ -badaddr__maybe_halfword: +ASLOCAL(badaddr__maybe_halfword) /* More or less like the code for checking a word above */ sub r6, r3, 2 - bcnd ne0, r6, badaddr__maybe_byte + bcnd ne0, r6, _ASM_LABEL(badaddr__maybe_byte) /* it's bad if it's misaligned */ - bb1 0, r2, badaddr__return_nonzero + bb1 0, r2, _ASM_LABEL(badaddr__return_nonzero) FLUSH_PIPELINE ld.h r5, r2, 0 FLUSH_PIPELINE - br.n badaddr__return + br.n _ASM_LABEL(badaddr__return) or r2, r0, r0 -badaddr__maybe_byte: +ASLOCAL(badaddr__maybe_byte) /* More or less like the code for checking a word above */ sub r6, r3, 1 - bcnd ne0, r6, badaddr__unknown_size + bcnd ne0, r6, _ASM_LABEL(badaddr__unknown_size) FLUSH_PIPELINE ld.b r5, r2, 0 FLUSH_PIPELINE - br.n badaddr__return + br.n _ASM_LABEL(badaddr__return) or r2, r0, r0 -badaddr__unknown_size: +ASLOCAL(badaddr__unknown_size) #ifdef DEBUG data -1: string "bad length (%d) to badaddr() from 0x%x" +1: string "bad length (%d) to badaddr() from 0x%x\000" text or.u r2, r0, hi16(1b) or r2, r2, lo16(1b) - or r4, r0, r1 - bsr _C_LABEL(printf) - or.u r2, r0, hi16(1b) - or r2, r2, lo16(1b) - bsr _C_LABEL(panic) + bsr.n _C_LABEL(panic) + or r4, r0, r1 /*NOTREACHED*/ #endif @@ -1111,7 +1107,7 @@ ASGLOBAL(badaddr__return) stcr r8, PSR jmp r1 -ASGLOBAL(m88110_badaddr__return_nonzero) +ASLOCAL(m88110_badaddr__return_nonzero) /* * On mc88110, we possibly took an exception * and we have to clear DSR after the rte @@ -1120,12 +1116,12 @@ ASGLOBAL(m88110_badaddr__return_nonzero) */ stcr r0, DSR /* Clear DSR reg on mc88110 */ stcr r0, DLAR /* Clear DLAR reg on mc88110 */ - br.n badaddr__return_nonzero + br.n _ASM_LABEL(badaddr__return_nonzero) stcr r0, DPAR /* Clear DPAR reg on mc88110 */ #ifdef M88100 -ASGLOBAL(setup_phase_one) +ASLOCAL(setup_phase_one) /* * SR0: current thread (if any, null if not) * SR1: saved copy of exception-time register now holding FLAGS @@ -1135,8 +1131,8 @@ ASGLOBAL(setup_phase_one) * FLAGS: CPU status flags * * immediate goal: - * Decide where we're going to put the exception frame. - * Might be at the end of R31, SR3, or the thread's pcb. + * Decide where we're going to put the exception frame. + * Might be at the end of R31, SR3, or the thread's pcb. */ /* Check if we are coming in from a FPU restart exception. @@ -1147,12 +1143,11 @@ ASGLOBAL(setup_phase_one) NOP NOP - bb1 FLAG_ENABLING_FPU, FLAGS, use_SR3_pcb + bb1 FLAG_ENABLING_FPU, FLAGS, _ASM_LABEL(use_SR3_pcb) /* are we coming in from user mode? If so, pick up thread pcb */ - bb0 FLAG_FROM_KERNEL, FLAGS, pickup_stack + bb0 FLAG_FROM_KERNEL, FLAGS, _ASM_LABEL(pickup_stack) /* Interrupt in kernel mode, not FPU restart */ -ASGLOBAL(already_on_kernel_stack) /* * SR0: current thread (if any, null if not) * SR1: saved copy of exception-time register now holding FLAGS @@ -1173,10 +1168,10 @@ ASGLOBAL(already_on_kernel_stack) st r1, r31, REG_OFF(EF_SR3) addu r1, r31, SIZEOF_EF /* save previous r31 */ - br.n have_pcb + br.n _ASM_LABEL(have_pcb) st r1, r31, GENREG_OFF(31) -ASGLOBAL(use_SR3_pcb) +ASLOCAL(use_SR3_pcb) /* * SR0: current thread (if any, null if not) * SR1: saved copy of exception-time register now holding FLAGS @@ -1213,15 +1208,15 @@ ASGLOBAL(use_SR3_pcb) or r31, r0, r30 /* make r31 our pointer. 
*/ addu r30, r30, SIZEOF_EF /* r30 now has previous SR3 */ st r30, r31, REG_OFF(EF_SR3) /* save previous SR3 */ - br.n have_pcb + br.n _ASM_LABEL(have_pcb) xcr r30, r30, SR3 /* restore r30 */ 1: /* we took an exception while restarting the FPU from user space. * Consequently, we never picked up a stack. Do so now. * R1 is currently free (saved in the exception frame pointed at by * r30) */ - or.u r1, r0, hi16(_kstack) - ld r1, r1, lo16(_kstack) + or.u r1, r0, hi16(_ASM_LABEL(kstack)) + ld r1, r1, lo16(_ASM_LABEL(kstack)) addu r1, r1, USIZE-SIZEOF_EF st FLAGS,r1, REG_OFF(EF_FLAGS) /* store flags */ st r31, r1, GENREG_OFF(31) /* store r31 - now free */ @@ -1230,10 +1225,10 @@ ASGLOBAL(use_SR3_pcb) ld r1, r30, GENREG_OFF(0) /* restore old r1 */ st r0, r30, GENREG_OFF(0) /* repair that frame */ st r1, r31, GENREG_OFF(1) /* store r1 */ - br.n have_pcb + br.n _ASM_LABEL(have_pcb) xcr r30, r30, SR3 /* restore r30 */ -ASGLOBAL(pickup_stack) +ASLOCAL(pickup_stack) /* * SR0: current thread * SR1: saved copy of exception-time register now holding FLAGS @@ -1249,8 +1244,8 @@ ASGLOBAL(pickup_stack) stcr r31, SR3 /* save previous r31 */ /* switch to the thread's kernel stack. */ - or.u r31, r0, hi16(_curpcb) - ld r31, r31, lo16(_curpcb) + or.u r31, r0, hi16(_C_LABEL(curpcb)) + ld r31, r31, lo16(_C_LABEL(curpcb)) addu r31, r31, PCB_USER_STATE /* point to user save area */ st FLAGS,r31, REG_OFF(EF_FLAGS) /* save flags */ st r1, r31, GENREG_OFF(1) /* save prev. r1 (now free) */ @@ -1258,7 +1253,7 @@ ASGLOBAL(pickup_stack) st r1, r31, GENREG_OFF(31) /* FALLTHROUGH */ -ASGLOBAL(have_pcb) +ASLOCAL(have_pcb) /* * SR0: current thread * SR1: saved copy of exception-time register now holding FLAGS @@ -1295,8 +1290,8 @@ ASGLOBAL(have_pcb) */ #ifdef MVME188 /* check if it's a mvme188 */ - or.u TMP, r0, hi16(_brdtyp) - ld TMP2, TMP, lo16(_brdtyp) + or.u TMP, r0, hi16(_C_LABEL(brdtyp)) + ld TMP2, TMP, lo16(_C_LABEL(brdtyp)) cmp TMP, TMP2, BRD_188 bb1 ne, TMP, 5f @@ -1310,7 +1305,7 @@ ASGLOBAL(have_pcb) cmp TMP2, TMP, 0x3 /* CPU3 ? */ bb1 eq, TMP2, 4f /* Arrrrg! bad cpu# */ - br Lbadcpupanic + br _ASM_LABEL(Lbadcpupanic) 1: /* must be CPU0 */ or.u TMP, r0, hi16(VME_CMMU_I0) @@ -1319,7 +1314,7 @@ ASGLOBAL(have_pcb) or.u TMP, r0, hi16(VME_CMMU_D0) ld TMP2, TMP, lo16(VME_CMMU_D0) + 0x108 st TMP2, r31, REG_OFF(EF_DPFSR) - br pfsr_done + br _ASM_LABEL(pfsr_done) 2: /* must be CPU1 */ or.u TMP, r0, hi16(VME_CMMU_I1) @@ -1328,7 +1323,7 @@ ASGLOBAL(have_pcb) or.u TMP, r0, hi16(VME_CMMU_D1) ld TMP2, TMP, lo16(VME_CMMU_D1) + 0x108 st TMP2, r31, REG_OFF(EF_DPFSR) - br pfsr_done + br _ASM_LABEL(pfsr_done) 3: /* must be CPU2 */ or.u TMP, r0, hi16(VME_CMMU_I2) @@ -1337,7 +1332,7 @@ ASGLOBAL(have_pcb) or.u TMP, r0, hi16(VME_CMMU_D2) ld TMP2, TMP, lo16(VME_CMMU_D2) + 0x108 st TMP2, r31, REG_OFF(EF_DPFSR) - br pfsr_done + br _ASM_LABEL(pfsr_done) 4: /* must be CPU3 */ or.u TMP, r0, hi16(VME_CMMU_I3) @@ -1346,7 +1341,7 @@ ASGLOBAL(have_pcb) or.u TMP, r0, hi16(VME_CMMU_D3) ld TMP2, TMP, lo16(VME_CMMU_D3) + 0x108 st TMP2, r31, REG_OFF(EF_DPFSR) - br pfsr_done + br _ASM_LABEL(pfsr_done) 5: #endif /* MVME188 */ /* it's a single processor SBC */ @@ -1357,7 +1352,7 @@ ASGLOBAL(have_pcb) ld TMP2, TMP, lo16(SBC_CMMU_D) + 0x108 st TMP2, r31, REG_OFF(EF_DPFSR) -ASGLOBAL(pfsr_done) +ASLOCAL(pfsr_done) ldcr TMP, SSBR ldcr TMP2, SXIP ldcr TMP3, DMT0 @@ -1386,7 +1381,7 @@ ASGLOBAL(pfsr_done) * the appropriate SSBR bits for the destination registers of * loads or xmems. 
*/ - bb0.n DMT_VALID_BIT, TMP3, DMT_check_finished + bb0.n DMT_VALID_BIT, TMP3, 8f st TMP3, r31, REG_OFF(EF_DMT0) ldcr TMP2, DMT1 @@ -1415,48 +1410,41 @@ ASGLOBAL(pfsr_done) * we restart the FPU */ -ASGLOBAL(check_DMT0) ldcr TMP2, DMT0 - bb0.n DMT_VALID_BIT, TMP2, DMT_check_finished + bb0.n DMT_VALID_BIT, TMP2, 8f /* make sure an exception in fpu_enable will not see our DMT0 */ stcr r0, DMT0 - bb1 DMT_LOCK_BIT, TMP2, do_DMT0 - bb1 DMT_WRITE_BIT, TMP2, check_DMT1 - -ASGLOBAL(do_DMT0) + bb1 DMT_LOCK_BIT, TMP2, 1f + bb1 DMT_WRITE_BIT, TMP2, 2f +1: extu TMP2, TMP2, DMT_DREG_WIDTH <DMT_DREG_OFFSET> set TMP2, TMP2, 1<5> clr TMP, TMP, TMP2 - -ASGLOBAL(check_DMT1) +2: ldcr TMP2, DMT1 - bb0 DMT_VALID_BIT, TMP2, check_DMT2 - bb1 DMT_LOCK_BIT, TMP2, do_DMT1 - bb1 DMT_WRITE_BIT, TMP2, check_DMT2 - -ASGLOBAL(do_DMT1) + bb0 DMT_VALID_BIT, TMP2, 4f + bb1 DMT_LOCK_BIT, TMP2, 3f + bb1 DMT_WRITE_BIT, TMP2, 4f +3: extu TMP2, TMP2, DMT_DREG_WIDTH <DMT_DREG_OFFSET> set TMP2, TMP2, 1<5> clr TMP, TMP, TMP2 - -ASGLOBAL(check_DMT2) +4: ldcr TMP2, DMT2 - bb0 DMT_VALID_BIT, TMP2, DMT_check_finished - bb1 DMT_LOCK_BIT, TMP2, do_DMT2_single - bb1 DMT_WRITE_BIT, TMP2, DMT_check_finished - bb1 DMT_DOUBLE_BIT,TMP2, do_DMT2_double - -ASGLOBAL(do_DMT2_single) + bb0 DMT_VALID_BIT, TMP2, 8f + bb1 DMT_LOCK_BIT, TMP2, 5f + bb1 DMT_WRITE_BIT, TMP2, 8f + bb1 DMT_DOUBLE_BIT,TMP2, 6f +5: extu TMP2, TMP2, DMT_DREG_WIDTH <DMT_DREG_OFFSET> - br.n 1f - set TMP2, TMP2, 1<5> - -ASGLOBAL(do_DMT2_double) + br.n 7f + set TMP2, TMP2, 1<5> /* single */ +6: extu TMP2, TMP2, DMT_DREG_WIDTH <DMT_DREG_OFFSET> - set TMP2, TMP2, 1<6> -1: clr TMP, TMP, TMP2 - -ASGLOBAL(DMT_check_finished) + set TMP2, TMP2, 1<6> /* double */ +7: + clr TMP, TMP, TMP2 +8: /* * SR0: current thread * SR1: saved copy of exception-time register now holding FLAGS @@ -1481,7 +1469,7 @@ ASGLOBAL(DMT_check_finished) #endif /* M88100 */ -ASGLOBAL(clear_FPi_ssbr_bit) +ASLOCAL(clear_FPi_ssbr_bit) /* * Clear floatingpont-imprecise ssbr bits. * Also, save appropriate FPU control registers to the E.F. @@ -1511,16 +1499,15 @@ ASGLOBAL(clear_FPi_ssbr_bit) * 2nd reg of a double result [see section 6.8.5] */ #define FPIT_SIZE_BIT 10 - bb0 FPIT_SIZE_BIT, TMP2, not_double_fpi + bb0 FPIT_SIZE_BIT, TMP2, 1f extu TMP2, TMP2, 5<0> /* get the reg. */ set TMP2, TMP2, 1<6> /* set width */ clr TMP, TMP, TMP2 - -ASGLOBAL(not_double_fpi) +1: jmp r1 -ASGLOBAL(clear_FPp_ssbr_bit) +ASLOCAL(clear_FPp_ssbr_bit) /* * Clear floating pont precise ssbr bits. * Also, save appropriate FPU control registers to the E.F. @@ -1556,12 +1543,14 @@ ASGLOBAL(clear_FPp_ssbr_bit) extu TMP3, TMP2, 5<0> /* get FP operation dest reg */ br.n 3f set TMP3, TMP3, 1<5> /* size=1 - clear one bit for float */ -2: set TMP3, TMP3, 1<6> /* size=2 - clear two bit for double */ -3: clr TMP, TMP, TMP3 /* clear bit(s) in ssbr. */ -4: jmp r1 +2: + set TMP3, TMP3, 1<6> /* size=2 - clear two bit for double */ +3: + clr TMP, TMP, TMP3 /* clear bit(s) in ssbr. */ + jmp r1 -ASGLOBAL(clear_dest_ssbr_bit) +ASLOCAL(clear_dest_ssbr_bit) /* * There are various cases where an exception can leave the * destination register's bit in the SB set. @@ -1593,7 +1582,7 @@ ASGLOBAL(clear_dest_ssbr_bit) bb1.n PSR_SUPERVISOR_MODE_BIT, TMP3, 2f clr TMP2, TMP2, 2<0> /* get rid of valid and error bits. 
*/ -1: /* user space load here */ + /* user space load here */ #if ERRATA__XXX_USR NOP ld.usr TMP2,TMP2, r0 /* get the instruction itself */ @@ -1632,36 +1621,36 @@ ASGLOBAL(clear_dest_ssbr_bit) extu TMP3, TMP2, 16<16> /* get the upper 16 bits */ mask TMP3, TMP3, 0xFC00 /* apply the mask */ cmp TMP3, TMP3, 0x1000 /* if equal, it's a load double */ - bb1 eq, TMP3, misaligned_double + bb1 eq, TMP3, 2f /* still could be -- check the second pattern for ld.d */ /* look at the upper 16 bits first */ extu TMP3, TMP2, 16<16> /* get the upper 16 bits */ mask TMP3, TMP3, 0xFC00 /* apply the mask */ cmp TMP3, TMP3, 0xF400 /* if equal, might be a load double */ - bb1 ne, TMP3, misaligned_single /* not equal, must be single */ + bb1 ne, TMP3, 1f /* not equal, must be single */ /* now look at the lower 16 bits */ extu TMP3, TMP2, 16<0> /* get the lower 16 bits */ mask TMP3, TMP3, 0xFCE0 /* apply the mask */ cmp TMP3, TMP3, 0x1000 /* if equal, it's a load double */ - bb1 eq, TMP3, misaligned_double + bb1 eq, TMP3, 2f -ASGLOBAL(misaligned_single) +1: /* misaligned single */ extu TMP2, TMP2, 5<21> /* get the destination register */ - br.n 1f + br.n 3f set TMP2, TMP2, 1<5> /* set size=1 */ -ASGLOBAL(misaligned_double) +2: /* misaligned double */ extu TMP2, TMP2, 5<21> /* get the destination register */ set TMP2, TMP2, 1<6> /* set size=2 -- clear two bits */ - -1: jmp.n r1 +3: + jmp.n r1 clr TMP, TMP, TMP2 /* clear bit(s) in ssbr. */ #ifdef M88100 -ASGLOBAL(setup_phase_two) +ASLOCAL(setup_phase_two) /* * SR0: saved return address to calling exception handler * SR1: saved copy of exception-time register now holding FLAGS @@ -1701,8 +1690,8 @@ ASGLOBAL(setup_phase_two) stcr TMP, EPSR /* the "+2" below is to set the VALID_BIT */ - or.u TMP, r0, hi16(fpu_enable +2) - or TMP, TMP, lo16(fpu_enable +2) + or.u TMP, r0, hi16(_ASM_LABEL(fpu_enable) + 2) + or TMP, TMP, lo16(_ASM_LABEL(fpu_enable) + 2) stcr TMP, SNIP /* jump to here fpu_enable */ addu TMP, TMP, 4 stcr TMP, SFIP /* and then continue after that */ @@ -1713,13 +1702,12 @@ ASGLOBAL(setup_phase_two) stcr TMP, EPSR stcr r0, SXIP /* clear valid bit */ stcr r0, SNIP /* clear valid bit */ - or.u TMP, r0, hi16(fpu_enable) - or TMP, TMP, lo16(fpu_enable) + or.u TMP, r0, hi16(_ASM_LABEL(fpu_enable)) + or TMP, TMP, lo16(_ASM_LABEL(fpu_enable)) or TMP, TMP, 0x2 /* set the VALID_BIT and clear Exception bit */ stcr TMP, SFIP /* jump to here fpu_enable */ #endif -setup_phase_two_cont: set FLAGS, FLAGS, 1<FLAG_ENABLING_FPU> xcr FLAGS, FLAGS, SR1 st r1, r31, REG_OFF(EF_RET) /* save the return address */ @@ -1754,7 +1742,7 @@ setup_phase_two_cont: RTE /* jumps to "fpu_enable" on the next line to enable the FPU. */ -ASGLOBAL(fpu_enable) +ASLOCAL(fpu_enable) FLUSH_PIPELINE xcr TMP, TMP, SR3 /* get E.F. pointer */ st.d r30, TMP, GENREG_OFF(30) /* save previous r30, r31 */ @@ -1827,27 +1815,25 @@ ASGLOBAL(fpu_enable) * If it's not the interrupt exception, enable interrupts and * take care of any data access exceptions...... */ -#ifdef INTSTACK - /* - * If interrupt exception, switch to interrupt stack if not - * already there. Else, switch to kernel stack. - */ -#endif or r30, r0, r31 /* get a copy of the e.f. pointer */ ld r2, r31, REG_OFF(EF_EPSR) bb1 PSR_SUPERVISOR_MODE_BIT, r2, 1f /* if in kernel mode */ #ifdef INTSTACK + /* + * If interrupt exception, switch to interrupt stack if not + * already there. Else, switch to kernel stack. + */ ld r3, r31, REG_OFF(EF_VECTOR) cmp r3, r3, 1 /* is interrupt ? 
*/ bb0 eq, r3, 2f - or.u r31, r0, hi16(_intstack_end) /* switch to int stack */ - or r31, r31, lo16(_intstack_end) + or.u r31, r0, hi16(_ASM_LABEL(intstack_end)) + or r31, r31, lo16(_ASM_LABEL(intstack_end)) br 3f 2: #endif - or.u r31, r0, hi16(_kstack) - ld r31, r31, lo16(_kstack) + or.u r31, r0, hi16(_ASM_LABEL(kstack)) + ld r31, r31, lo16(_ASM_LABEL(kstack)) addu r31, r31, USIZE /* point at proper end */ br 3f 1: @@ -1855,13 +1841,12 @@ ASGLOBAL(fpu_enable) ld r3, r31, REG_OFF(EF_VECTOR) cmp r3, r3, 1 /* is interrupt ? */ bb0 eq, r3, 3f /* no, we will stay on kern stack */ - or.u r31, r0, hi16(_intstack_end) /* switch to int stack */ - or r31, r31, lo16(_intstack_end) + or.u r31, r0, hi16(_ASM_LABEL(intstack_end)) + or r31, r31, lo16(_ASM_LABEL(intstack_end)) #endif - /* This label is here for debugging */ -exception_handler_has_ksp: -global exception_handler_has_ksp -3: /* + +3: + /* * here - r30 holds a pointer to the exception frame. * r31 is a pointer to the kernel stack/interrupt stack. */ @@ -1872,23 +1857,23 @@ global exception_handler_has_ksp #endif /* DDB */ ld r2, r30, REG_OFF(EF_VECTOR) - bcnd.n eq0, r2, return_to_calling_exception_handler /* is error */ + bcnd.n eq0, r2, 8f /* is error */ ld r14, r30, REG_OFF(EF_RET) - cmp r3, r2, 1 /* is an interrupt? */ - bb1.n eq, r3, return_to_calling_exception_handler /* skip if so */ + cmp r3, r2, 1 /* is an interrupt? */ + bb1.n eq, r3, 8f /* skip if so */ #ifdef DDB cmp r3, r2, 130 /* DDB break exception */ - bb1.n eq, r3, return_to_calling_exception_handler + bb1.n eq, r3, 8f cmp r3, r2, 132 /* DDB entry exception */ - bb1.n eq, r3, return_to_calling_exception_handler + bb1.n eq, r3, 8f #endif /* enable interrupts */ ldcr r2, PSR clr r2, r2, 1<PSR_INTERRUPT_DISABLE_BIT> stcr r2, PSR -#ifdef DDB +#ifdef DDB FLUSH_PIPELINE #endif @@ -1896,7 +1881,7 @@ global exception_handler_has_ksp - check dmt0 anything outstanding? */ ld r3, r30, REG_OFF(EF_DMT0) - bb0 DMT_VALID_BIT, r3, return_to_calling_exception_handler + bb0 DMT_VALID_BIT, r3, 8f /* * r30 can be clobbered by calls. So stuff its value into a preserved @@ -1905,7 +1890,7 @@ global exception_handler_has_ksp or r15, r0, r30 CALL(_C_LABEL(m88100_trap), T_DATAFLT, r15) - CALL(_data_access_emulation, r15, r0) + CALL(_C_LABEL(data_access_emulation), r15, r0) /* restore it... */ or r30, r0, r15 @@ -1913,7 +1898,7 @@ global exception_handler_has_ksp /* clear the dmt0 word in the E.F */ st r0, r30, REG_OFF(EF_DMT0) -ASGLOBAL(return_to_calling_exception_handler) +8: jmp r14 /* loaded above */ #endif /* M88100 */ @@ -1936,7 +1921,6 @@ ENTRY(proc_trampoline) ld r2, r31,4 /* & proc pointer from switch frame */ jsr.n r1 addu r31,r31,8 - bsr _C_LABEL(panic) /* * proc_do_uret @@ -1950,9 +1934,9 @@ ENTRY(proc_do_uret) ld r3,r2,P_ADDR /* p->p_addr */ addu r3,r3,PCB_USER_STATE /* p->p_addr.u_pcb.user_state */ st r3,r31,0 /* put it on the stack */ - br return_from_exception_handler + br _ASM_LABEL(return_from_exception_handler) -ASGLOBAL(return_from_exception_handler) +ASLOCAL(return_from_exception_handler) /* * Regs r1-r30 are free. 
R31 is pointing at the word * on the kernel stack where our pointer to the exception frame @@ -1973,13 +1957,13 @@ ASGLOBAL(return_from_exception_handler) or.u r2, r0, hi16(_C_LABEL(cputyp)) ld r3, r2, lo16(_C_LABEL(cputyp)) cmp r2, r3, CPU_88110 - bb1 eq, r2, m88110_return_code + bb1 eq, r2, _ASM_LABEL(m88110_return_code) #endif #ifdef M88100 #define FPTR r14 ld FPTR, r31, 0 /* grab exception frame pointer */ ld r3, FPTR, REG_OFF(EF_DMT0) - bb0 DMT_VALID_BIT, r3, check_ast + bb0 DMT_VALID_BIT, r3, _ASM_LABEL(check_ast) #if 1 /* @@ -2035,7 +2019,7 @@ ASGLOBAL(return_from_exception_handler) /* clear the dmt0 word in the E.F. */ st r0 , FPTR, REG_OFF(EF_DMT0) 2: - br check_ast + br _ASM_LABEL(check_ast) #endif /* M88100 */ #ifdef M88110 @@ -2293,7 +2277,7 @@ GLOBAL(m88110_overflow_handler) /* Floating-point precise handler */ GLOBAL(m88110_fp_precise_handler) PREP2("FPU precise", 114, DEBUG_FPp_BIT, No_SSBR_Stuff, No_Precheck) - CALL(_m88110_Xfp_precise, r0, r30) + CALL(_ASM_LABEL(m88110_Xfp_precise), r0, r30) DONE(DEBUG_FPp_BIT) /* MVME197 non-maskable interrupt handler (ABORT button) */ @@ -2324,14 +2308,14 @@ GLOBAL(m88110_inst_atc_miss) GLOBAL(m88110_syscall_handler) PREP2("syscall", 128, DEBUG_SYSCALL_BIT, No_SSBR_Stuff, No_Precheck) ld r13, r30, GENREG_OFF(13) - CALL(_m88110_syscall, r13, r30) /* system call no. is in r13 */ + CALL(_C_LABEL(m88110_syscall), r13, r30) DONE(DEBUG_SYSCALL_BIT) /* trap 496 comes here */ GLOBAL(m88110_bugtrap) PREP2("bugsyscall", 496, DEBUG_BUGCALL_BIT, No_SSBR_Stuff, No_Precheck) ld r9, r30, GENREG_OFF(9) - CALL(_bugsyscall, r9, r30) /* system call no. is in r9 */ + CALL(_C_LABEL(bugsyscall), r9, r30) DONE(DEBUG_BUGCALL_BIT) GLOBAL(m88110_sigsys) @@ -2413,9 +2397,9 @@ GLOBAL(m88110_error_handler) * Upon a real reset, VBR is set to zero (0), so code must be at addr 0 * to handle it!!! * - * This is totaly different than _error_handler. Shadowing might or + * This is totaly different than error_handler. Shadowing might or * might not be on. - * R1-R31 could tell u alot about what happend, so we'll save them. + * R1-R31 could tell you alot about what happened, so we'll save them. * * We'll not worry about trashing r26-29 here, * since they aren't generally used. 
@@ -2426,12 +2410,12 @@ GLOBAL(m88110_reset_handler) GLOBAL(m88110_fatal) /* pick up the slavestack */ or r26, r0, r31 /* save old stack */ - or.u r31, r0, hi16(_intstack_end) - or r31, r31, lo16(_intstack_end) + or.u r31, r0, hi16(_ASM_LABEL(intstack_end)) + or r31, r31, lo16(_ASM_LABEL(intstack_end)) /* zero the stack, so we'll know what we're lookin' at */ - or.u r27, r0, hi16(_intstack) - or r27, r27, lo16(_intstack) + or.u r27, r0, hi16(_C_LABEL(intstack)) + or r27, r27, lo16(_C_LABEL(intstack)) 1: cmp r28, r27, r31 bb1 ge, r28, 2f /* branch if at the end of the stack */ st r0, r0, r27 @@ -2493,8 +2477,8 @@ GLOBAL(m88110_fatal) 1: /* retrieve saved shadow registers for error_handler, though) */ - or.u r30, r0, hi16(_save_frame) - or r30, r30, lo16(_save_frame) + or.u r30, r0, hi16(_ASM_LABEL(save_frame)) + or r30, r30, lo16(_ASM_LABEL(save_frame)) ld r10, r30, REG_OFF(EF_EPSR) st r10, r31, REG_OFF(EF_EPSR) ld r10, r30, REG_OFF(EF_EXIP) @@ -2550,7 +2534,7 @@ GLOBAL(m88110_fatal) st r20, r31, 0x04 st r20, r31, 0x00 - CALL(_error_fatal, r30, r30) + CALL(_C_LABEL(error_fatal), r30, r30) /* turn interupts back on */ ldcr r1, PSR @@ -2558,11 +2542,11 @@ GLOBAL(m88110_fatal) stcr r1, PSR FLUSH_PIPELINE -ASGLOBAL(m88110_error_loop) - bsr m88110_error_loop +ASLOCAL(m88110_error_loop) + bsr _ASM_LABEL(m88110_error_loop) /* never returns*/ -ASGLOBAL(m88110_setup_phase_one) +ASLOCAL(m88110_setup_phase_one) /* * SR0: current thread (if any, null if not) * SR1: saved copy of exception-time register now holding FLAGS @@ -2584,12 +2568,11 @@ ASGLOBAL(m88110_setup_phase_one) NOP NOP - bb1 FLAG_ENABLING_FPU, FLAGS, m88110_use_SR3_pcb + bb1 FLAG_ENABLING_FPU, FLAGS, _ASM_LABEL(m88110_use_SR3_pcb) /* are we coming in from user mode? If so, pick up process pcb */ - bb0 FLAG_FROM_KERNEL, FLAGS, m88110_pickup_stack + bb0 FLAG_FROM_KERNEL, FLAGS, _ASM_LABEL(m88110_pickup_stack) /* Interrupt in kernel mode, not FPU restart */ -ASGLOBAL(m88110_already_on_kernel_stack) /* * SR0: current thread (if any, null if not) * SR1: saved copy of exception-time register now holding FLAGS @@ -2610,10 +2593,10 @@ ASGLOBAL(m88110_already_on_kernel_stack) st r1, r31, REG_OFF(EF_SR3) addu r1, r31, SIZEOF_EF /* save previous r31 */ - br.n m88110_have_pcb + br.n _ASM_LABEL(m88110_have_pcb) st r1, r31, GENREG_OFF(31) -ASGLOBAL(m88110_use_SR3_pcb) +ASLOCAL(m88110_use_SR3_pcb) /* * SR0: current thread (if any, null if not) * SR1: saved copy of exception-time register now holding FLAGS @@ -2655,7 +2638,7 @@ ASGLOBAL(m88110_use_SR3_pcb) or r31, r0, r30 /* make r31 our pointer. 
*/ addu r30, r30, SIZEOF_EF /* r30 now has previous SR3 */ st r30, r31, REG_OFF(EF_SR3) /* save previous SR3 */ - br.n m88110_have_pcb + br.n _ASM_LABEL(m88110_have_pcb) xcr r30, r30, SR3 /* restore r30 */ 1: /* @@ -2664,8 +2647,8 @@ ASGLOBAL(m88110_use_SR3_pcb) * R1 is currently free (saved in the exception frame pointed at by * r30) */ - or.u r1, r0, hi16(_kstack) - ld r1, r1, lo16(_kstack) + or.u r1, r0, hi16(_ASM_LABEL(kstack)) + ld r1, r1, lo16(_ASM_LABEL(kstack)) addu r1, r1, USIZE-SIZEOF_EF st FLAGS,r1, REG_OFF(EF_FLAGS) /* store flags */ st r31, r1, GENREG_OFF(31) /* store r31 - now free */ @@ -2674,10 +2657,10 @@ ASGLOBAL(m88110_use_SR3_pcb) ld r1, r30, GENREG_OFF(0) /* restore old r1 */ st r0, r30, GENREG_OFF(0) /* repair that frame */ st r1, r31, GENREG_OFF(1) /* store r1 */ - br.n m88110_have_pcb + br.n _ASM_LABEL(m88110_have_pcb) xcr r30, r30, SR3 /* restore r30 */ -ASGLOBAL(m88110_pickup_stack) +ASLOCAL(m88110_pickup_stack) /* * SR0: current thread * SR1: saved copy of exception-time register now holding FLAGS @@ -2693,8 +2676,8 @@ ASGLOBAL(m88110_pickup_stack) stcr r31, SR3 /* save previous r31 */ /* switch to the thread's kernel stack. */ - or.u r31, r0, hi16(_curpcb) - ld r31, r31, lo16(_curpcb) + or.u r31, r0, hi16(_C_LABEL(curpcb)) + ld r31, r31, lo16(_C_LABEL(curpcb)) addu r31, r31, PCB_USER_STATE /* point to user save area */ st FLAGS,r31, REG_OFF(EF_FLAGS) /* save flags */ st r1, r31, GENREG_OFF(1) /* save prev. r1 (now free)*/ @@ -2702,7 +2685,7 @@ ASGLOBAL(m88110_pickup_stack) st r1, r31, GENREG_OFF(31) /* FALLTHROUGH */ -ASGLOBAL(m88110_have_pcb) +ASLOCAL(m88110_have_pcb) /* * SR0: current thread * SR1: saved copy of exception-time register now holding FLAGS @@ -2769,7 +2752,7 @@ ASGLOBAL(m88110_have_pcb) ldcr r1, SR2 jmp r1 -ASGLOBAL(m88110_setup_phase_two) +ASLOCAL(m88110_setup_phase_two) /* * SR0: saved return address to calling exception handler * SR1: saved copy of exception-time register now holding FLAGS @@ -2805,9 +2788,9 @@ ASGLOBAL(m88110_setup_phase_two) clr TMP, TMP, 1<PSR_SHADOW_FREEZE_BIT> /* and shadowing */ stcr TMP, EPSR - or.u TMP, r0, hi16(m88110_fpu_enable) - or TMP, TMP, lo16(m88110_fpu_enable) - stcr TMP, EXIP /* jump to here fpu_enable */ + or.u TMP, r0, hi16(_ASM_LABEL(m88110_fpu_enable)) + or TMP, TMP, lo16(_ASM_LABEL(m88110_fpu_enable)) + stcr TMP, EXIP /* jump to here m88110_fpu_enable */ addu TMP, TMP, 4 stcr TMP, ENIP /* and then continue after that */ @@ -2839,14 +2822,14 @@ ASGLOBAL(m88110_setup_phase_two) * Return address to the calling excption handler. * * immediate goal: - * Do an RTE to restart the fpu and jump to "fpu_enable" + * Do an RTE to restart the fpu and jump to "m88110_fpu_enable" * Another exception (or exceptions) may be raised in * this, which is why FLAG_ENABLING_FPU is set in SR1. */ NOP - RTE /* jumps to "fpu_enable" on the next line to enable the FPU. */ + RTE /* jumps to "m88110_fpu_enable" on the next line to enable the FPU. */ -ASGLOBAL(m88110_fpu_enable) +ASLOCAL(m88110_fpu_enable) FLUSH_PIPELINE /* Now we can handle another exception!!! */ /* Now that EFZE is cleared, we can clear these */ @@ -2935,13 +2918,13 @@ ASGLOBAL(m88110_fpu_enable) ld r3, r31, REG_OFF(EF_VECTOR) cmp r3, r3, 1 /* is interrupt ? 
*/ bb0 eq, r3, 2f - or.u r31, r0, hi16(_intstack_end) /* switch to int stack */ - or r31, r31, lo16(_intstack_end) + or.u r31, r0, hi16(_ASM_LABEL(intstack_end)) + or r31, r31, lo16(_ASM_LABEL(intstack_end)) br 3f 2: #endif - or.u r31, r0, hi16(_kstack) - ld r31, r31, lo16(_kstack) + or.u r31, r0, hi16(_ASM_LABEL(kstack)) + ld r31, r31, lo16(_ASM_LABEL(kstack)) addu r31, r31, USIZE /* point at proper end */ br 3f 1: @@ -2949,12 +2932,10 @@ ASGLOBAL(m88110_fpu_enable) ld r3, r31, REG_OFF(EF_VECTOR) cmp r3, r3, 1 /* is interrupt ? */ bb0 eq, r3, 3f /* no, we will stay on kern stack */ - or.u r31, r0, hi16(_intstack_end) /* switch to int stack */ - or r31, r31, lo16(_intstack_end) + or.u r31, r0, hi16(_ASM_LABEL(intstack_end)) + or r31, r31, lo16(_ASM_LABEL(intstack_end)) #endif - /* This label is here for debugging */ -m88110_exception_handler_has_ksp: -global m88110_exception_handler_has_ksp + 3: /* * here - r30 holds a pointer to the exception frame. * r31 is a pointer to the kernel stack/interrupt stack. @@ -2966,35 +2947,35 @@ global m88110_exception_handler_has_ksp #endif /* DDB */ ld r2, r30, REG_OFF(EF_VECTOR) - bcnd.n eq0, r2, m88110_return_to_calling_exception_handler + bcnd.n eq0, r2, 8f ld r14, r30, REG_OFF(EF_RET) /* load return value XXX!!! */ cmp r3, r2, 1 /* is an interrupt? */ - bb1.n eq, r3, m88110_return_to_calling_exception_handler + bb1.n eq, r3, 8f #ifdef DDB cmp r3, r2, 130 /* DDB break exception */ - bb1.n eq, r3, m88110_return_to_calling_exception_handler + bb1.n eq, r3, 8f cmp r3, r2, 132 /* DDB entry exception */ - bb1.n eq, r3, m88110_return_to_calling_exception_handler + bb1.n eq, r3, 8f #endif /* enable interrupts */ ldcr r2, PSR clr r2, r2, 1<PSR_INTERRUPT_DISABLE_BIT> stcr r2, PSR -#ifdef DDB +#ifdef DDB FLUSH_PIPELINE #endif -#if 1 /* test */ - br m88110_return_to_calling_exception_handler -#endif +#if 1 /* XXX test */ + br 8f +#else /* service any outstanding data pipeline stuff - check dsr... anything outstanding? */ ld r3, r30, REG_OFF(EF_DSR) cmp r3, r3, 0 - bb1 eq, r3, m88110_return_to_calling_exception_handler + bb1 eq, r3, 8f /* * r30 can be clobbered by calls. So stuff its value into a @@ -3009,11 +2990,12 @@ global m88110_exception_handler_has_ksp /* clear the dsr word in the E.F */ st r0, r30, REG_OFF(EF_DSR) -ASGLOBAL(m88110_return_to_calling_exception_handler) +#endif +8: jmp r14 /* loaded above */ data .align 8 -GLOBAL(save_frame) +ASLOCAL(save_frame) space SIZEOF_EF #endif /* M88110 */ diff --git a/sys/arch/mvme88k/mvme88k/locore.S b/sys/arch/mvme88k/mvme88k/locore.S index de7e92d55e4..78a03963c13 100644 --- a/sys/arch/mvme88k/mvme88k/locore.S +++ b/sys/arch/mvme88k/mvme88k/locore.S @@ -1,4 +1,4 @@ -/* $OpenBSD: locore.S,v 1.27 2003/08/03 23:34:09 miod Exp $ */ +/* $OpenBSD: locore.S,v 1.28 2003/08/11 20:45:17 miod Exp $ */ /* * Copyright (c) 1998 Steve Murphree, Jr. * Copyright (c) 1996 Nivas Madhur @@ -96,9 +96,9 @@ GLOBAL(doboot) cmp r4, r3, BRD_188 bb1 ne, r4, 1f bsr _C_LABEL(m188_reset) - br m188_doboot_fail -#endif /* MVME188 */ + br 8f 1: +#endif /* MVME188 */ or.u r3,r0, 0xfff4 ld r4,r3, 0x0060 /* read offset (LCSR +0x60) */ set r4,r4,1<23> /* set SYSRST bit - bit 23 */ @@ -115,9 +115,9 @@ GLOBAL(doboot) */ /* Should we use idle_u instead? 
XXX nivas */ -m188_doboot_fail: - or.u r31, r0, hi16(_intstack_end) - or r31, r31, lo16(_intstack_end) +8: + or.u r31, r0, hi16(_ASM_LABEL(intstack_end)) + or r31, r31, lo16(_ASM_LABEL(intstack_end)) clr r31, r31, 3<0> /* round down to 8-byte boundary */ bsr _C_LABEL(_doboot) @@ -139,33 +139,33 @@ GLOBAL(start_text) * (*entry)(flag, bugargs.ctrl_addr, cp, kernel.smini,kernel.emini, * bootdev, brdtyp); */ - or.u r13, r0, hi16(_boothowto) - st r2, r13, lo16(_boothowto) - or.u r13, r0, hi16(_bootaddr) - st r3, r13, lo16(_bootaddr) - or.u r13, r0, hi16(_first_addr) - st r4, r13, lo16(_first_addr) + or.u r13, r0, hi16(_C_LABEL(boothowto)) + st r2, r13, lo16(_C_LABEL(boothowto)) + or.u r13, r0, hi16(_C_LABEL(bootaddr)) + st r3, r13, lo16(_C_LABEL(bootaddr)) + or.u r13, r0, hi16(_C_LABEL(first_addr)) + st r4, r13, lo16(_C_LABEL(first_addr)) #if defined(DDB) || NKSYMS > 0 - or.u r13, r0, hi16(_esym) - st r4, r13, lo16(_esym) + or.u r13, r0, hi16(_C_LABEL(esym)) + st r4, r13, lo16(_C_LABEL(esym)) #endif - or.u r13, r0, hi16(_bootdev) - st r7, r13, lo16(_bootdev) - or.u r13, r0, hi16(_brdtyp) - st r8, r13, lo16(_brdtyp) + or.u r13, r0, hi16(_C_LABEL(bootdev)) + st r7, r13, lo16(_C_LABEL(bootdev)) + or.u r13, r0, hi16(_C_LABEL(brdtyp)) + st r8, r13, lo16(_C_LABEL(brdtyp)) /* set _cputyp */ cmp r2, r8, BRD_197 /* r8 contains brdtyp */ bb1 ne, r2, 1f /* if it's a '197, CPU is 88110 */ or.u r13, r0, hi16(CPU_88110) - or r8, r13, lo16(CPU_88110) /* r8 contains 0x110 */ + or r8, r13, lo16(CPU_88110) br 2f 1: or.u r13, r0, hi16(CPU_88100) - or r8, r13, lo16(CPU_88100) /* r8 contains 0x100 */ + or r8, r13, lo16(CPU_88100) 2: - or.u r13, r0, hi16(_cputyp) - st r8, r13, lo16(_cputyp) /* r8 contains cputyp */ + or.u r13, r0, hi16(_C_LABEL(cputyp)) + st r8, r13, lo16(_C_LABEL(cputyp)) /* * CPU Initialization @@ -218,72 +218,69 @@ GLOBAL(start_text) stcr r0, VBR /* set Vector Base Register to 0, ALWAYS! */ FLUSH_PIPELINE cmp r2, r8, CPU_88110 /* r8 contains cputyp */ - bb1 eq, r2, master_start /* if it's a '197, skip */ + bb1 eq, r2, _ASM_LABEL(master_start) /* if it's a '197, skip */ #if 0 /* clear BSS. Boot loader might have already done this... */ - or.u r2, r0, hi16(_edata) - or r2, r2, lo16(_edata) - or.u r4, r0, hi16(_end) - or r4, r4, lo16(_end) + or.u r2, r0, hi16(_C_LABEL(edata)) + or r2, r2, lo16(_C_LABEL(edata)) + or.u r4, r0, hi16(_C_LABEL(end)) + or r4, r4, lo16(_C_LABEL(end)) bsr.n _bzero /* bzero(edata, end-edata) */ subu r3, r4, r2 #endif /* * First time to count how many CPUs to attach */ - or.u r11, r0, hi16(initialized_cpu_lock) - or r11, r11, lo16(initialized_cpu_lock) -ASGLOBAL(check_init_lock) + or.u r11, r0, hi16(_ASM_LABEL(initialized_cpu_lock)) + or r11, r11, lo16(_ASM_LABEL(initialized_cpu_lock)) +1: FLUSH_PIPELINE or r22, r0, 1 - xmem r22, r11, r0 /* If r22 gets 0, we have the lock.. */ - bcnd eq0, r22, have_init_lock /* ..but if not, we must wait */ - -ASGLOBAL(wait_for_init_lock) + xmem r22, r11, r0 /* If r22 gets 0, we have the lock.. 
*/ + bcnd eq0, r22, 3f /* ..but if not, we must wait */ +2: /* just watch the lock until it looks clear */ ld r22, r11, r0 - bcnd eq0, r22, check_init_lock - br wait_for_init_lock /* looks clear -- try to grab */ - -ASGLOBAL(have_init_lock) + bcnd eq0, r22, 1b + br 2b /* looks clear -- try to grab */ +3: FLUSH_PIPELINE - or.u r11, r0, hi16(_initialized_cpus) - ld r22, r11, lo16(_initialized_cpus) + or.u r11, r0, hi16(_ASM_LABEL(initialized_cpus)) + ld r22, r11, lo16(_ASM_LABEL(initialized_cpus)) add r23, r22, 1 - st r23, r11, lo16(_initialized_cpus) + st r23, r11, lo16(_ASM_LABEL(initialized_cpus)) - or.u r11, r0, hi16(initialized_cpu_lock) - st r0, r11, lo16(initialized_cpu_lock) + or.u r11, r0, hi16(_ASM_LABEL(initialized_cpu_lock)) + st r0, r11, lo16(_ASM_LABEL(initialized_cpu_lock)) /* * Now we view with any other processors to see who's the master. * We first try to obtain a lock to see who's allowed * to check/set the master lock. */ - or.u r11, r0, hi16(_inter_processor_lock) - or r11, r11, lo16(_inter_processor_lock) -ASGLOBAL(check_ip_lock) + or.u r11, r0, hi16(_ASM_LABEL(inter_processor_lock)) + or r11, r11, lo16(_ASM_LABEL(inter_processor_lock)) +1: FLUSH_PIPELINE or r22, r0, 1 xmem r22, r11, r0 /* If r22 gets 0, we have the lock.. */ - bcnd eq0, r22, have_ip_lock /* ..but if not, we must wait */ -ASGLOBAL(wait_for_ip_lock) + bcnd eq0, r22, 4f /* ..but if not, we must wait */ +2: /* just watch the lock until it looks clear */ ld r22, r11, r0 - bcnd ne0, r22, wait_for_ip_lock + bcnd ne0, r22, 2b /* since we can be here with caches off, add a few nops to keep the bus from getting overloaded */ or r2, r0, lo16(1000) -ASGLOBAL(ip_loop) +3: subu r2, r2, 1 - bcnd eq0, r2, ip_loop - br check_ip_lock /* looks clear -- try to grab */ - -ASGLOBAL(have_ip_lock) + bcnd eq0, r2, 3b + br 1b /* looks clear -- try to grab */ +4: /* now try to grab the master_processor_chosen prize */ FLUSH_PIPELINE - or.u r11, r0, hi16(master_processor_chosen) - or r11, r11, lo16(master_processor_chosen) + or.u r11, r0, hi16(_ASM_LABEL(master_processor_chosen)) + or r11, r11, lo16(_ASM_LABEL(master_processor_chosen)) or r22, r0, 1 xmem r22, r11, r0 @@ -296,56 +293,53 @@ ASGLOBAL(have_ip_lock) * (if we're the master, we'll do that in master_start below. * if we're a slave, we'll do it in slave_start below). */ - bcnd ne0, r22, slave_start + bcnd ne0, r22, _ASM_LABEL(slave_start) /* fall through to master start if that's appropriate */ -ASGLOBAL(master_start) +ASLOCAL(master_start) /* * Switch to interrupt stack * Use idle_u's stack instead? 
*/ - or.u r31, r0, hi16(_intstack_end) - or r31, r31, lo16(_intstack_end) + or.u r31, r0, hi16(_ASM_LABEL(intstack_end)) + or r31, r31, lo16(_ASM_LABEL(intstack_end)) clr r31, r31, 3<0> /* round down to 8-byte boundary */ #ifdef M88110 +#ifdef M88100 cmp r2, r8, CPU_88110 /* r8 contains cputyp */ bb1 ne, r2, 1f /* if it's a 'mc88110, use different vectors */ - or.u r3, r0, hi16(_m88110_vector_list) - or r3, r3, lo16(_m88110_vector_list) - bsr.n _vector_init - ldcr r2, VBR - br 2f -#endif /* M88110 */ +#endif + or.u r3, r0, hi16(_C_LABEL(m88110_vector_list)) + br.n 2f + or r3, r3, lo16(_C_LABEL(m88110_vector_list)) 1: +#endif /* M88110 */ #ifdef M88100 - /* - * Want to make the call: - * vector_init(VBR, vector_list) - */ - or.u r3, r0, hi16(_vector_list) - or r3, r3, lo16(_vector_list) - bsr.n _vector_init - ldcr r2, VBR + or.u r3, r0, hi16(_C_LABEL(vector_list)) + or r3, r3, lo16(_C_LABEL(vector_list)) #endif /* M88100 */ 2: + bsr.n _C_LABEL(vector_init) + ldcr r2, VBR + /* still on int stack */ - bsr.n _mvme_bootstrap + bsr.n _C_LABEL(mvme_bootstrap) subu r31, r31, 40 addu r31, r31, 40 /* * we now know our cpu number, so we - * can set interrupt_stack[cpu_number()] = _intstack + * can set interrupt_stack[cpu_number()] = intstack */ ldcr r10, SR1 extu r10, r10, FLAG_CPU_FIELD_WIDTH<0> /* r10 <-- CPU# */ /* figure interrupt_stack[cpu_number()] */ - or.u r11, r0, hi16(_interrupt_stack) - or r11, r11, lo16(_interrupt_stack) - or.u r12, r0, hi16(_intstack) - or r12, r12, lo16(_intstack) + or.u r11, r0, hi16(_C_LABEL(interrupt_stack)) + or r11, r11, lo16(_C_LABEL(interrupt_stack)) + or.u r12, r0, hi16(_C_LABEL(intstack)) + or r12, r12, lo16(_C_LABEL(intstack)) st r12, r11 [r10] /* switch to proc0 uarea */ @@ -358,14 +352,14 @@ ASGLOBAL(master_start) or r2, r2,lo16(UADDR) addu r2, r2, USIZE - 8 subu r31, r31, 40 - bsr _main + bsr _C_LABEL(main) addu r31, r31, 40 - bsr _panic + bsr _C_LABEL(panic) /* * slave CPUs starts here */ -ASGLOBAL(slave_start) +ASLOCAL(slave_start) /* * While holding the inter_processor_lock, the slave cpu can use * the slavestack to call slave_pre_main and determine its cpu number. 
@@ -374,14 +368,14 @@ ASGLOBAL(slave_start) */ /* r31 <-- slavestack */ - or.u r31, r0, hi16(_slavestack_end) - or r31, r31, lo16(_slavestack_end) + or.u r31, r0, hi16(_ASM_LABEL(slavestack_end)) + or r31, r31, lo16(_ASM_LABEL(slavestack_end)) clr r31, r31, 3<0> /* round down to 8-byte boundary */ - bsr.n _slave_pre_main /* set cpu number */ - subu r31, r31, 48 /* allocate frame */ + bsr.n _C_LABEL(slave_pre_main) /* set cpu number */ + subu r31, r31, 48 /* allocate frame */ - bsr _get_slave_stack + bsr _C_LABEL(get_slave_stack) addu r31, r2, INTSTACK_SIZE + 4096 /* @@ -390,15 +384,15 @@ ASGLOBAL(slave_start) * We also have an interrupt stack */ - or.u r10, r0, hi16(_inter_processor_lock) - st r0, r10, lo16(_inter_processor_lock) + or.u r10, r0, hi16(_ASM_LABEL(inter_processor_lock)) + st r0, r10, lo16(_ASM_LABEL(inter_processor_lock)) - br.n _slave_main /* does not return */ - subu r31, r31, 40 /* allocate frame */ + br.n _C_LABEL(slave_main) /* does not return */ + subu r31, r31, 40 /* allocate frame */ GLOBAL(spin_cpu) - or.u r3, r0, hi16(_start_text) - or r3, r3, lo16(_start_text) + or.u r3, r0, hi16(_C_LABEL(start_text)) + or r3, r3, lo16(_C_LABEL(start_text)) or r9, r0, 0x100 /* .FORKMPU */ tb0 0, r0, 0x200-16 /* call 188Bug */ jmp r1 @@ -407,36 +401,30 @@ GLOBAL(spin_cpu) data .align 4096 /* SDT (segment descriptor table */ - global _kernel_sdt -_kernel_sdt: +GLOBAL(kernel_sdt) space (0x2000) /* 8K - 4K phys, 4K virt*/ - global _ret_addr -_ret_addr: +GLOBAL(ret_addr) word 0 - global _msgsw -_msgsw: - word 0 /* Bits here turn on/off debugging somewhere */ -ASGLOBAL(initialized_cpu_lock) +ASLOCAL(initialized_cpu_lock) /* XMEM spin lock -- to count CPUs */ word 0 -GLOBAL(initialized_cpus) +ASLOCAL(initialized_cpus) /* CPU counter to initialize */ word 0 -ASGLOBAL(master_processor_chosen) +ASLOCAL(master_processor_chosen) /* The first processor that XMEMs this becomes the master */ word 0 -GLOBAL(inter_processor_lock) +ASLOCAL(inter_processor_lock) /* XMEM spin lock -- controls access to master_processor_chosen */ word 0 .align 4096 - global _intstack, _intstack_end, _slavestack, _slavestack_end -_intstack: +GLOBAL(intstack) space (4 * NBPG) /* 16K, just to be safe */ -_intstack_end: -_slavestack: +ASGLOBAL(intstack_end) +ASGLOBAL(slavestack) space (NBPG) /* 4K, small, interim stack */ -_slavestack_end: +ASGLOBAL(slavestack_end) /* * When a process exits and its u. area goes away, we set curpcb to point @@ -448,8 +436,7 @@ _slavestack_end: * Should be page aligned. */ .align 4096 - global _idle_u -_idle_u: +GLOBAL(idle_u) space UPAGES * NBPG /* @@ -458,36 +445,31 @@ _idle_u: * This must be page aligned */ .align 4096 - global _u0 -_u0: space UPAGES * NBPG -estack0: +ASLOCAL(u0) + space UPAGES * NBPG /* * UPAGES get mapped to kstack */ - global _kstack -_kstack: +ASGLOBAL(kstack) word UADDR #if defined(DDB) || NKSYMS > 0 - global _esym -_esym: +GLOBAL(esym) word 0 #endif /* DDB || NKSYMS > 0 */ - global _intiobase, _intiolimit -_intiobase: +GLOBAL(intiobase) word 0 | KVA of base of internal IO space -_intiolimit: +GLOBAL(intiolimit) word 0 | KVA of end of internal IO space - global _proc0paddr /* move to C code */ -_proc0paddr: - word _u0 /* KVA of proc0 uarea */ +GLOBAL(proc0paddr) + word _ASM_LABEL(u0) /* KVA of proc0 uarea */ /* - * _curpcb points to the current pcb (and hence u. area). + * curpcb points to the current pcb (and hence u. area). * Initially this is the special one. 
*/ /* @@ -495,15 +477,14 @@ _proc0paddr: * I may have to change curpcb to u0 + PCB_USER based on what * other parts expect XXX nivas */ - global _curpcb /* move to C code */ -_curpcb: word _u0 /* curpcb = &u0 */ +GLOBAL(curpcb) + word _ASM_LABEL(u0) /* curpcb = &u0 */ /* * Trampoline code. Gets copied to the top of * user stack in exec. */ - global _sigcode -_sigcode: /* r31 points to sigframe */ +GLOBAL(sigcode) /* r31 points to sigframe */ ld r2, r31, 0 /* signo */ ld r3, r31, 4 /* siginfo_t* */ ld r4, r31, 8 /* sigcontext* */ @@ -521,12 +502,10 @@ _sigcode: /* r31 points to sigframe */ tb0 0, r0, 128 /* syscall trap, exit */ or r0, r0, 0 or r0, r0, 0 - global _esigcode -_esigcode: +GLOBAL(esigcode) /* interrupt counters */ - global _intrcnt,_eintrcnt,_intrnames,_eintrnames -_intrnames: +GLOBAL(intrnames) string "spur\0" string "lev1\0" string "lev2\0" @@ -539,8 +518,8 @@ _intrnames: string "sclk\0" string "pclk\0" string "nmi\0" -_eintrnames: +GLOBAL(eintrnames) .align 8 -_intrcnt: +GLOBAL(intrcnt) word 0,0,0,0,0,0,0,0,0,0,0,0 -_eintrcnt: +GLOBAL(eintrcnt) diff --git a/sys/arch/mvme88k/mvme88k/locore_asm_routines.S b/sys/arch/mvme88k/mvme88k/locore_asm_routines.S index a96c50db5a9..7fa7de1c04a 100644 --- a/sys/arch/mvme88k/mvme88k/locore_asm_routines.S +++ b/sys/arch/mvme88k/mvme88k/locore_asm_routines.S @@ -1,4 +1,4 @@ -/* $OpenBSD: locore_asm_routines.S,v 1.22 2003/08/03 23:34:09 miod Exp $ */ +/* $OpenBSD: locore_asm_routines.S,v 1.23 2003/08/11 20:45:17 miod Exp $ */ /* * Mach Operating System * Copyright (c) 1993-1992 Carnegie Mellon University @@ -177,10 +177,6 @@ ENTRY(db_are_interrupts_disabled) and r2, r2, r3 #endif /* DDB */ -GLOBAL(FAULT_ERROR) - or r2,r0,1 /* bad copy */ - jmp r1 - /* * Copy specified amount of data from user space into the kernel * copyin(from, to, len) @@ -196,48 +192,48 @@ GLOBAL(FAULT_ERROR) ENTRY(copyin) /* set up fault handler */ - or.u r5, r0, hi16(_curpcb) - ld r6, r5, lo16(_curpcb) - or.u r5, r0, hi16(.Lciflt) - or r5, r5, lo16(.Lciflt) - st r5, r6, PCB_ONFAULT /* pcb_onfault = .Lciflt */ + or.u r5, r0, hi16(_C_LABEL(curpcb)) + ld r6, r5, lo16(_C_LABEL(curpcb)) + or.u r5, r0, hi16(_ASM_LABEL(Lciflt)) + or r5, r5, lo16(_ASM_LABEL(Lciflt)) + st r5, r6, PCB_ONFAULT /* pcb_onfault = Lciflt */ #if 0 bcnd ne0, LEN, 1f /* XXX optimize len = 0 case */ or r2, r0, 0 - br .Lcidone -1: bcnd lt0, LEN, .Lciflt /* EFAULT if len < 0 */ + br _ASM_LABEL(Lcidone) +1: bcnd lt0, LEN, _ASM_LABEL(Lciflt) /* EFAULT if len < 0 */ #endif /* If it's a small length (less than 8), then do byte-by-byte */ cmp r9, LEN, 8 - bb1 lt, r9, copyin_byte_only + bb1 lt, r9, _ASM_LABEL(copyin_byte_only) /* If they're not aligned similiarly, use byte only... */ xor r9, SRC, DEST mask r8, r9, 0x3 - bcnd ne0, r8, copyin_byte_only + bcnd ne0, r8, _ASM_LABEL(copyin_byte_only) /* * At this point, we don't know if they're word aligned or not, * but we know that what needs to be done to one to align * it is what's needed for the other. 
*/ - bb1 0, SRC, copyin_left_align_to_halfword -copyin_left_aligned_to_halfword: - bb1 1, SRC, copyin_left_align_to_word -copyin_left_aligned_to_word: - bb1 0, LEN, copyin_right_align_to_halfword -copyin_right_aligned_to_halfword: - bb1 1, LEN, copyin_right_align_to_word -copyin_right_aligned_to_word: + bb1 0, SRC, _ASM_LABEL(copyin_left_align_to_halfword) +ASLOCAL(copyin_left_aligned_to_halfword) + bb1 1, SRC, _ASM_LABEL(copyin_left_align_to_word) +ASLOCAL(copyin_left_aligned_to_word) + bb1 0, LEN, _ASM_LABEL(copyin_right_align_to_halfword) +ASLOCAL(copyin_right_aligned_to_halfword) + bb1 1, LEN, _ASM_LABEL(copyin_right_align_to_word) +ASLOCAL(copyin_right_aligned_to_word) /* At this point, both SRC and DEST are aligned to a word */ /* boundry, and LEN is an even multiple of 4. */ - bb1.n 2, LEN, copyin_right_align_to_doubleword + bb1.n 2, LEN, _ASM_LABEL(copyin_right_align_to_doubleword) or r7, r0, 4 -copyin_right_aligned_to_doubleword: +ASLOCAL(copyin_right_aligned_to_doubleword) #ifdef ERRATA__XXX_USR NOP ld.usr r5, SRC, r0 @@ -256,14 +252,14 @@ copyin_right_aligned_to_doubleword: st r5, DEST, r0 addu SRC, SRC, 8 st r6, DEST, r7 - bcnd.n ne0, LEN, copyin_right_aligned_to_doubleword + bcnd.n ne0, LEN, _ASM_LABEL(copyin_right_aligned_to_doubleword) addu DEST, DEST, 8 - or r2, r0, r0 /* successful return */ - br .Lcidone + br.n _ASM_LABEL(Lcidone) + or r2, r0, r0 /* successful return */ /***************************************************/ -copyin_left_align_to_halfword: +ASLOCAL(copyin_left_align_to_halfword) #ifdef ERRATA__XXX_USR NOP ld.b.usr r5, SRC, r0 @@ -276,10 +272,10 @@ copyin_left_align_to_halfword: subu LEN, LEN, 1 st.b r5, DEST, r0 addu SRC, SRC, 1 - br.n copyin_left_aligned_to_halfword + br.n _ASM_LABEL(copyin_left_aligned_to_halfword) addu DEST, DEST, 1 -copyin_left_align_to_word: +ASLOCAL(copyin_left_align_to_word) #ifdef ERRATA__XXX_USR NOP ld.h.usr r5, SRC, r0 @@ -292,10 +288,10 @@ copyin_left_align_to_word: subu LEN, LEN, 2 st.h r5, DEST, r0 addu SRC, SRC, 2 - br.n copyin_left_aligned_to_word + br.n _ASM_LABEL(copyin_left_aligned_to_word) addu DEST, DEST, 2 -copyin_right_align_to_halfword: +ASLOCAL(copyin_right_align_to_halfword) subu LEN, LEN, 1 #ifdef ERRATA__XXX_USR NOP @@ -306,10 +302,10 @@ copyin_right_align_to_halfword: #else ld.b.usr r5, SRC, LEN #endif - br.n copyin_right_aligned_to_halfword + br.n _ASM_LABEL(copyin_right_aligned_to_halfword) st.b r5, DEST, LEN -copyin_right_align_to_word: +ASLOCAL(copyin_right_align_to_word) subu LEN, LEN, 2 #ifdef ERRATA__XXX_USR NOP @@ -320,10 +316,10 @@ copyin_right_align_to_word: #else ld.h.usr r5, SRC, LEN #endif - br.n copyin_right_aligned_to_word + br.n _ASM_LABEL(copyin_right_aligned_to_word) st.h r5, DEST, LEN -copyin_right_align_to_doubleword: +ASLOCAL(copyin_right_align_to_doubleword) subu LEN, LEN, 4 #ifdef ERRATA__XXX_USR NOP @@ -334,12 +330,12 @@ copyin_right_align_to_doubleword: #else ld.usr r5, SRC, LEN #endif - bcnd.n ne0, LEN, copyin_right_aligned_to_doubleword + bcnd.n ne0, LEN, _ASM_LABEL(copyin_right_aligned_to_doubleword) st r5, DEST, LEN - or r2, r0, r0 /* successful return */ - br .Lcidone + br.n _ASM_LABEL(Lcidone) + or r2, r0, r0 /* successful return */ -copyin_byte_only: +ASLOCAL(copyin_byte_only) bcnd eq0, LEN, 2f 1: subu LEN, LEN, 1 @@ -354,16 +350,19 @@ copyin_byte_only: #endif bcnd.n ne0, LEN, 1b st.b r5, DEST, LEN -2: or r2, r0, r0 /* successful return */ - br .Lcidone -.Lcidone: - or.u r5,r0,hi16(_curpcb) - ld r6,r5,lo16(_curpcb) - st r0,r6,PCB_ONFAULT - jmp r1 -.Lciflt: - or r2, r0, EFAULT /* 
return fault */ - br .Lcidone +2: + br.n _ASM_LABEL(Lcidone) + or r2, r0, r0 /* successful return */ + +ASLOCAL(Lcidone) + or.u r5,r0,hi16(_C_LABEL(curpcb)) + ld r6,r5,lo16(_C_LABEL(curpcb)) + jmp.n r1 + st r0,r6,PCB_ONFAULT + +ASLOCAL(Lciflt) + br.n _ASM_LABEL(Lcidone) + or r2, r0, EFAULT /* return fault */ #undef SRC #undef DEST @@ -390,14 +389,14 @@ copyin_byte_only: ENTRY(copyinstr) /* setup fault handler */ - or.u r6, r0, hi16(_curpcb) - ld r7, r6, lo16(_curpcb) - or.u r6, r0, hi16(.Lcisflt) - or r6, r6, lo16(.Lcisflt) + or.u r6, r0, hi16(_C_LABEL(curpcb)) + ld r7, r6, lo16(_C_LABEL(curpcb)) + or.u r6, r0, hi16(_ASM_LABEL(Lcisflt)) + or r6, r6, lo16(_ASM_LABEL(Lcisflt)) st r6, r7, PCB_ONFAULT or r6, r0, 0 - bcnd lt0, CNT, .Lcisflt - bcnd eq0, CNT, .Lcistoolong + bcnd lt0, CNT, _ASM_LABEL(Lcisflt) + bcnd eq0, CNT, _ASM_LABEL(Lcistoolong) 1: #ifdef ERRATA__XXX_USR NOP @@ -414,30 +413,31 @@ ENTRY(copyinstr) cmp r7, r6, CNT bb1 lt, r7, 1b -.Lcistoolong: +ASLOCAL(Lcistoolong) or r2, r0, ENAMETOOLONG /* overflow */ -.Lcisnull: - bcnd eq0,r6, .Lcisdone /* do not attempt to clear last byte */ +ASLOCAL(Lcisnull) + bcnd eq0,r6, _ASM_LABEL(Lcisdone) /* do not attempt to clear last byte */ /* if we did not write to the string */ subu r6, r6, 1 st.b r0, DEST, r6 /* clear last byte */ - br.n .Lcisdone + br.n _ASM_LABEL(Lcisdone) addu r6, r6, 1 2: /* all done */ or r2, r0, 0 -.Lcisdone: +ASLOCAL(Lcisdone) bcnd eq0, LEN, 3f st r6, r0, LEN 3: - or.u r5,r0,hi16(_curpcb) - ld r6,r5,lo16(_curpcb) - st r0,r6,PCB_ONFAULT /* clear the handler */ - jmp r1 -.Lcisflt: - or r2, r0, EFAULT /* return fault */ - br .Lcisnull + or.u r5,r0,hi16(_C_LABEL(curpcb)) + ld r6,r5,lo16(_C_LABEL(curpcb)) + jmp.n r1 + st r0,r6,PCB_ONFAULT /* clear the handler */ + +ASLOCAL(Lcisflt) + br.n _ASM_LABEL(Lcisnull) + or r2, r0, EFAULT /* return fault */ #undef SRC #undef DEST @@ -458,52 +458,48 @@ ENTRY(copyinstr) ENTRY(copyout) /* setup fault handler */ -#if 0 - tb0 0, r0, 132 /* entry trap */ - SET_PCB_ONFAULT(r5, r6, .Lcoflt) -#endif - or.u r5, r0, hi16(_curpcb) - ld r6, r5, lo16(_curpcb) - or.u r5, r0, hi16(.Lcoflt) - or r5, r5, lo16(.Lcoflt) - st r5, r6, PCB_ONFAULT /* pcb_onfault = .Lcoflt */ + or.u r5, r0, hi16(_C_LABEL(curpcb)) + ld r6, r5, lo16(_C_LABEL(curpcb)) + or.u r5, r0, hi16(_ASM_LABEL(Lcoflt)) + or r5, r5, lo16(_ASM_LABEL(Lcoflt)) + st r5, r6, PCB_ONFAULT /* pcb_onfault = Lcoflt */ #if 0 bcnd ne0, LEN, 1f /* XXX optimize len = 0 case */ or r2, r0, 0 - br .Lcodone -1: bcnd lt0, LEN, .Lcoflt /* EFAULT if len < 0 */ + br _ASM_LABEL(Lcodone) +1: bcnd lt0, LEN, _ASM_LABEL(Lcoflt) /* EFAULT if len < 0 */ #endif /* If it's a small length (less than 8), then do byte-by-byte */ cmp r9, LEN, 8 - bb1 lt, r9, copyout_byte_only + bb1 lt, r9, _ASM_LABEL(copyout_byte_only) /* If they're not aligned similiarly, use byte only... */ xor r9, SRC, DEST mask r8, r9, 0x3 - bcnd ne0, r8, copyout_byte_only + bcnd ne0, r8, _ASM_LABEL(copyout_byte_only) /* * At this point, we don't know if they're word aligned or not, * but we know that what needs to be done to one to align * it is what's needed for the other. 
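Both copy routines pick their strategy from the low two address bits: if source and destination are misaligned relative to each other, only a byte loop is safe; otherwise a couple of lead-in byte/halfword moves make both pointers word-aligned and the bulk moves a word or doubleword at a time. The decision, sketched in C (a sketch of the test only, not the unrolled loops):

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch of the alignment test the m88k code performs with xor/mask. */
    static int
    copy_strategy(const void *src, const void *dst, size_t len)
    {
            uintptr_t s = (uintptr_t)src, d = (uintptr_t)dst;

            if (len < 8)
                    return 0;       /* short copy: byte-by-byte */
            if ((s ^ d) & 0x3)
                    return 0;       /* relative misalignment: bytes only */
            return 1;               /* align both, then word/doubleword copy */
    }

kcopy and bcopy further below generalize the same idea into 16-entry strategy tables (kf_strat/kr_strat, f_strat/r_strat) indexed by the low two bits of each address.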
*/ - bb1 0, SRC, copyout_left_align_to_halfword -copyout_left_aligned_to_halfword: - bb1 1, SRC, copyout_left_align_to_word -copyout_left_aligned_to_word: - bb1 0, LEN, copyout_right_align_to_halfword -copyout_right_aligned_to_halfword: - bb1 1, LEN, copyout_right_align_to_word -copyout_right_aligned_to_word: + bb1 0, SRC, _ASM_LABEL(copyout_left_align_to_halfword) +ASLOCAL(copyout_left_aligned_to_halfword) + bb1 1, SRC, _ASM_LABEL(copyout_left_align_to_word) +ASLOCAL(copyout_left_aligned_to_word) + bb1 0, LEN, _ASM_LABEL(copyout_right_align_to_halfword) +ASLOCAL(copyout_right_aligned_to_halfword) + bb1 1, LEN, _ASM_LABEL(copyout_right_align_to_word) +ASLOCAL(copyout_right_aligned_to_word) /* * At this point, both SRC and DEST are aligned to a word * boundry, and LEN is an even multiple of 4. */ - bb1.n 2, LEN, copyout_right_align_to_doubleword + bb1.n 2, LEN, _ASM_LABEL(copyout_right_align_to_doubleword) or r7, r0, 4 -copyout_right_aligned_to_doubleword: +ASLOCAL(copyout_right_aligned_to_doubleword) ld r5, SRC, r0 ld r6, SRC, r7 subu LEN, LEN, 8 @@ -526,13 +522,13 @@ copyout_right_aligned_to_doubleword: #else st.usr r6, DEST, r7 #endif - bcnd.n ne0, LEN, copyout_right_aligned_to_doubleword + bcnd.n ne0, LEN, _ASM_LABEL(copyout_right_aligned_to_doubleword) addu DEST, DEST, 8 or r2, r0, r0 /* successful return */ - br .Lcodone + br _ASM_LABEL(Lcodone) /***************************************************/ -copyout_left_align_to_halfword: +ASLOCAL(copyout_left_align_to_halfword) ld.b r5, SRC, r0 subu LEN, LEN, 1 #ifdef ERRATA__XXX_USR @@ -545,10 +541,10 @@ copyout_left_align_to_halfword: st.b.usr r5, DEST, r0 #endif addu SRC, SRC, 1 - br.n copyout_left_aligned_to_halfword + br.n _ASM_LABEL(copyout_left_aligned_to_halfword) addu DEST, DEST, 1 -copyout_left_align_to_word: +ASLOCAL(copyout_left_align_to_word) ld.h r5, SRC, r0 subu LEN, LEN, 2 #ifdef ERRATA__XXX_USR @@ -561,10 +557,10 @@ copyout_left_align_to_word: st.h.usr r5, DEST, r0 #endif addu SRC, SRC, 2 - br.n copyout_left_aligned_to_word + br.n _ASM_LABEL(copyout_left_aligned_to_word) addu DEST, DEST, 2 -copyout_right_align_to_halfword: +ASLOCAL(copyout_right_align_to_halfword) subu LEN, LEN, 1 ld.b r5, SRC, LEN #ifdef ERRATA__XXX_USR @@ -573,13 +569,13 @@ copyout_right_align_to_halfword: NOP NOP NOP - br copyout_right_aligned_to_halfword + br _ASM_LABEL(copyout_right_aligned_to_halfword) #else - br.n copyout_right_aligned_to_halfword + br.n _ASM_LABEL(copyout_right_aligned_to_halfword) st.b.usr r5, DEST, LEN #endif -copyout_right_align_to_word: +ASLOCAL(copyout_right_align_to_word) subu LEN, LEN, 2 ld.h r5, SRC, LEN #ifdef ERRATA__XXX_USR @@ -588,13 +584,13 @@ copyout_right_align_to_word: NOP NOP NOP - br copyout_right_aligned_to_word + br _ASM_LABEL(copyout_right_aligned_to_word) #else - br.n copyout_right_aligned_to_word + br.n _ASM_LABEL(copyout_right_aligned_to_word) st.h.usr r5, DEST, LEN #endif -copyout_right_align_to_doubleword: +ASLOCAL(copyout_right_align_to_doubleword) subu LEN, LEN, 4 ld r5, SRC, LEN #ifdef ERRATA__XXX_USR @@ -603,15 +599,15 @@ copyout_right_align_to_doubleword: NOP NOP NOP - bcnd ne0, LEN, copyout_right_aligned_to_doubleword + bcnd ne0, LEN, _ASM_LABEL(copyout_right_aligned_to_doubleword) #else - bcnd.n ne0, LEN, copyout_right_aligned_to_doubleword + bcnd.n ne0, LEN, _ASM_LABEL(copyout_right_aligned_to_doubleword) st.usr r5, DEST, LEN #endif - or r2, r0, r0 /* successful return */ - br .Lcodone + br.n _ASM_LABEL(Lcodone) + or r2, r0, r0 /* successful return */ -ASGLOBAL(copyout_byte_only) 
+ASLOCAL(copyout_byte_only) bcnd eq0, LEN, 2f 1: subu LEN, LEN, 1 @@ -628,17 +624,19 @@ ASGLOBAL(copyout_byte_only) st.b.usr r5, DEST, LEN #endif -2: or r2, r0, r0 /* successful return */ - br .Lcodone +2: + br.n _ASM_LABEL(Lcodone) + or r2, r0, r0 /* successful return */ -.Lcodone: - or.u r5,r0,hi16(_curpcb) - ld r6,r5,lo16(_curpcb) - st r0,r6,PCB_ONFAULT /* clear the handler */ - jmp r1 -.Lcoflt: - or r2, r0, EFAULT /* return fault */ - br .Lcodone +ASLOCAL(Lcodone) + or.u r5,r0,hi16(_C_LABEL(curpcb)) + ld r6,r5,lo16(_C_LABEL(curpcb)) + jmp.n r1 + st r0,r6,PCB_ONFAULT /* clear the handler */ + +ASLOCAL(Lcoflt) + br.n _ASM_LABEL(Lcodone) + or r2, r0, EFAULT /* return fault */ #undef SRC #undef DEST @@ -662,13 +660,13 @@ ASGLOBAL(copyout_byte_only) ENTRY(copyoutstr) /* setup fault handler */ - or.u r6, r0, hi16(_curpcb) - ld r7, r6, lo16(_curpcb) - or.u r6, r0, hi16(.Lcosflt) - or r6, r6, lo16(.Lcosflt) + or.u r6, r0, hi16(_C_LABEL(curpcb)) + ld r7, r6, lo16(_C_LABEL(curpcb)) + or.u r6, r0, hi16(_ASM_LABEL(Lcosflt)) + or r6, r6, lo16(_ASM_LABEL(Lcosflt)) st r6, r7, PCB_ONFAULT - bcnd lt0, CNT, .Lcosflt - bcnd eq0, CNT, .Lcosdone + bcnd lt0, CNT, _ASM_LABEL(Lcosflt) + bcnd eq0, CNT, _ASM_LABEL(Lcosdone) or r6, r0, 0 1: ld.bu r7, SRC, r6 @@ -685,23 +683,23 @@ ENTRY(copyoutstr) addu r6, r6, 1 cmp r7, r6, CNT bb1 lt, r7, 1b - or r2, r0, ENAMETOOLONG /* over flow */ - br .Lcosdone -2: /* all done */ - or r2, r0, 0 - br .Lcosdone + br.n _ASM_LABEL(Lcosdone) + or r2, r0, ENAMETOOLONG +2: + br.n _ASM_LABEL(Lcosdone) + or r2, r0, 0 -.Lcosflt: - or r2, r0, EFAULT /* return fault */ - br .Lcosdone +ASLOCAL(Lcosflt) + br.n _ASM_LABEL(Lcosdone) + or r2, r0, EFAULT -.Lcosdone: +ASLOCAL(Lcosdone) bcnd eq0, LEN, 3f st r6, r0, LEN -3: or.u r5,r0,hi16(_curpcb) - ld r6,r5,lo16(_curpcb) - st r0,r6,PCB_ONFAULT /* clear the handler */ - jmp r1 +3: or.u r5,r0,hi16(_C_LABEL(curpcb)) + ld r6,r5,lo16(_C_LABEL(curpcb)) + jmp.n r1 + st r0,r6,PCB_ONFAULT /* clear the handler */ #undef SRC #undef DEST @@ -716,23 +714,23 @@ ENTRY(copyoutstr) * Copy len bytes from src to dst, aborting if we encounter a page fault. 
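kcopy, which follows, is essentially bcopy with the same pcb_onfault protection as copyin/copyout: it compares the two pointers to choose a forward or backward copy so overlapping ranges stay correct, and returns EFAULT if a fault is taken mid-copy. The direction choice, sketched in C (fault hook elided, name illustrative):

    #include <stddef.h>

    /* Sketch of kcopy's direction choice; the real code also arms pcb_onfault. */
    static void
    overlap_safe_copy(const char *src, char *dst, size_t len)
    {
            if (len == 0 || src == dst)
                    return;
            if (src > dst) {                /* copy forward */
                    while (len--)
                            *dst++ = *src++;
            } else {                        /* src < dst: copy backward */
                    src += len;
                    dst += len;
                    while (len--)
                            *--dst = *--src;
            }
    }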
*/ ENTRY(kcopy) - or.u r5, r0, hi16(_curpcb) - ld r6, r5, lo16(_curpcb) - or.u r5, r0, hi16(kcopy_fault) - or r5, r5, lo16(kcopy_fault) - st r5, r6, PCB_ONFAULT /* pcb_onfault = kcopy_fault */ - bcnd le0,r4,kcopy_out /* nothing to do if count <= 0 */ + or.u r5, r0, hi16(_C_LABEL(curpcb)) + ld r6, r5, lo16(_C_LABEL(curpcb)) + or.u r5, r0, hi16(_ASM_LABEL(kcopy_fault)) + or r5, r5, lo16(_ASM_LABEL(kcopy_fault)) + st r5, r6, PCB_ONFAULT /* pcb_onfault = kcopy_fault */ + bcnd le0,r4,_ASM_LABEL(kcopy_out) /* nothing to do if <= 0 */ /* * check position of source and destination data */ cmp r9,r2,r3 /* compare source address to destination */ - bb1 eq,r9,kcopy_out /* nothing to do if addresses are equal */ - bb1 lo,r9,kcopy_reverse /* copy in reverse if src < dest */ + bb1 eq,r9,_ASM_LABEL(kcopy_out) /* nothing to do if equal */ + bb1 lo,r9,_ASM_LABEL(kcopy_revers)e /* reverse copy if src < dest */ /* * source address is greater than destination address, copy forward */ cmp r9,r4,16 /* see if we have at least 16 bytes */ - bb1 lt,r9,kf_byte_copy /* copy bytes for small data length */ + bb1 lt,r9,_ASM_LABEL(kf_byte_copy) /* copy bytes for small length */ /* * determine copy strategy based on alignment of source and destination */ @@ -740,8 +738,8 @@ ENTRY(kcopy) mask r7,r3,3 /* get 2 low order bits of destintation addr */ mak r6,r6,0<4> /* convert source bits to table offset */ mak r7,r7,0<2> /* convert destination bits to table offset */ - or.u r12,r0,hi16(kf_strat) /* forward strategy table address (high) */ - or r12,r12,lo16(kf_strat) /* forward strategy table address (low) */ + or.u r12,r0,hi16(_ASM_LABEL(kf_strat)) + or r12,r12,lo16(_ASM_LABEL(kf_strat)) addu r6,r6,r7 /* compute final table offset for strategy */ ld r12,r12,r6 /* load the strategy routine */ jmp r12 /* branch to strategy routine */ @@ -749,7 +747,7 @@ ENTRY(kcopy) /* * Copy three bytes from src to destination then copy words */ -ASGLOBAL(kf_3byte_word_copy) +ASLOCAL(kf_3byte_word_copy) ld.bu r6,r2,0 /* load byte from source */ ld.bu r7,r2,1 /* load byte from source */ ld.bu r8,r2,2 /* load byte from source */ @@ -758,24 +756,24 @@ ASGLOBAL(kf_3byte_word_copy) st.b r8,r3,2 /* store byte to destination */ addu r2,r2,3 /* increment source pointer */ addu r3,r3,3 /* increment destination pointer */ - br.n kf_word_copy /* copy full words */ + br.n _ASM_LABEL(kf_word_copy) /* copy full words */ subu r4,r4,3 /* decrement length */ /* * Copy 1 halfword from src to destination then copy words */ -ASGLOBAL(kf_1half_word_copy) +ASLOCAL(kf_1half_word_copy) ld.hu r6,r2,0 /* load half-word from source */ st.h r6,r3,0 /* store half-word to destination */ addu r2,r2,2 /* increment source pointer */ addu r3,r3,2 /* increment destination pointer */ - br.n kf_word_copy /* copy full words */ + br.n _ASM_LABEL(kf_word_copy) /* copy full words */ subu r4,r4,2 /* decrement remaining length */ /* * Copy 1 byte from src to destination then copy words */ -ASGLOBAL(kf_1byte_word_copy) +ASLOCAL(kf_1byte_word_copy) ld.bu r6,r2,0 /* load 1 byte from source */ st.b r6,r3,0 /* store 1 byte to destination */ addu r2,r2,1 /* increment source pointer */ @@ -785,9 +783,9 @@ ASGLOBAL(kf_1byte_word_copy) /* * Copy as many full words as possible, 4 words per loop */ -ASGLOBAL(kf_word_copy) +ASLOCAL(kf_word_copy) cmp r10,r4,16 /* see if we have 16 bytes remaining */ - bb1 lo,r10,kf_byte_copy /* not enough left, copy bytes */ + bb1 lo,r10,_ASM_LABEL(kf_byte_copy) /* not enough left, copy bytes */ ld r6,r2,0 /* load first word */ ld r7,r2,4 /* load second word */ ld 
r8,r2,8 /* load third word */ @@ -798,10 +796,10 @@ ASGLOBAL(kf_word_copy) st r9,r3,12 /* store fourth word */ addu r2,r2,16 /* increment source pointer */ addu r3,r3,16 /* increment destination pointer */ - br.n kf_word_copy /* branch to copy another block */ + br.n _ASM_LABEL(kf_word_copy) /* copy another block */ subu r4,r4,16 /* decrement remaining length */ -ASGLOBAL(kf_1byte_half_copy) +ASLOCAL(kf_1byte_half_copy) ld.bu r6,r2,0 /* load 1 byte from source */ st.b r6,r3,0 /* store 1 byte to destination */ addu r2,r2,1 /* increment source pointer */ @@ -809,9 +807,9 @@ ASGLOBAL(kf_1byte_half_copy) subu r4,r4,1 /* decrement remaining length */ /* fall through to half copy */ -ASGLOBAL(kf_half_copy) +ASLOCAL(kf_half_copy) cmp r10,r4,16 /* see if we have 16 bytes remaining */ - bb1 lo,r10,kf_byte_copy /* not enough left, copy bytes */ + bb1 lo,r10,_ASM_LABEL(kf_byte_copy) /* not enough left, copy bytes */ ld.hu r6,r2,0 /* load first half-word */ ld.hu r7,r2,2 /* load second half-word */ ld.hu r8,r2,4 /* load third half-word */ @@ -830,22 +828,22 @@ ASGLOBAL(kf_half_copy) st.h r13,r3,14 /* store eighth half-word */ addu r2,r2,16 /* increment source pointer */ addu r3,r3,16 /* increment destination pointer */ - br.n kf_half_copy /* branch to copy another block */ + br.n _ASM_LABEL(kf_half_copy) /* copy another block */ subu r4,r4,16 /* decrement remaining length */ -ASGLOBAL(kf_byte_copy) - bcnd eq0,r4,kcopy_out /* branch if nothing left to copy */ +ASLOCAL(kf_byte_copy) + bcnd eq0,r4,_ASM_LABEL(kcopy_out) /* branch if nothing left to copy */ ld.bu r6,r2,0 /* load byte from source */ st.b r6,r3,0 /* store byte in destination */ addu r2,r2,1 /* increment source pointer */ addu r3,r3,1 /* increment destination pointer */ - br.n kf_byte_copy /* branch for next byte */ + br.n _ASM_LABEL(kf_byte_copy) /* branch for next byte */ subu r4,r4,1 /* decrement remaining length */ /* * source address is less than destination address, copy in reverse */ -ASGLOBAL(kcopy_reverse) +ASLOCAL(kcopy_reverse) /* * start copy pointers at end of data */ @@ -855,7 +853,7 @@ ASGLOBAL(kcopy_reverse) * check for short data */ cmp r9,r4,16 /* see if we have at least 16 bytes */ - bb1 lt,r9,kr_byte_copy /* copy bytes for small data length */ + bb1 lt,r9,_ASM_LABEL(kr_byte_copy) /* copy bytes for small data length */ /* * determine copy strategy based on alignment of source and destination */ @@ -863,8 +861,8 @@ ASGLOBAL(kcopy_reverse) mask r7,r3,3 /* get 2 low order bits of destintation addr */ mak r6,r6,0<4> /* convert source bits to table offset */ mak r7,r7,0<2> /* convert destination bits to table offset */ - or.u r12,r0,hi16(kr_strat) /* reverse strategy table address (high) */ - or r12,r12,lo16(kr_strat) /* reverse strategy table address (low) */ + or.u r12,r0,hi16(_ASM_LABEL(kr_strat)) + or r12,r12,lo16(_ASM_LABEL(kr_strat)) addu r6,r6,r7 /* compute final table offset for strategy */ ld r12,r12,r6 /* load the strategy routine */ jmp r12 /* branch to strategy routine */ @@ -872,7 +870,7 @@ ASGLOBAL(kcopy_reverse) /* * Copy three bytes from src to destination then copy words */ -ASGLOBAL(kr_3byte_word_copy) +ASLOCAL(kr_3byte_word_copy) subu r2,r2,3 /* decrement source pointer */ subu r3,r3,3 /* decrement destination pointer */ ld.bu r6,r2,0 /* load byte from source */ @@ -881,24 +879,24 @@ ASGLOBAL(kr_3byte_word_copy) st.b r6,r3,0 /* store byte to destination */ st.b r7,r3,1 /* store byte to destination */ st.b r8,r3,2 /* store byte to destination */ - br.n kr_word_copy /* copy full words */ + br.n 
_ASM_LABEL(kr_word_copy) /* copy full words */ subu r4,r4,3 /* decrement length */ /* * Copy 1 halfword from src to destination then copy words */ -ASGLOBAL(kr_1half_word_copy) +ASLOCAL(kr_1half_word_copy) subu r2,r2,2 /* decrement source pointer */ subu r3,r3,2 /* decrement destination pointer */ ld.hu r6,r2,0 /* load half-word from source */ st.h r6,r3,0 /* store half-word to destination */ - br.n kr_word_copy /* copy full words */ + br.n _ASM_LABEL(kr_word_copy) /* copy full words */ subu r4,r4,2 /* decrement remaining length */ /* * Copy 1 byte from src to destination then copy words */ -ASGLOBAL(kr_1byte_word_copy) +ASLOCAL(kr_1byte_word_copy) subu r2,r2,1 /* decrement source pointer */ subu r3,r3,1 /* decrement destination pointer */ ld.bu r6,r2,0 /* load 1 byte from source */ @@ -908,9 +906,9 @@ ASGLOBAL(kr_1byte_word_copy) /* * Copy as many full words as possible, 4 words per loop */ -ASGLOBAL(kr_word_copy) +ASLOCAL(kr_word_copy) cmp r10,r4,16 /* see if we have 16 bytes remaining */ - bb1 lo,r10,kr_byte_copy /* not enough left, copy bytes */ + bb1 lo,r10,_ASM_LABEL(kr_byte_copy) /* not enough left, copy bytes */ subu r2,r2,16 /* decrement source pointer */ subu r3,r3,16 /* decrement destination pointer */ ld r6,r2,0 /* load first word */ @@ -921,10 +919,10 @@ ASGLOBAL(kr_word_copy) st r7,r3,4 /* store second word */ st r8,r3,8 /* store third word */ st r9,r3,12 /* store fourth word */ - br.n kr_word_copy /* branch to copy another block */ + br.n _ASM_LABEL(kr_word_copy) /* copy another block */ subu r4,r4,16 /* decrement remaining length */ -ASGLOBAL(kr_1byte_half_copy) +ASLOCAL(kr_1byte_half_copy) subu r2,r2,1 /* decrement source pointer */ subu r3,r3,1 /* decrement destination pointer */ ld.bu r6,r2,0 /* load 1 byte from source */ @@ -932,9 +930,9 @@ ASGLOBAL(kr_1byte_half_copy) subu r4,r4,1 /* decrement remaining length */ /* fall through to half copy */ -ASGLOBAL(kr_half_copy) +ASLOCAL(kr_half_copy) cmp r10,r4,16 /* see if we have 16 bytes remaining */ - bb1 lo,r10,kr_byte_copy /* not enough left, copy bytes */ + bb1 lo,r10,_ASM_LABEL(kr_byte_copy) /* not enough left, copy bytes */ subu r2,r2,16 /* decrement source pointer */ subu r3,r3,16 /* decrement destination pointer */ ld.hu r6,r2,0 /* load first half-word */ @@ -953,89 +951,67 @@ ASGLOBAL(kr_half_copy) st.h r11,r3,10 /* store sixth half-word */ st.h r12,r3,12 /* store seventh half-word */ st.h r13,r3,14 /* store eighth half-word */ - br.n kr_half_copy /* branch to copy another block */ + br.n _ASM_LABEL(kr_half_copy) /* copy another block */ subu r4,r4,16 /* decrement remaining length */ -ASGLOBAL(kr_byte_copy) - bcnd eq0,r4,kcopy_out /* branch if nothing left to copy */ +ASLOCAL(kr_byte_copy) + bcnd eq0,r4,_ASM_LABEL(kcopy_out) /* branch if nothing left to copy */ subu r2,r2,1 /* decrement source pointer */ subu r3,r3,1 /* decrement destination pointer */ ld.bu r6,r2,0 /* load byte from source */ st.b r6,r3,0 /* store byte in destination */ - br.n kr_byte_copy /* branch for next byte */ + br.n _ASM_LABEL(kr_byte_copy) /* branch for next byte */ subu r4,r4,1 /* decrement remaining length */ -ASGLOBAL(kcopy_out) +ASLOCAL(kcopy_out) or r2, r0, 0 /* return success */ -ASGLOBAL(kcopy_out_fault) - or.u r5,r0,hi16(_curpcb) - ld r6,r5,lo16(_curpcb) +ASLOCAL(kcopy_out_fault) + or.u r5,r0,hi16(_C_LABEL(curpcb)) + ld r6,r5,lo16(_C_LABEL(curpcb)) st r0,r6,PCB_ONFAULT /* clear the handler */ jmp r1 /* all done, return to caller */ -ASGLOBAL(kcopy_fault) +ASLOCAL(kcopy_fault) or r2, r0, EFAULT /* return fault */ - br kcopy_out_fault 
+ br _ASM_LABEL(kcopy_out_fault) data align 4 -ASGLOBAL(kf_strat) - word kf_word_copy - word kf_byte_copy - word kf_half_copy - word kf_byte_copy - word kf_byte_copy - word kf_3byte_word_copy - word kf_byte_copy - word kf_1byte_half_copy - word kf_half_copy - word kf_byte_copy - word kf_1half_word_copy - word kf_byte_copy - word kf_byte_copy - word kf_1byte_half_copy - word kf_byte_copy - word kf_1byte_word_copy - -ASGLOBAL(kr_strat) - word kr_word_copy - word kr_byte_copy - word kr_half_copy - word kr_byte_copy - word kr_byte_copy - word kr_1byte_word_copy - word kr_byte_copy - word kr_1byte_half_copy - word kr_half_copy - word kr_byte_copy - word kr_1half_word_copy - word kr_byte_copy - word kr_byte_copy - word kr_1byte_half_copy - word kr_byte_copy - word kr_3byte_word_copy - - text -#ifdef notyet /* This give a stack problem. For now, use the above */ -ENTRY(kcopy) - or.u r5, r0, hi16(_curpcb) - ld r6, r5, lo16(_curpcb) - or.u r5, r0, hi16(kcfault) - or r5, r5, lo16(kcfault) - st r5, r6, PCB_ONFAULT /* pcb_onfault = kcfault */ - subu r31, r31, 40 - bsr _ovbcopy /* call ovbcopy */ - addu r31, r31, 40 - or r2, r0, 0 /* return success */ -kcdone: - or.u r5,r0,hi16(_curpcb) - ld r6,r5,lo16(_curpcb) - st r0,r6,PCB_ONFAULT /* clear the handler */ - jmp r1 /* return */ -kcfault: - or r2, r0, EFAULT /* return fault */ - br kcdone -#endif /* 0 */ +ASLOCAL(kf_strat) + word _ASM_LABEL(kf_word_copy) + word _ASM_LABEL(kf_byte_copy) + word _ASM_LABEL(kf_half_copy) + word _ASM_LABEL(kf_byte_copy) + word _ASM_LABEL(kf_byte_copy) + word _ASM_LABEL(kf_3byte_word_copy) + word _ASM_LABEL(kf_byte_copy) + word _ASM_LABEL(kf_1byte_half_copy) + word _ASM_LABEL(kf_half_copy) + word _ASM_LABEL(kf_byte_copy) + word _ASM_LABEL(kf_1half_word_copy) + word _ASM_LABEL(kf_byte_copy) + word _ASM_LABEL(kf_byte_copy) + word _ASM_LABEL(kf_1byte_half_copy) + word _ASM_LABEL(kf_byte_copy) + word _ASM_LABEL(kf_1byte_word_copy) + +ASLOCAL(kr_strat) + word _ASM_LABEL(kr_word_copy) + word _ASM_LABEL(kr_byte_copy) + word _ASM_LABEL(kr_half_copy) + word _ASM_LABEL(kr_byte_copy) + word _ASM_LABEL(kr_byte_copy) + word _ASM_LABEL(kr_1byte_word_copy) + word _ASM_LABEL(kr_byte_copy) + word _ASM_LABEL(kr_1byte_half_copy) + word _ASM_LABEL(kr_half_copy) + word _ASM_LABEL(kr_byte_copy) + word _ASM_LABEL(kr_1half_word_copy) + word _ASM_LABEL(kr_byte_copy) + word _ASM_LABEL(kr_byte_copy) + word _ASM_LABEL(kr_1byte_half_copy) + word _ASM_LABEL(kr_byte_copy) + word _ASM_LABEL(kr_3byte_word_copy) /* * Gcc 2 generates calls to memcpy for bcopies of unknown size. 
memcpy can @@ -1049,7 +1025,7 @@ kcfault: ENTRY(memcpy) or r5, r0, r2 /* dst -> tmp */ or r2, r0, r3 /* src -> 1st arg */ - br.n _ovbcopy /* call ovbcopy */ + br.n _C_LABEL(ovbcopy) or r3, r0, r5 /* dst -> 2nd arg */ /* @@ -1062,18 +1038,18 @@ ENTRY(memcpy) ENTRY(bcopy) ENTRY(ovbcopy) - bcnd le0,r4,bcopy_out /* nothing to do if count <= 0 */ + bcnd le0,r4,_ASM_LABEL(bcopy_out) /* nothing to do if <= 0 */ /* * check position of source and destination data */ cmp r9,r2,r3 /* compare source address to destination */ - bb1 eq,r9,bcopy_out /* nothing to do if addresses are equal */ - bb1 lo,r9,bcopy_reverse /* copy in reverse if src < dest */ + bb1 eq,r9,_ASM_LABEL(bcopy_out) /* nothing to do if equal */ + bb1 lo,r9,_ASM_LABEL(bcopy_reverse) /* reverse copy if src < dest */ /* * source address is greater than destination address, copy forward */ cmp r9,r4,16 /* see if we have at least 16 bytes */ - bb1 lt,r9,f_byte_copy /* copy bytes for small data length */ + bb1 lt,r9,_ASM_LABEL(f_byte_copy) /* copy bytes for small data length */ /* * determine copy strategy based on alignment of source and destination */ @@ -1081,8 +1057,8 @@ ENTRY(ovbcopy) mask r7,r3,3 /* get 2 low order bits of destintation addr */ mak r6,r6,0<4> /* convert source bits to table offset */ mak r7,r7,0<2> /* convert destination bits to table offset */ - or.u r12,r0,hi16(f_strat) /* forward strategy table address (high) */ - or r12,r12,lo16(f_strat) /* forward strategy table address (low) */ + or.u r12,r0,hi16(_ASM_LABEL(f_strat)) + or r12,r12,lo16(_ASM_LABEL(f_strat)) addu r6,r6,r7 /* compute final table offset for strategy */ ld r12,r12,r6 /* load the strategy routine */ jmp r12 /* branch to strategy routine */ @@ -1091,7 +1067,7 @@ ENTRY(ovbcopy) /* * Copy three bytes from src to destination then copy words */ -ASGLOBAL(f_3byte_word_copy) +ASLOCAL(f_3byte_word_copy) ld.bu r6,r2,0 /* load byte from source */ ld.bu r7,r2,1 /* load byte from source */ ld.bu r8,r2,2 /* load byte from source */ @@ -1100,24 +1076,24 @@ ASGLOBAL(f_3byte_word_copy) st.b r8,r3,2 /* store byte to destination */ addu r2,r2,3 /* increment source pointer */ addu r3,r3,3 /* increment destination pointer */ - br.n f_word_copy /* copy full words */ + br.n _ASM_LABEL(f_word_copy) /* copy full words */ subu r4,r4,3 /* decrement length */ /* * Copy 1 halfword from src to destination then copy words */ -ASGLOBAL(f_1half_word_copy) +ASLOCAL(f_1half_word_copy) ld.hu r6,r2,0 /* load half-word from source */ st.h r6,r3,0 /* store half-word to destination */ addu r2,r2,2 /* increment source pointer */ addu r3,r3,2 /* increment destination pointer */ - br.n f_word_copy /* copy full words */ + br.n _ASM_LABEL(f_word_copy) /* copy full words */ subu r4,r4,2 /* decrement remaining length */ /* * Copy 1 byte from src to destination then copy words */ -ASGLOBAL(f_1byte_word_copy) +ASLOCAL(f_1byte_word_copy) ld.bu r6,r2,0 /* load 1 byte from source */ st.b r6,r3,0 /* store 1 byte to destination */ addu r2,r2,1 /* increment source pointer */ @@ -1127,9 +1103,9 @@ ASGLOBAL(f_1byte_word_copy) /* * Copy as many full words as possible, 4 words per loop */ -ASGLOBAL(f_word_copy) +ASLOCAL(f_word_copy) cmp r10,r4,16 /* see if we have 16 bytes remaining */ - bb1 lo,r10,f_byte_copy /* not enough left, copy bytes */ + bb1 lo,r10,_ASM_LABEL(f_byte_copy) /* not enough left, copy bytes */ ld r6,r2,0 /* load first word */ ld r7,r2,4 /* load second word */ ld r8,r2,8 /* load third word */ @@ -1140,10 +1116,10 @@ ASGLOBAL(f_word_copy) st r9,r3,12 /* store fourth word */ addu r2,r2,16 /* 
increment source pointer */ addu r3,r3,16 /* increment destination pointer */ - br.n f_word_copy /* branch to copy another block */ + br.n _ASM_LABEL(f_word_copy) /* branch to copy another block */ subu r4,r4,16 /* decrement remaining length */ -ASGLOBAL(f_1byte_half_copy) +ASLOCAL(f_1byte_half_copy) ld.bu r6,r2,0 /* load 1 byte from source */ st.b r6,r3,0 /* store 1 byte to destination */ addu r2,r2,1 /* increment source pointer */ @@ -1151,9 +1127,9 @@ ASGLOBAL(f_1byte_half_copy) subu r4,r4,1 /* decrement remaining length */ /* fall through to half copy */ -ASGLOBAL(f_half_copy) +ASLOCAL(f_half_copy) cmp r10,r4,16 /* see if we have 16 bytes remaining */ - bb1 lo,r10,f_byte_copy /* not enough left, copy bytes */ + bb1 lo,r10,_ASM_LABEL(f_byte_copy) /* not enough left, copy bytes */ ld.hu r6,r2,0 /* load first half-word */ ld.hu r7,r2,2 /* load second half-word */ ld.hu r8,r2,4 /* load third half-word */ @@ -1172,22 +1148,22 @@ ASGLOBAL(f_half_copy) st.h r13,r3,14 /* store eighth half-word */ addu r2,r2,16 /* increment source pointer */ addu r3,r3,16 /* increment destination pointer */ - br.n f_half_copy /* branch to copy another block */ + br.n _ASM_LABEL(f_half_copy) /* branch to copy another block */ subu r4,r4,16 /* decrement remaining length */ -ASGLOBAL(f_byte_copy) - bcnd eq0,r4,bcopy_out /* branch if nothing left to copy */ +ASLOCAL(f_byte_copy) + bcnd eq0,r4,_ASM_LABEL(bcopy_out) /* branch if nothing left to copy */ ld.bu r6,r2,0 /* load byte from source */ st.b r6,r3,0 /* store byte in destination */ addu r2,r2,1 /* increment source pointer */ addu r3,r3,1 /* increment destination pointer */ - br.n f_byte_copy /* branch for next byte */ + br.n _ASM_LABEL(f_byte_copy) /* branch for next byte */ subu r4,r4,1 /* decrement remaining length */ /* * source address is less than destination address, copy in reverse */ -ASGLOBAL(bcopy_reverse) +ASLOCAL(bcopy_reverse) /* * start copy pointers at end of data */ @@ -1197,7 +1173,7 @@ ASGLOBAL(bcopy_reverse) * check for short data */ cmp r9,r4,16 /* see if we have at least 16 bytes */ - bb1 lt,r9,r_byte_copy /* copy bytes for small data length */ + bb1 lt,r9,_ASM_LABEL(r_byte_copy) /* copy bytes for small data length */ /* * determine copy strategy based on alignment of source and destination */ @@ -1205,8 +1181,8 @@ ASGLOBAL(bcopy_reverse) mask r7,r3,3 /* get 2 low order bits of destintation addr */ mak r6,r6,0<4> /* convert source bits to table offset */ mak r7,r7,0<2> /* convert destination bits to table offset */ - or.u r12,r0,hi16(r_strat) /* reverse strategy table address (high) */ - or r12,r12,lo16(r_strat) /* reverse strategy table address (low) */ + or.u r12,r0,hi16(_ASM_LABEL(r_strat)) + or r12,r12,lo16(_ASM_LABEL(r_strat)) addu r6,r6,r7 /* compute final table offset for strategy */ ld r12,r12,r6 /* load the strategy routine */ jmp r12 /* branch to strategy routine */ @@ -1214,7 +1190,7 @@ ASGLOBAL(bcopy_reverse) /* * Copy three bytes from src to destination then copy words */ -ASGLOBAL(r_3byte_word_copy) +ASLOCAL(r_3byte_word_copy) subu r2,r2,3 /* decrement source pointer */ subu r3,r3,3 /* decrement destination pointer */ ld.bu r6,r2,0 /* load byte from source */ @@ -1223,24 +1199,24 @@ ASGLOBAL(r_3byte_word_copy) st.b r6,r3,0 /* store byte to destination */ st.b r7,r3,1 /* store byte to destination */ st.b r8,r3,2 /* store byte to destination */ - br.n r_word_copy /* copy full words */ + br.n _ASM_LABEL(r_word_copy) /* copy full words */ subu r4,r4,3 /* decrement length */ /* * Copy 1 halfword from src to destination then copy 
words */ -ASGLOBAL(r_1half_word_copy) +ASLOCAL(r_1half_word_copy) subu r2,r2,2 /* decrement source pointer */ subu r3,r3,2 /* decrement destination pointer */ ld.hu r6,r2,0 /* load half-word from source */ st.h r6,r3,0 /* store half-word to destination */ - br.n r_word_copy /* copy full words */ + br.n _ASM_LABEL(r_word_copy) /* copy full words */ subu r4,r4,2 /* decrement remaining length */ /* * Copy 1 byte from src to destination then copy words */ -ASGLOBAL(r_1byte_word_copy) +ASLOCAL(r_1byte_word_copy) subu r2,r2,1 /* decrement source pointer */ subu r3,r3,1 /* decrement destination pointer */ ld.bu r6,r2,0 /* load 1 byte from source */ @@ -1250,9 +1226,9 @@ ASGLOBAL(r_1byte_word_copy) /* * Copy as many full words as possible, 4 words per loop */ -ASGLOBAL(r_word_copy) +ASLOCAL(r_word_copy) cmp r10,r4,16 /* see if we have 16 bytes remaining */ - bb1 lo,r10,r_byte_copy /* not enough left, copy bytes */ + bb1 lo,r10,_ASM_LABEL(r_byte_copy) /* not enough left, copy bytes */ subu r2,r2,16 /* decrement source pointer */ subu r3,r3,16 /* decrement destination pointer */ ld r6,r2,0 /* load first word */ @@ -1263,10 +1239,10 @@ ASGLOBAL(r_word_copy) st r7,r3,4 /* store second word */ st r8,r3,8 /* store third word */ st r9,r3,12 /* store fourth word */ - br.n r_word_copy /* branch to copy another block */ + br.n _ASM_LABEL(r_word_copy) /* branch to copy another block */ subu r4,r4,16 /* decrement remaining length */ -ASGLOBAL(r_1byte_half_copy) +ASLOCAL(r_1byte_half_copy) subu r2,r2,1 /* decrement source pointer */ subu r3,r3,1 /* decrement destination pointer */ ld.bu r6,r2,0 /* load 1 byte from source */ @@ -1274,9 +1250,9 @@ ASGLOBAL(r_1byte_half_copy) subu r4,r4,1 /* decrement remaining length */ /* fall through to half copy */ -ASGLOBAL(r_half_copy) +ASLOCAL(r_half_copy) cmp r10,r4,16 /* see if we have 16 bytes remaining */ - bb1 lo,r10,r_byte_copy /* not enough left, copy bytes */ + bb1 lo,r10,_ASM_LABEL(r_byte_copy) /* not enough left, copy bytes */ subu r2,r2,16 /* decrement source pointer */ subu r3,r3,16 /* decrement destination pointer */ ld.hu r6,r2,0 /* load first half-word */ @@ -1295,58 +1271,58 @@ ASGLOBAL(r_half_copy) st.h r11,r3,10 /* store sixth half-word */ st.h r12,r3,12 /* store seventh half-word */ st.h r13,r3,14 /* store eighth half-word */ - br.n r_half_copy /* branch to copy another block */ + br.n _ASM_LABEL(r_half_copy) /* branch to copy another block */ subu r4,r4,16 /* decrement remaining length */ -ASGLOBAL(r_byte_copy) - bcnd eq0,r4,bcopy_out /* branch if nothing left to copy */ +ASLOCAL(r_byte_copy) + bcnd eq0,r4,_ASM_LABEL(bcopy_out) /* branch if nothing left to copy */ subu r2,r2,1 /* decrement source pointer */ subu r3,r3,1 /* decrement destination pointer */ ld.bu r6,r2,0 /* load byte from source */ st.b r6,r3,0 /* store byte in destination */ - br.n r_byte_copy /* branch for next byte */ + br.n _ASM_LABEL(r_byte_copy) /* branch for next byte */ subu r4,r4,1 /* decrement remaining length */ -ASGLOBAL(bcopy_out) +ASLOCAL(bcopy_out) jmp r1 /* all done, return to caller */ data align 4 -ASGLOBAL(f_strat) - word f_word_copy - word f_byte_copy - word f_half_copy - word f_byte_copy - word f_byte_copy - word f_3byte_word_copy - word f_byte_copy - word f_1byte_half_copy - word f_half_copy - word f_byte_copy - word f_1half_word_copy - word f_byte_copy - word f_byte_copy - word f_1byte_half_copy - word f_byte_copy - word f_1byte_word_copy - -ASGLOBAL(r_strat) - word r_word_copy - word r_byte_copy - word r_half_copy - word r_byte_copy - word r_byte_copy - word 
r_1byte_word_copy - word r_byte_copy - word r_1byte_half_copy - word r_half_copy - word r_byte_copy - word r_1half_word_copy - word r_byte_copy - word r_byte_copy - word r_1byte_half_copy - word r_byte_copy - word r_3byte_word_copy +ASLOCAL(f_strat) + word _ASM_LABEL(f_word_copy) + word _ASM_LABEL(f_byte_copy) + word _ASM_LABEL(f_half_copy) + word _ASM_LABEL(f_byte_copy) + word _ASM_LABEL(f_byte_copy) + word _ASM_LABEL(f_3byte_word_copy) + word _ASM_LABEL(f_byte_copy) + word _ASM_LABEL(f_1byte_half_copy) + word _ASM_LABEL(f_half_copy) + word _ASM_LABEL(f_byte_copy) + word _ASM_LABEL(f_1half_word_copy) + word _ASM_LABEL(f_byte_copy) + word _ASM_LABEL(f_byte_copy) + word _ASM_LABEL(f_1byte_half_copy) + word _ASM_LABEL(f_byte_copy) + word _ASM_LABEL(f_1byte_word_copy) + +ASLOCAL(r_strat) + word _ASM_LABEL(r_word_copy) + word _ASM_LABEL(r_byte_copy) + word _ASM_LABEL(r_half_copy) + word _ASM_LABEL(r_byte_copy) + word _ASM_LABEL(r_byte_copy) + word _ASM_LABEL(r_1byte_word_copy) + word _ASM_LABEL(r_byte_copy) + word _ASM_LABEL(r_1byte_half_copy) + word _ASM_LABEL(r_half_copy) + word _ASM_LABEL(r_byte_copy) + word _ASM_LABEL(r_1half_word_copy) + word _ASM_LABEL(r_byte_copy) + word _ASM_LABEL(r_byte_copy) + word _ASM_LABEL(r_1byte_half_copy) + word _ASM_LABEL(r_byte_copy) + word _ASM_LABEL(r_3byte_word_copy) text @@ -1371,8 +1347,6 @@ ASGLOBAL(r_strat) #define R_addr r6 /* R_addr && R_temp SHARE */ #define R_temp r6 /* R_addr && R_temp SHARE */ - -ENTRY(blkclr) ENTRY(bzero) /* * If the destination is not word aligned, we'll word align @@ -1382,18 +1356,18 @@ ENTRY(bzero) * (of the destination address). If either are set, it's * not word aligned. */ - bb1 0, R_dest, not_initially_word_aligned - bb1 1, R_dest, not_initially_word_aligned + bb1 0, R_dest, _ASM_LABEL(not_initially_word_aligned) + bb1 1, R_dest, _ASM_LABEL(not_initially_word_aligned) - now_word_aligned: +ASLOCAL(now_word_aligned) /* * before we get into the main loop, grab the * address of the label "mark" below. */ - or.u R_mark_address, r0, hi16(mark) - or R_mark_address, R_mark_address, lo16(mark) + or.u R_mark_address, r0, hi16(_ASM_LABEL(mark)) + or R_mark_address, R_mark_address, lo16(_ASM_LABEL(mark)) - top_of_main_loop: +ASLOCAL(top_of_main_loop) #define MAX_AT_ONE_TIME 128 /* * Now we find out how many words we can zero-fill in a row. @@ -1411,7 +1385,7 @@ ENTRY(bzero) clr R_bytes, R_len, 2<0> /* if we're done clearing WORDS, jump out */ - bcnd eq0, R_bytes, done_doing_words + bcnd eq0, R_bytes, _ASM_LABEL(done_doing_words) /* if the number of bytes > MAX_AT_ONE_TIME, do only the max */ cmp R_temp, R_bytes, MAX_AT_ONE_TIME @@ -1432,8 +1406,7 @@ ENTRY(bzero) br.n do_max or R_bytes, r0, MAX_AT_ONE_TIME - 1: - +1: /* * Now we have the number of bytes to zero during this iteration, * (which, as it happens, is the last iteration if we're here). @@ -1446,7 +1419,8 @@ ENTRY(bzero) /* and go there (after adjusting the length via ".n") */ jmp.n R_addr -do_max: subu R_len, R_len, R_bytes /* NOTE: this is in the delay slot! */ +ASLOCAL(do_max) + subu R_len, R_len, R_bytes /* NOTE: this is in the delay slot! */ st r0, R_dest, 0x7c /* 128 */ st r0, R_dest, 0x78 /* 124 */ @@ -1481,33 +1455,32 @@ do_max: subu R_len, R_len, R_bytes /* NOTE: this is in the delay slot! 
*/ st r0, R_dest, 0x04 /* 4 */ st r0, R_dest, 0x00 /* 0 */ -mark: - br.n top_of_main_loop +ASLOCAL(mark) + br.n _ASM_LABEL(top_of_main_loop) addu R_dest, R_dest, R_bytes /* bump up the dest address */ -done_doing_words: - bcnd ne0, R_len, finish_up_last_bytes +ASLOCAL(done_doing_words) + bcnd ne0, R_len, 1f jmp r1 -finish_up_last_bytes: +1: subu R_len, R_len, 1 - bcnd.n ne0, R_len, finish_up_last_bytes + bcnd.n ne0, R_len, 1b st.b r0, R_dest, R_len - -leave: +1: jmp r1 -not_initially_word_aligned: +ASLOCAL(not_initially_word_aligned) /* * Bzero to word-align the address (at least if the length allows it). */ - bcnd eq0, R_len, leave + bcnd eq0, R_len, 1b st.b r0, R_dest, 0 addu R_dest, R_dest, 1 mask R_temp, R_dest, 0x3 - bcnd.n eq0, R_temp, now_word_aligned + bcnd.n eq0, R_temp, _ASM_LABEL(now_word_aligned) subu R_len, R_len, 1 - br not_initially_word_aligned + br _ASM_LABEL(not_initially_word_aligned) #undef R_dest #undef R_len @@ -1522,8 +1495,7 @@ not_initially_word_aligned: * int setjmp(label_t *); * void longjmp(label_t*); */ - global _setjmp -_setjmp: +ENTRY(setjmp) st r1,r2,0 st r14,r2,4 st r15,r2,2*4 @@ -1546,8 +1518,7 @@ _setjmp: jmp.n r1 or r2,r0,r0 - global _longjmp -_longjmp: +ENTRY(longjmp) ld r1,r2,0 ld r14,r2,4 ld r15,r2,2*4 @@ -1570,36 +1541,6 @@ _longjmp: jmp.n r1 or r2,r0,1 -ENTRY(longjmp_int_enable) - ld r1,r2,0 - ld r14,r2,4 - ld r15,r2,2*4 - ld r16,r2,3*4 - ld r17,r2,4*4 - ld r18,r2,5*4 - ld r19,r2,6*4 - ld r20,r2,7*4 - ld r21,r2,8*4 - ld r22,r2,9*4 - ld r23,r2,10*4 - ld r24,r2,11*4 - ld r25,r2,12*4 - ld r26,r2,13*4 - ld r27,r2,14*4 - ld r28,r2,15*4 - ld r29,r2,16*4 - ld r30,r2,17*4 - ld r31,r2,18*4 - or r2,r3,r0 - ldcr r10,PSR - clr r10,r10,1<PSR_INTERRUPT_DISABLE_BIT> - stcr r10,PSR - jmp r1 - -ENTRY(getsp) - or r2, r0, r31 - jmp r1 - /* * invalidate_pte(pte) * @@ -1634,14 +1575,14 @@ ENTRY(safe_byte_access) jmp r1 ENTRY(guarded_access) -ENTRY(guarded_access_start) +GLOBAL(guarded_access_start) cmp r9,r3,4 bb1 eq,r9,@L145 cmp r9,r3,2 bb1 eq,r9,@L144 cmp r9,r3,1 bb1 eq,r9,@L143 - br _guarded_access_bad + br _C_LABEL(guarded_access_bad) @L143: ld.b r9,r0,r2 tb1 0, r0, 0 @@ -1658,12 +1599,12 @@ ENTRY(guarded_access_start) st r9,r0,r4 br @L142 -ENTRY(guarded_access_bad) +GLOBAL(guarded_access_bad) jmp.n r1 - or r2,r0,14 + or r2,r0,EFAULT @L142: -ENTRY(guarded_access_end) +GLOBAL(guarded_access_end) jmp.n r1 or r2,r0,0 @@ -1703,9 +1644,9 @@ ENTRY(set_cpu_number) jmp r1 1: /* bad cpu number*/ - or.u r2, r0, hi16(1f) - bsr.n _panic - or r2, r2, lo16(1f) -1: string "set_cpu_number: bad CPU number\0" - align 4 - /* will not return */ + or.u r2, r0, hi16(9f) + bsr.n _C_LABEL(panic) + or r2, r2, lo16(9f) + + data +9: string "set_cpu_number: bad CPU number\0" diff --git a/sys/arch/mvme88k/mvme88k/m88100_fp.S b/sys/arch/mvme88k/mvme88k/m88100_fp.S index 0d61f346c39..dfaf5c20a94 100644 --- a/sys/arch/mvme88k/mvme88k/m88100_fp.S +++ b/sys/arch/mvme88k/mvme88k/m88100_fp.S @@ -1,4 +1,4 @@ -/* $OpenBSD: m88100_fp.S,v 1.16 2001/12/22 17:57:11 smurph Exp $ */ +/* $OpenBSD: m88100_fp.S,v 1.17 2003/08/11 20:45:17 miod Exp $ */ /* * Mach Operating System * Copyright (c) 1991 Carnegie Mellon University @@ -117,10 +117,7 @@ #define MARK or r21, r0, __LINE__ - text - align 8 - global _m88100_Xfp_precise -_m88100_Xfp_precise: +ASENTRY(m88100_Xfp_precise) or r29, r3, r0 /* r29 is now the E.F. 
*/ subu r31, r31, 40 st r1, r31, 32 @@ -2225,12 +2222,8 @@ S2noinfd: clr r10,r7,1<sign> /* clear the sign bit */ /* function */ operation: jmp r1 /* return from function */ - data - - text - align 8 +ASENTRY(Xfp_imprecise) /* input: r3 is the excepton frame */ -_Xfp_imprecise: global _Xfp_imprecise or r29, r3, r0 /* r29 is now the E.F. */ subu r31, r31, 40 st r1, r31, 32 diff --git a/sys/arch/mvme88k/mvme88k/m88110_fp.S b/sys/arch/mvme88k/mvme88k/m88110_fp.S index 9b8c073efbf..8c69952419d 100644 --- a/sys/arch/mvme88k/mvme88k/m88110_fp.S +++ b/sys/arch/mvme88k/mvme88k/m88110_fp.S @@ -1,4 +1,4 @@ -/* $OpenBSD: m88110_fp.S,v 1.9 2001/12/22 17:57:11 smurph Exp $ */ +/* $OpenBSD: m88110_fp.S,v 1.10 2003/08/11 20:45:17 miod Exp $ */ /* * Copyright (c) 1999 Steve Murphree, Jr. * All rights reserved. @@ -52,8 +52,7 @@ #include <machine/trap.h> #include <machine/asm.h> - .text -ENTRY(m88110_Xfp_precise) +ASENTRY(m88110_Xfp_precise) or r29, r3, r0 /* r29 is now the E.F. */ subu r31, r31, 40 st r1, r31, 32 diff --git a/sys/arch/mvme88k/mvme88k/m88110_mmu.S b/sys/arch/mvme88k/mvme88k/m88110_mmu.S index 962da1fc2d5..3f82f7288a8 100644 --- a/sys/arch/mvme88k/mvme88k/m88110_mmu.S +++ b/sys/arch/mvme88k/mvme88k/m88110_mmu.S @@ -1,4 +1,4 @@ -# $OpenBSD: m88110_mmu.S,v 1.7 2001/12/22 19:17:01 smurph Exp $ +# $OpenBSD: m88110_mmu.S,v 1.8 2003/08/11 20:45:17 miod Exp $ /* * Copyright (c) 2000 Steve Murphree, Jr. * All rights reserved. @@ -36,14 +36,14 @@ ENTRY(set_icmd) FLUSH_PIPELINE jmp.n r1 - stcr r2, ICMD + stcr r2, ICMD ENTRY(set_ictl) FLUSH_PIPELINE jmp.n r1 - stcr r2, ICTL + stcr r2, ICTL ENTRY(set_isar) jmp.n r1 - stcr r2, ISAR + stcr r2, ISAR ENTRY(set_isap) FLUSH_PIPELINE NOP @@ -56,29 +56,29 @@ ENTRY(set_iuap) jmp r1 ENTRY(set_iir) jmp.n r1 - stcr r2, IIR + stcr r2, IIR ENTRY(set_ibp) jmp.n r1 - stcr r2, IBP + stcr r2, IBP ENTRY(set_ippu) jmp.n r1 - stcr r2, IPPU + stcr r2, IPPU ENTRY(set_ippl) jmp.n r1 - stcr r2, IPPL + stcr r2, IPPL ENTRY(set_isr) jmp.n r1 - stcr r2, ISR + stcr r2, ISR ENTRY(set_ilar) jmp.n r1 - stcr r2, ILAR + stcr r2, ILAR ENTRY(set_ipar) jmp.n r1 - stcr r2, IPAR + stcr r2, IPAR ENTRY(set_dcmd) FLUSH_PIPELINE jmp.n r1 - stcr r2, DCMD + stcr r2, DCMD ENTRY(set_dctl) FLUSH_PIPELINE stcr r2, DCTL @@ -105,96 +105,96 @@ ENTRY(set_duap) jmp r1 ENTRY(set_dir) jmp.n r1 - stcr r2, DIR + stcr r2, DIR ENTRY(set_dbp) jmp.n r1 - stcr r2, DBP + stcr r2, DBP ENTRY(set_dppu) jmp.n r1 - stcr r2, DPPU + stcr r2, DPPU ENTRY(set_dppl) jmp.n r1 - stcr r2, DPPL + stcr r2, DPPL ENTRY(set_dsr) jmp.n r1 - stcr r2, DSR + stcr r2, DSR ENTRY(set_dlar) jmp.n r1 - stcr r2, DLAR + stcr r2, DLAR ENTRY(set_dpar) jmp.n r1 - stcr r2, DPAR + stcr r2, DPAR /* get routines */ ENTRY(get_icmd) jmp.n r1 - ldcr r2, ICMD + ldcr r2, ICMD ENTRY(get_ictl) jmp.n r1 - ldcr r2, ICTL + ldcr r2, ICTL ENTRY(get_isar) jmp.n r1 - ldcr r2, ISAR + ldcr r2, ISAR ENTRY(get_isap) jmp.n r1 - ldcr r2, ISAP + ldcr r2, ISAP ENTRY(get_iuap) jmp.n r1 - ldcr r2, IUAP + ldcr r2, IUAP ENTRY(get_iir) jmp.n r1 - ldcr r2, IIR + ldcr r2, IIR ENTRY(get_ibp) jmp.n r1 - ldcr r2, IBP + ldcr r2, IBP ENTRY(get_ippu) jmp.n r1 - ldcr r2, IPPU + ldcr r2, IPPU ENTRY(get_ippl) jmp.n r1 - ldcr r2, IPPL + ldcr r2, IPPL ENTRY(get_isr) jmp.n r1 - ldcr r2, ISR + ldcr r2, ISR ENTRY(get_ilar) jmp.n r1 - ldcr r2, ILAR + ldcr r2, ILAR ENTRY(get_ipar) jmp.n r1 - ldcr r2, IPAR + ldcr r2, IPAR ENTRY(get_dcmd) jmp.n r1 - ldcr r2, DCMD + ldcr r2, DCMD ENTRY(get_dctl) jmp.n r1 - ldcr r2, DCTL + ldcr r2, DCTL ENTRY(get_dsar) jmp.n r1 - ldcr r2, DSAR + ldcr r2, DSAR 
ENTRY(get_dsap) jmp.n r1 - ldcr r2, DSAP + ldcr r2, DSAP ENTRY(get_duap) jmp.n r1 - ldcr r2, DUAP + ldcr r2, DUAP ENTRY(get_dir) jmp.n r1 - ldcr r2, DIR + ldcr r2, DIR ENTRY(get_dbp) jmp.n r1 - ldcr r2, DBP + ldcr r2, DBP ENTRY(get_dppu) jmp.n r1 - ldcr r2, DPPU + ldcr r2, DPPU ENTRY(get_dppl) jmp.n r1 - ldcr r2, DPPL + ldcr r2, DPPL ENTRY(get_dsr) jmp.n r1 - ldcr r2, DSR + ldcr r2, DSR ENTRY(get_dlar) jmp.n r1 - ldcr r2, DLAR + ldcr r2, DLAR ENTRY(get_dpar) jmp.n r1 - ldcr r2, DPAR + ldcr r2, DPAR diff --git a/sys/arch/mvme88k/mvme88k/machdep.c b/sys/arch/mvme88k/mvme88k/machdep.c index db3f8fca725..5b2cc9310eb 100644 --- a/sys/arch/mvme88k/mvme88k/machdep.c +++ b/sys/arch/mvme88k/mvme88k/machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: machdep.c,v 1.104 2003/08/07 17:23:43 miod Exp $ */ +/* $OpenBSD: machdep.c,v 1.105 2003/08/11 20:45:17 miod Exp $ */ /* * Copyright (c) 1998, 1999, 2000, 2001 Steve Murphree, Jr. * Copyright (c) 1996 Nivas Madhur @@ -119,8 +119,6 @@ vm_offset_t interrupt_stack[MAX_CPUS] = {0}; struct md_p md; /* prototypes */ -void m88100_Xfp_precise(void); -void m88110_Xfp_precise(void); void setupiackvectors(void); void regdump(struct trapframe *f); void dumpsys(void); @@ -1146,7 +1144,7 @@ __dead void boot(howto) register int howto; { - /* take a snap shot before clobbering any registers */ + /* take a snapshot before clobbering any registers */ if (curproc && curproc->p_addr) savectx(curpcb); @@ -2333,7 +2331,7 @@ mvme_bootstrap() curproc = &proc0; curpcb = &proc0paddr->u_pcb; - /* zreo out the machine dependant function pointers */ + /* zero out the machine dependant function pointers */ bzero(&md, sizeof(struct md_p)); buginit(); /* init the bug routines */ diff --git a/sys/arch/mvme88k/mvme88k/process.S b/sys/arch/mvme88k/mvme88k/process.S index 29368efb4a9..f3a63118c0c 100644 --- a/sys/arch/mvme88k/mvme88k/process.S +++ b/sys/arch/mvme88k/mvme88k/process.S @@ -1,4 +1,4 @@ -/* $OpenBSD: process.S,v 1.18 2003/08/11 01:09:35 miod Exp $ */ +/* $OpenBSD: process.S,v 1.19 2003/08/11 20:45:17 miod Exp $ */ /* * Copyright (c) 1996 Nivas Madhur * All rights reserved. @@ -37,25 +37,26 @@ data align 4 -swchanpanic: - string "switch wchan %x" +ASLOCAL(swchanpanic) + string "switch wchan %x\0" align 4 -swsrunpanic: - string "switch SRUN %x" +ASLOCAL(swsrunpanic) + string "switch SRUN %x\0" text align 8 -Lswchanpanic: - or.u r2, r0, hi16(swchanpanic) - or r2, r2, lo16(swchanpanic) - or r3, r0, r9 - bsr _panic - -Lswsrunpanic: - or.u r2, r0, hi16(swsrunpanic) - or r2, r2, lo16(swsrunpanic) - or r3, r0, r9 - bsr _panic +ASLOCAL(Lswchanpanic) + or.u r2, r0, hi16(_ASM_LABEL(swchanpanic)) + or r2, r2, lo16(_ASM_LABEL(swchanpanic)) + bsr.n _C_LABEL(panic) + or r3, r0, r9 + +ASLOCAL(Lswsrunpanic) + or.u r2, r0, hi16(_ASM_LABEL(swsrunpanic)) + or r2, r2, lo16(_ASM_LABEL(swsrunpanic)) + bsr.n _C_LABEL(panic) + or r3, r0, r9 + /* * At exit of a process, do a cpu_switch for the last time. * The mapping of the pcb at p->p_addr has already been deleted, @@ -69,41 +70,21 @@ ENTRY(switch_exit) * Change pcb to idle u. area, i.e., set r31 to top of stack * and set curpcb to point to _idle_u. r2 contains proc *p. 
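switch_exit, whose body follows, runs the last instructions of an exiting process: it moves onto the spare idle_u stack (the process' own u-area is about to be reclaimed), clears curproc, lets exit2() schedule the vmspace and kernel stack to be freed, and drops into cpu_switch. The same sequence sketched in C, with prototypes assumed for illustration:

    struct proc;
    struct pcb;
    struct user;

    extern struct user idle_u;          /* spare u-area used while exiting */
    extern struct pcb *curpcb;
    extern struct proc *curproc;
    extern void exit2(struct proc *);
    extern int cpu_switch(struct proc *);

    /* Sketch of switch_exit(): never returns, the CPU switches away. */
    static void
    switch_exit_sketch(struct proc *p)
    {
            curpcb = (struct pcb *)&idle_u; /* run on idle_u's stack from now on */
            curproc = 0;                    /* no current process */
            exit2(p);                       /* schedule vmspace + stack to be freed */
            cpu_switch(p);                  /* pick something else to run */
    }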
*/ - or.u r30, r0, hi16(_idle_u) - or r30, r30,lo16(_idle_u) - addu r31, r30, USIZE /* now on idle_u stack */ - or.u r10, r0, hi16(_curpcb) - st r30, r10,lo16(_curpcb) /* curpcb = &idle_u */ - or.u r10, r0, hi16(_curproc) - st r0, r10, lo16(_curproc) /* curproc = NULL */ + or.u r30, r0, hi16(_C_LABEL(idle_u)) + or r30, r30,lo16(_C_LABEL(idle_u)) + addu r31, r30, USIZE /* now on idle_u stack */ + or.u r10, r0, hi16(_C_LABEL(curpcb)) + st r30, r10,lo16(_C_LABEL(curpcb)) /* curpcb = &idle_u */ + or.u r10, r0, hi16(_C_LABEL(curproc)) + st r0, r10, lo16(_C_LABEL(curproc)) /* curproc = NULL */ /* Schedule the vmspace and stack to be freed. */ - bsr.n _exit2 /* call exit2(p); */ - subu r31, r31, 48 /* allocate stack */ + bsr.n _C_LABEL(exit2) + subu r31, r31, 48 /* allocate stack */ addu r31, r31, 48 /* restore stack */ - bsr.n _cpu_switch /* goto final switch */ - or r2, r0, r10 + bsr.n _C_LABEL(cpu_switch) /* goto final switch */ + or r2, r0, r10 -#if 0 -/* - * When no processes are on the runq, switch - * idles here watiing for something to come ready. - */ -ASGLOBAL(idle) - or.u r10, r0, hi16(_curproc) - st r0, r10, lo16(_curproc) /* curproc = NULL */ - - bsr.n _setipl /* unblock all interrupts */ - or r2, r0, 0 - /* spin reading whichqs until != 0 */ -1: - or.u r10, r0, hi16(_whichqs) - ld r11, r10,lo16(_whichqs) - bcnd eq0, r11, 1b - bsr.n _setipl - or r2, r0, IPL_HIGH /* block all ints */ - br Lsw1 -#endif /* 0 */ /* * cpu_switch() * XXX - Arg 1 is a proc pointer (curproc) but this doesn't use it. @@ -116,8 +97,8 @@ ENTRY(cpu_switch) * Save state of previous process in its pcb. */ - or.u r10, r0, hi16(_curpcb) - ld r10,r10, lo16(_curpcb) + or.u r10, r0, hi16(_C_LABEL(curpcb)) + ld r10,r10, lo16(_C_LABEL(curpcb)) st r1, r10, PCB_PC /* save r1 in pcb */ st r14,r10, PCB_R14 st r15,r10, PCB_R15 @@ -138,51 +119,50 @@ ENTRY(cpu_switch) st r30,r10, PCB_R30 /* save frame pointer & stack pointer */ st r31,r10, PCB_SP - or r14,r10, 0 /* save r10 in r14 */ - - bsr _getipl + bsr.n _C_LABEL(getipl) + or r14,r10, 0 /* save r10 in r14 */ st r2, r14, PCB_IPL /* save ipl in pcb */ - or.u r11, r0, hi16(_curproc) - ld r11,r11, lo16(_curproc) + or.u r11, r0, hi16(_C_LABEL(curproc)) + ld r11,r11, lo16(_C_LABEL(curproc)) - or.u r11, r0, hi16(_curproc) - st r0, r11, lo16(_curproc) /* curproc = NULL */ + or.u r11, r0, hi16(_C_LABEL(curproc)) + st r0, r11, lo16(_C_LABEL(curproc)) /* curproc = NULL */ -Lidleloop: +ASLOCAL(Lidleloop) /* * Find the highest-priority queue that isn't empty, * then take the first proc from that queue. 
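The selection loop below spins on whichqs with interrupts enabled and, once it is non-zero, re-checks it at IPL_HIGH and scans for the lowest set bit, i.e. the highest-priority non-empty run queue. The scan in C, assuming the usual 32-queue whichqs/qs layout (a sketch; struct prochd's fields are an assumption, not the exact header):

    #include <stdint.h>

    struct proc;
    struct prochd { struct proc *ph_link, *ph_rlink; };  /* circular queue head */
    extern uint32_t whichqs;            /* bit i set => qs[i] non-empty */
    extern struct prochd qs[32];

    /* Return the index of the highest-priority non-empty run queue, or -1. */
    static int
    pick_runqueue(void)
    {
            uint32_t w = whichqs;
            int i;

            for (i = 0; w != 0; i++, w >>= 1)
                    if (w & 1)
                            return i;   /* lowest set bit wins */
            return -1;                  /* all empty: caller keeps idling */
    }

If removing the head leaves that queue empty, the corresponding whichqs bit is cleared again, which is what the mak/and.c sequence after the dequeue does.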
*/ - or.u r7, r0, hi16(_whichqs) - ld r7, r7, lo16(_whichqs) + or.u r7, r0, hi16(_C_LABEL(whichqs)) + ld r7, r7, lo16(_C_LABEL(whichqs)) - bcnd ne0, r7, Ldoneloop + bcnd ne0, r7, _ASM_LABEL(Ldoneloop) -Lloopchk: /* if whichqs is zero, keep checking */ - bsr.n _setipl /* enable all ints */ - or r2, r0, 0 +ASLOCAL(Lloopchk) /* if whichqs is zero, keep checking */ + bsr.n _C_LABEL(setipl) /* enable all ints */ + or r2, r0, 0 - br Lidleloop + br _ASM_LABEL(Lidleloop) -Ldoneloop: +ASLOCAL(Ldoneloop) - bsr.n _setipl /* disable ints */ - or r2, r0, IPL_HIGH + bsr.n _C_LABEL(setipl) /* disable ints */ + or r2, r0, IPL_HIGH - or.u r7, r0, hi16(_whichqs) /* reload whichqs */ - ld r7, r7, lo16(_whichqs) + or.u r7, r0, hi16(_C_LABEL(whichqs)) /* reload whichqs */ + ld r7, r7, lo16(_C_LABEL(whichqs)) - bcnd eq0, r7, Lloopchk /* keep spinning for whichqs to be !=0*/ + bcnd eq0, r7, _ASM_LABEL(Lloopchk) /* keep spinning for whichqs to be != 0 */ xor r6, r6, r6 /* set r6 to 0 */ 1: bb1 0, r7, 2f /* if rightmost bit set, done */ extu r7, r7, 0<1> /* else, right shift whichqs, */ br.n 1b /* increment r6, and repeat */ - addu r6, r6, 1 + addu r6, r6, 1 2: or.u r7, r0, hi16(_qs) or r7, r7, lo16(_qs) @@ -204,13 +184,13 @@ Ldoneloop: lda.d r8, r7[r6] /* reload r8 with qs[ff1(whichqs)] */ ld r12, r8, P_FORW /* q->p_forw */ cmp r12, r12, r8 /* q == q->p_forw; anyone left on queue? */ - bb1 ne, r12, Lsw2 /* yes, skip clearing bit in whichqs */ + bb1 ne, r12, 3f /* yes, skip clearing bit in whichqs */ or r12, r0, 1 /* r12 is 1 now */ 1: bcnd eq0, r6, 2f mak r12, r12, 0<1> /* shift left by 1 */ br.n 1b - subu r6, r6, 1 /* keep doing this while r6 != 0 */ + subu r6, r6, 1 /* keep doing this while r6 != 0 */ 2: /* * NOTE: we could have just used "mak r12, r12, r6" instead of the @@ -218,55 +198,44 @@ Ldoneloop: * preprocessor to do the right thing, but that means I have to * include sys/proc.h in this file. XXX nivas */ - or.u r7, r0, hi16(_whichqs) - ld r8, r7, lo16(_whichqs) + or.u r7, r0, hi16(_C_LABEL(whichqs)) + ld r8, r7, lo16(_C_LABEL(whichqs)) and.c r8, r8, r12 /* whichqs &= ~the bit */ - st r8, r7, lo16(_whichqs) /* reset bit in whichqs */ -Lsw2: + st r8, r7, lo16(_C_LABEL(whichqs)) +3: ld r2, r9, P_WCHAN - bcnd ne0, r2, Lswchanpanic + bcnd ne0, r2, _ASM_LABEL(Lswchanpanic) ld.b r2, r9, P_STAT cmp r2, r2, SRUN - bb1 ne, r2, Lswsrunpanic + bb1 ne, r2, _ASM_LABEL(Lswsrunpanic) - or.u r11, r0, hi16(_want_resched) - st r0, r11, lo16(_want_resched) /* clear want_resched */ + or.u r11, r0, hi16(_C_LABEL(want_resched)) + st r0, r11, lo16(_C_LABEL(want_resched)) /* clear want_resched */ - or.u r11, r0, hi16(_curproc) - st r9, r11,lo16(_curproc) /* curproc = p */ - - /* huh??? */ - or.u r10, r0, hi16(_curpcb) - ld r10,r10, lo16(_curpcb) - -#ifdef notyet - cmp r2, r2, r9 - bb1 eq, r2, Lswsameproc -#endif /* notyet */ + or.u r11, r0, hi16(_C_LABEL(curproc)) + st r9, r11,lo16(_C_LABEL(curproc)) /* curproc = p */ /* r9 is curproc */ st r0, r9, P_BACK /* p->p_back = 0 */ ld r3, r9, P_ADDR - or.u r10, r0, hi16(_curpcb) - st r3, r10, lo16(_curpcb) /* curpcb = p->p_addr */ + or.u r10, r0, hi16(_C_LABEL(curpcb)) + st r3, r10, lo16(_C_LABEL(curpcb)) /* curpcb = p->p_addr */ - /* see if pmap_activate needs to be called */ - /* _pmap_activate() now has proc * as parameter 01-11-2000 smurph */ - /* No more VM_PMAP to contend with!!! 
*/ + /* pmap_activate() the process' pmap */ or r2, r0, r9 /* r2 = p */ or r14, r0, r9 /* save p in r14 */ - subu r31, r31,48 /* r2 = pmap, r3 = pcb, r4 = cpu number */ - bsr _pmap_activate /* _pmap_activate(proc *p)*/ + bsr.n _C_LABEL(pmap_activate) + subu r31, r31,48 addu r31, r31,48 or r9, r0, r14 /* restore p saved in r14 */ -Lswnochg: - or.u r31, r0, hi16(_intstack_end) - or r31,r31, lo16(_intstack_end)/* now goto a tmp stack for NMI */ +ASLOCAL(Lswnochg) + or.u r31, r0, hi16(_ASM_LABEL(intstack_end)) + or r31,r31, lo16(_ASM_LABEL(intstack_end)) subu r31, r31,48 - bsr.n _load_u_area - or r2, r0, r9 + bsr.n _C_LABEL(load_u_area) + or r2, r0, r9 addu r31, r31,48 /* flush tlb of any user addresses */ or r2, r0, 0 /* 0 = user space */ @@ -274,12 +243,12 @@ Lswnochg: subu r31, r31,48 /* r2 = 1 : kernel ? user, r3 = address, r4 = size */ /* cmmu_flush_tlb(0, 0, 0xffff) */ - bsr.n _md_cmmu_flush_tlb - or r4, r0, 0xffff /* cmmu_flush_tlb flushes entire tlb */ + bsr.n _C_LABEL(md_cmmu_flush_tlb) + or r4, r0, 0xffff /* cmmu_flush_tlb flushes entire tlb */ /* for sizes > 4096 */ addu r31, r31,48 - or.u r10, r0, hi16(_curpcb) - ld r10, r10, lo16(_curpcb) + or.u r10, r0, hi16(_C_LABEL(curpcb)) + ld r10, r10, lo16(_C_LABEL(curpcb)) /* XXX Is this correct/necessary? */ st r10, r14, P_ADDR /* p->p_addr = curpcb; restore p_addr */ @@ -304,17 +273,18 @@ Lswnochg: ld r30,r10, PCB_R30 /* restore frame pointer & stack */ ld r31,r10, PCB_SP -/* XXX should we postpone restoring stack till after ipl is restored? The -stack access could fault */ -Lswsameproc: +/* XXX + * Should we postpone restoring stack till after ipl is restored? + * The stack access could fault + */ subu r31,r31,48 st r1, r31,36 /* save r1 on stack */ - ld r2, r10, PCB_IPL /* restore interrupt mask */ - bsr _setipl /* restore ipl */ + bsr.n _C_LABEL(setipl) + ld r2, r10, PCB_IPL /* restore interrupt mask */ ld r1, r31,36 /* restore r1 from stack */ addu r31,r31,48 jmp.n r1 - or r2, r0, 1 /* return 1 (for alternate returns) */ + or r2, r0, 1 /* return 1 (for alternate returns) */ /* * savectx(pcb) @@ -324,8 +294,8 @@ ENTRY(savectx) /* get the spl mask */ subu r31,r31,48 /* allocate stack for r1 and args */ st r1,r31,36 /* save return address */ - st r2,r31,32 /* save r2 */ - bsr _getipl /* get the current interrupt mask */ + bsr.n _C_LABEL(getipl) /* get the current interrupt mask */ + st r2,r31,32 /* save r2 */ ld r1,r31,36 /* recover return address */ ld r10,r31,32 /* recover r2 into r10 */ addu r31,r31,48 /* put stack pointer back */ @@ -350,5 +320,5 @@ ENTRY(savectx) st r31,r10, PCB_SP st r2, r10, PCB_IPL /* save interrupt mask */ jmp.n r1 - or r2,r0,r0 + or r2,r0,r0 |
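savectx, at the end of the file, stores the callee-saved registers, stack and frame pointers and the current interrupt mask into the pcb and returns 0, while the switch-in tail of cpu_switch returns 1 "for alternate returns". The resulting setjmp-like contract, sketched in C (this caller is illustrative, not taken from the tree):

    struct pcb;
    extern int savectx(struct pcb *);   /* returns 0 when called directly */

    /*
     * Sketch of the contract: a later cpu_switch() into a pcb snapshotted
     * by savectx() resumes after the savectx() call, returning 1.
     */
    static void
    example_caller(struct pcb *pcb)
    {
            if (savectx(pcb) == 0) {
                    /* direct return: context is now snapshotted in pcb */
            } else {
                    /* resumed here via the switch path */
            }
    }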