author:    Patrick Wildt <patrick@cvs.openbsd.org>	2017-02-06 19:23:46 +0000
committer: Patrick Wildt <patrick@cvs.openbsd.org>	2017-02-06 19:23:46 +0000
commit:    09d9fc30826c7586ced3c93385a2792e66d555c3
tree:      f753360b83ceae7ad7e3c4df4b6f1054411cb1c6 /sys/arch/arm64
parent:    ef615a2440c05376c83e2f5ed0c28a5ec6ab521e
Move the cache and TLB flush functions, which were mostly inline assembly,
into separate functions. This makes them reusable from other parts of
the kernel. Assembly and header are taken from FreeBSD, but modified
to fit our requirements and with some unnecessary code removed. While
there, remove a micro-optimization for uniprocessor kernels.
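For call sites the conversion is mechanical: each open-coded instruction
sequence becomes a call to a named entry point. A minimal before/after
sketch in C (illustrative only; the real conversions are in the pmap.c
hunks below):

	/* Before: inline assembly repeated at every call site. */
	__asm __volatile("tlbi vmalle1is");

	/* After: one named, reusable function implemented in cpufunc_asm.S. */
	cpu_tlb_flush();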
Diffstat (limited to 'sys/arch/arm64')

 -rw-r--r--  sys/arch/arm64/arm64/cpufunc_asm.S  146
 -rw-r--r--  sys/arch/arm64/arm64/locore.S         17
 -rw-r--r--  sys/arch/arm64/arm64/machdep.c        41
 -rw-r--r--  sys/arch/arm64/arm64/pmap.c           85
 -rw-r--r--  sys/arch/arm64/conf/files.arm64        4
 -rw-r--r--  sys/arch/arm64/include/armreg.h        7
 -rw-r--r--  sys/arch/arm64/include/cpufunc.h      53

7 files changed, 260 insertions, 93 deletions
diff --git a/sys/arch/arm64/arm64/cpufunc_asm.S b/sys/arch/arm64/arm64/cpufunc_asm.S
new file mode 100644
index 00000000000..2bfef7769b0
--- /dev/null
+++ b/sys/arch/arm64/arm64/cpufunc_asm.S
@@ -0,0 +1,146 @@
+/* $OpenBSD: cpufunc_asm.S,v 1.1 2017/02/06 19:23:45 patrick Exp $ */
+/*-
+ * Copyright (c) 2014 Robin Randhawa
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Andrew Turner
+ * under sponsorship from the FreeBSD Foundation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <machine/asm.h>
+#include <machine/param.h>
+//__FBSDID("$FreeBSD: head/sys/arm64/arm64/cpufunc_asm.S 305546 2016-09-07 16:46:54Z andrew $");
+
+/*
+ * FIXME:
+ *	Need big.LITTLE awareness at some point.
+ *	Using [id]cache_line_size may not be the best option.
+ *	Need better SMP awareness.
+ */
+	.text
+	.align	2
+
+/*
+ * Macro to handle the cache. This takes the start address in x0, length
+ * in x1. It will corrupt x0, x1, x2, and x3.
+ */
+.macro cache_handle_range dcop = 0, ic = 0, icop = 0
+.if \ic == 0
+	ldr	x3, =dcache_line_size	/* Load the D cache line size */
+.else
+	ldr	x3, =idcache_line_size	/* Load the I & D cache line size */
+.endif
+	ldr	x3, [x3]
+	sub	x4, x3, #1		/* Get the address mask */
+	and	x2, x0, x4		/* Get the low bits of the address */
+	add	x1, x1, x2		/* Add these to the size */
+	bic	x0, x0, x4		/* Clear the low bit of the address */
+1:
+	dc	\dcop, x0
+	dsb	ish
+.if \ic != 0
+	ic	\icop, x0
+	dsb	ish
+.endif
+	add	x0, x0, x3		/* Move to the next line */
+	subs	x1, x1, x3		/* Reduce the size */
+	b.hi	1b			/* Check if we are done */
+.if \ic != 0
+	isb
+.endif
+	ret
+.endm
+
+/*
+ * Generic functions to read/modify/write the internal coprocessor registers
+ */
+
+ENTRY(cpu_setttb)
+	dsb	ish
+	msr	ttbr0_el1, x0
+	dsb	ish
+	isb
+	ret
+END(cpu_setttb)
+
+ENTRY(cpu_tlb_flush)
+	tlbi	vmalle1is
+	dsb	ish
+	isb
+	ret
+END(cpu_tlb_flush)
+
+ENTRY(cpu_tlb_flush_asid)
+	tlbi	vae1is, x0
+	dsb	ish
+	isb
+	ret
+END(cpu_tlb_flush_asid)
+
+ENTRY(cpu_tlb_flush_all_asid)
+	tlbi	vaale1is, x0
+	dsb	ish
+	isb
+	ret
+END(cpu_tlb_flush_all_asid)
+
+/*
+ * void cpu_dcache_wb_range(vaddr_t, vsize_t)
+ */
+ENTRY(cpu_dcache_wb_range)
+	cache_handle_range	dcop = cvac
+END(cpu_dcache_wb_range)
+
+/*
+ * void cpu_dcache_wbinv_range(vaddr_t, vsize_t)
+ */
+ENTRY(cpu_dcache_wbinv_range)
+	cache_handle_range	dcop = civac
+END(cpu_dcache_wbinv_range)
+
+/*
+ * void cpu_dcache_inv_range(vaddr_t, vsize_t)
+ *
+ * Note, we must not invalidate everything.  If the range is too big we
+ * must use wb-inv of the entire cache.
+ */
+ENTRY(cpu_dcache_inv_range)
+	cache_handle_range	dcop = ivac
+END(cpu_dcache_inv_range)
+
+/*
+ * void cpu_idcache_wbinv_range(vaddr_t, vsize_t)
+ */
+ENTRY(cpu_idcache_wbinv_range)
+	cache_handle_range	dcop = civac, ic = 1, icop = ivau
+END(cpu_idcache_wbinv_range)
+
+/*
+ * void cpu_icache_sync_range(vaddr_t, vsize_t)
+ */
+ENTRY(cpu_icache_sync_range)
+	cache_handle_range	dcop = cvau, ic = 1, icop = ivau
+END(cpu_icache_sync_range)
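For readers who do not speak assembler, here is a C rendering of the
cache_handle_range logic above — a hedged sketch, not part of the commit.
dc_op() is a hypothetical stand-in for one "dc <op>" plus the "dsb ish"
that orders it; vaddr_t, vsize_t, and dcache_line_size are the kernel's
own types and variable:

	extern int64_t dcache_line_size;
	extern void dc_op(vaddr_t);	/* hypothetical: dc <op>, x0; dsb ish */

	static void
	cache_handle_range_c(vaddr_t addr, vsize_t len)
	{
		int64_t line = dcache_line_size;
		int64_t left = len + (addr & (line - 1)); /* widen by the misaligned low bits */

		addr &= ~(line - 1);	/* round start down to a line boundary */
		do {
			dc_op(addr);	/* one maintenance op per cache line */
			addr += line;
			left -= line;
		} while (left > 0);	/* mirrors the subs/b.hi pair */
	}

One small wart inherited from FreeBSD: the comment says the macro corrupts
x0 through x3, but the address mask is built in x4, so x4 is clobbered too.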
diff --git a/sys/arch/arm64/arm64/locore.S b/sys/arch/arm64/arm64/locore.S
index 3df13ef10db..a2f1d0d81ee 100644
--- a/sys/arch/arm64/arm64/locore.S
+++ b/sys/arch/arm64/arm64/locore.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.S,v 1.11 2017/02/05 13:08:03 patrick Exp $ */
+/* $OpenBSD: locore.S,v 1.12 2017/02/06 19:23:45 patrick Exp $ */
 /*-
  * Copyright (c) 2012-2014 Andrew Turner
  * All rights reserved.
@@ -95,25 +95,12 @@ _start:
 	/* Create the page tables */
 	bl	create_pagetables
-
-	mrs	x0, DCZID_EL0
-	tbnz	x0, 4, 1f
-	mov	x1, #1
-	and	x0, x0, 0xf
-	lsl	x1, x1, x0
-	ldr	x0, =dczva_line_size
-	// adjust virtual address to physical
-	sub	x0, x0, x29
-
-	str	x1, [x0]
-1:
 
 	/*
 	 * At this point:
 	 * x27 = TTBR0 table
	 * x26 = TTBR1 table
 	 */
-
 	/* Enable the mmu */
 	bl	start_mmu
 
@@ -657,8 +644,6 @@ abort:
 	.data
 	.global _C_LABEL(esym)
 _C_LABEL(esym): .xword	_C_LABEL(end)
-	.global _C_LABEL(dczva_line_size)
-_C_LABEL(dczva_line_size): .xword 0
 
 //.section .init_pagetable
 data_align_pad:
diff --git a/sys/arch/arm64/arm64/machdep.c b/sys/arch/arm64/arm64/machdep.c
index abd8ec3531e..67f97104a2c 100644
--- a/sys/arch/arm64/arm64/machdep.c
+++ b/sys/arch/arm64/arm64/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.9 2017/02/04 19:49:18 patrick Exp $ */
+/* $OpenBSD: machdep.c,v 1.10 2017/02/06 19:23:45 patrick Exp $ */
 /*
  * Copyright (c) 2014 Patrick Wildt <patrick@blueri.se>
  *
@@ -758,6 +758,43 @@ install_coproc_handler()
 {
 }
 
+int64_t dcache_line_size;	/* The minimum D cache line size */
+int64_t icache_line_size;	/* The minimum I cache line size */
+int64_t idcache_line_size;	/* The minimum cache line size */
+int64_t dczva_line_size;	/* The size of cache line the dc zva zeroes */
+
+void
+cache_setup(void)
+{
+	int dcache_line_shift, icache_line_shift, dczva_line_shift;
+	uint32_t ctr_el0;
+	uint32_t dczid_el0;
+
+	ctr_el0 = READ_SPECIALREG(ctr_el0);
+
+	/* Read the log2 words in each D cache line */
+	dcache_line_shift = CTR_DLINE_SIZE(ctr_el0);
+	/* Get the D cache line size */
+	dcache_line_size = sizeof(int) << dcache_line_shift;
+
+	/* And the same for the I cache */
+	icache_line_shift = CTR_ILINE_SIZE(ctr_el0);
+	icache_line_size = sizeof(int) << icache_line_shift;
+
+	idcache_line_size = MIN(dcache_line_size, icache_line_size);
+
+	dczid_el0 = READ_SPECIALREG(dczid_el0);
+
+	/* Check if dc zva is not prohibited */
+	if (dczid_el0 & DCZID_DZP)
+		dczva_line_size = 0;
+	else {
+		/* Same as with above calculations */
+		dczva_line_shift = DCZID_BS_SIZE(dczid_el0);
+		dczva_line_size = sizeof(int) << dczva_line_shift;
+	}
+}
+
 void collect_kernel_args(char *);
 void process_kernel_args(void);
 
@@ -822,6 +859,8 @@ initarm(struct arm64_bootparams *abp)
 	    "mov x18, %0 \n"
 	    "msr tpidr_el1, %0" :: "r"(pcpup));
 
+	cache_setup();
+
 	{
 	extern char bootargs[MAX_BOOT_STRING];
 	printf("memsize %llx %llx bootargs [%s]\n", memstart, memsize, bootargs);
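cache_setup() turns the log2 word counts read from CTR_EL0 and DCZID_EL0
into byte sizes. A worked example, assuming DminLine == 4 (common on
Cortex-A cores, but an assumption — the diff does not state a value):

	/*
	 * CTR_EL0.DminLine == 4 means 2^4 = 16 words per D cache line.
	 * With sizeof(int) == 4:
	 *	dcache_line_size = 4 << 4 = 64 bytes
	 * Likewise, if DCZID_EL0.DZP is clear and DCZID_EL0.BS == 4,
	 * one "dc zva" zeroes 4 << 4 = 64 bytes.
	 */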
diff --git a/sys/arch/arm64/arm64/pmap.c b/sys/arch/arm64/arm64/pmap.c
index afcc15a6d35..05290eb7801 100644
--- a/sys/arch/arm64/arm64/pmap.c
+++ b/sys/arch/arm64/arm64/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.17 2017/02/06 07:15:56 jsg Exp $ */
+/* $OpenBSD: pmap.c,v 1.18 2017/02/06 19:23:45 patrick Exp $ */
 /*
  * Copyright (c) 2008-2009,2014-2016 Dale Rahn <drahn@dalerahn.com>
  *
@@ -27,6 +27,7 @@
 #include "arm64/vmparam.h"
 #include "arm64/pmap.h"
 
+#include "machine/cpufunc.h"
 #include "machine/pcb.h"
 
 #include <machine/db_machdep.h>
@@ -71,55 +72,6 @@ pmap_pa_is_mem(uint64_t pa)
 	return 0;
 }
 
-unsigned int
-dcache_line_size(void)
-{
-	uint64_t ctr;
-	unsigned int dcl_size;
-
-	/* Accessible from all security levels */
-	ctr = READ_SPECIALREG(ctr_el0);
-
-	/*
-	 * Relevant field [19:16] is LOG2
-	 * of the number of words in DCache line
-	 */
-	dcl_size = CTR_DLINE_SIZE(ctr);
-
-	/* Size of word shifted by cache line size */
-	return (sizeof(int) << dcl_size);
-}
-
-/* Write back D-cache to PoC */
-void
-dcache_wb_poc(vaddr_t addr, vsize_t len)
-{
-	uint64_t cl_size;
-	vaddr_t end;
-
-	cl_size = dcache_line_size();
-
-	/* Calculate end address to clean */
-	end = addr + len;
-	/* Align start address to cache line */
-	addr = addr & ~(cl_size - 1);
-
-	for (; addr < end; addr += cl_size)
-		__asm __volatile("dc cvac, %x0" :: "r" (addr) : "memory");
-	__asm __volatile("dsb ish");
-}
-
-#if 0
-/* Write back and invalidate D-cache to PoC */
-STATIC __inline void
-dcache_wbinv_poc(vaddr_t sva, paddr_t pa, vsize_t size)
-{
-	// XXX needed?
-	for (off = 0; off <size; off += CACHE_LINE_SIZE)
-		__asm __volatile("dc CVAC,%0"::"r"(va+off));
-}
-#endif
-
 STATIC __inline void
 ttlb_flush(pmap_t pm, vaddr_t va)
 {
@@ -131,30 +83,28 @@ ttlb_flush_range(pmap_t pm, vaddr_t va, vsize_t size)
 {
 	vaddr_t eva = va + size;
 
-	__asm __volatile("dsb sy");
 	// if size is over 512 pages, just flush the entire cache !?!?!
 	if (size >= (512 * PAGE_SIZE)) {
-		__asm __volatile("tlbi vmalle1is");
-		return ;
+		cpu_tlb_flush();
+		return;
 	}
 
 	for ( ; va < eva; va += PAGE_SIZE)
 		arm64_tlbi_asid(va, pm->pm_asid);
-	__asm __volatile("dsb sy");
 }
 
 void
 arm64_tlbi_asid(vaddr_t va, int asid)
 {
 	vaddr_t resva;
+
+	resva = ((va >> PAGE_SHIFT) & ((1ULL << 44) - 1));
 	if (asid == -1) {
-		resva = ((va>>PAGE_SHIFT) & (1ULL << 44) -1) ;
-		__asm volatile ("TLBI VAALE1IS, %x0":: "r"(resva));
-		return;
+		cpu_tlb_flush_all_asid(resva);
+	} else {
+		resva |= (unsigned long long)asid << 48;
+		cpu_tlb_flush_asid(resva);
 	}
-	resva = ((va >> PAGE_SHIFT) & (1ULL << 44) -1) |
-	    ((unsigned long long)asid << 48);
-	__asm volatile ("TLBI VAE1IS, %x0" :: "r"(resva));
 }
 
 struct pmap kernel_pmap_;
@@ -1384,7 +1334,7 @@ pmap_set_l1(struct pmap *pm, uint64_t va, struct pmapvp1 *l1_va, paddr_t l1_pa)
 	pm->pm_vp.l0->l0[idx0] = pg_entry;
 	__asm __volatile("dsb sy");
 
-	dcache_wb_poc((vaddr_t)&pm->pm_vp.l0->l0[idx0], 8);
+	cpu_dcache_wb_range((vaddr_t)&pm->pm_vp.l0->l0[idx0], 8);
 
 	ttlb_flush_range(pm, va & ~PAGE_MASK, 1<<VP_IDX1_POS);
 }
@@ -1419,7 +1369,7 @@ pmap_set_l2(struct pmap *pm, uint64_t va, struct pmapvp2 *l2_va, paddr_t l2_pa)
 	vp1->l1[idx1] = pg_entry;
 	__asm __volatile("dsb sy");
 
-	dcache_wb_poc((vaddr_t)&vp1->l1[idx1], 8);
+	cpu_dcache_wb_range((vaddr_t)&vp1->l1[idx1], 8);
 
 	ttlb_flush_range(pm, va & ~PAGE_MASK, 1<<VP_IDX2_POS);
 }
@@ -1458,7 +1408,7 @@ pmap_set_l3(struct pmap *pm, uint64_t va, struct pmapvp3 *l3_va, paddr_t l3_pa)
 	vp2->vp[idx2] = l3_va;
 	vp2->l2[idx2] = pg_entry;
 	__asm __volatile("dsb sy");
-	dcache_wb_poc((vaddr_t)&vp2->l2[idx2], 8);
+	cpu_dcache_wb_range((vaddr_t)&vp2->l2[idx2], 8);
 
 	ttlb_flush_range(pm, va & ~PAGE_MASK, 1<<VP_IDX3_POS);
 }
@@ -1740,7 +1690,7 @@ pmap_pte_update(struct pte_desc *pted, uint64_t *pl3)
 
 	pte = (pted->pted_pte & PTE_RPGN) | attr | access_bits | L3_P;
 	*pl3 = pte;
-	dcache_wb_poc((vaddr_t) pl3, 8);
+	cpu_dcache_wb_range((vaddr_t) pl3, 8);
 	__asm __volatile("dsb sy");
 }
 
@@ -1778,7 +1728,7 @@ pmap_pte_remove(struct pte_desc *pted, int remove_pted)
 		vp3->vp[VP_IDX3(pted->pted_va)] = NULL;
 
 	__asm __volatile("dsb sy");
-	dcache_wb_poc((vaddr_t)&vp3->l3[VP_IDX3(pted->pted_va)], 8);
+	cpu_dcache_wb_range((vaddr_t)&vp3->l3[VP_IDX3(pted->pted_va)], 8);
 
 	arm64_tlbi_asid(pted->pted_va, pm->pm_asid);
 }
@@ -2354,7 +2304,7 @@ pmap_allocate_asid(pmap_t pm)
 
 	if (pmap_asid_id_next == MAX_ASID) {
 		// out of asid, flush all
-		__asm __volatile("tlbi vmalle1is");
+		cpu_tlb_flush();
 		for (i = 0;i < MAX_ASID; i++) {
 			if (pmap_asid[i] != NULL) {
 				// printf("reclaiming asid %d from %p\n", i,
@@ -2404,8 +2354,7 @@ pmap_setttb(struct proc *p, paddr_t pagedir, struct pcb *pcb)
 		//printf("switching userland to %p %p asid %d new asid %d\n",
 		//    pm, pmap_kernel(), oasid, pm->pm_asid);
 
-		__asm volatile("msr ttbr0_el1, %x0" :: "r"(pagedir));
-		__asm volatile("dsb sy");
+		cpu_setttb(pagedir);
 	} else {
 		// XXX what to do if switching to kernel pmap !?!?
 	}
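arm64_tlbi_asid() now only assembles the TLBI operand and hands it to the
new functions: bits 43:0 carry VA[55:12] and bits 63:48 carry the ASID,
matching the operand format of TLBI VAE1IS. A worked example with assumed
inputs:

	/*
	 * Assume va = 0x123000, PAGE_SHIFT = 12, asid = 5:
	 *
	 *	resva  = (0x123000 >> 12) & ((1ULL << 44) - 1)	-> 0x123
	 *	resva |= 5ULL << 48				-> 0x0005000000000123
	 *
	 * cpu_tlb_flush_asid(0x0005000000000123) then invalidates the
	 * entry for page 0x123 in ASID 5 on all cores in the inner
	 * shareable domain (the "is" in vae1is).
	 */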
diff --git a/sys/arch/arm64/conf/files.arm64 b/sys/arch/arm64/conf/files.arm64
index a939d877ef6..bba0574f2fb 100644
--- a/sys/arch/arm64/conf/files.arm64
+++ b/sys/arch/arm64/conf/files.arm64
@@ -1,4 +1,4 @@
-# $OpenBSD: files.arm64,v 1.8 2017/01/26 01:46:19 jsg Exp $
+# $OpenBSD: files.arm64,v 1.9 2017/02/06 19:23:45 patrick Exp $
 
 maxpartitions 16
 maxusers 2 8 64
@@ -31,7 +31,7 @@ file arch/arm64/arm64/trap.c
 file arch/arm64/arm64/ast.c
 
 file arch/arm64/arm64/arm64_mutex.c
-
+file arch/arm64/arm64/cpufunc_asm.S
 file arch/arm64/arm64/support.S
 file arch/arm64/arm64/bus_dma.c
 
diff --git a/sys/arch/arm64/include/armreg.h b/sys/arch/arm64/include/armreg.h
index 4cad9e6bfed..1a1802b1dbb 100644
--- a/sys/arch/arm64/include/armreg.h
+++ b/sys/arch/arm64/include/armreg.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: armreg.h,v 1.2 2016/12/18 14:40:25 patrick Exp $ */
+/* $OpenBSD: armreg.h,v 1.3 2017/02/06 19:23:45 patrick Exp $ */
 /*-
  * Copyright (c) 2013, 2014 Andrew Turner
  * Copyright (c) 2015 The FreeBSD Foundation
@@ -467,12 +467,7 @@
 #define	TCR_CACHE_ATTRS	((TCR_IRGN0_WBWA | TCR_IRGN1_WBWA) |\
     (TCR_ORGN0_WBWA | TCR_ORGN1_WBWA))
 
-
-#ifdef SMP
 #define	TCR_SMP_ATTRS	(TCR_SH0_IS | TCR_SH1_IS)
-#else
-#define	TCR_SMP_ATTRS	0
-#endif
 
 #define	TCR_T1SZ_SHIFT	16
 #define	TCR_T0SZ_SHIFT	0
diff --git a/sys/arch/arm64/include/cpufunc.h b/sys/arch/arm64/include/cpufunc.h
new file mode 100644
index 00000000000..e827430394d
--- /dev/null
+++ b/sys/arch/arm64/include/cpufunc.h
@@ -0,0 +1,53 @@
+/* $OpenBSD: cpufunc.h,v 1.1 2017/02/06 19:23:45 patrick Exp $ */
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: head/sys/cpu/include/cpufunc.h 299683 2016-05-13 16:03:50Z andrew $
+ */
+
+#ifndef _MACHINE_CPUFUNC_H_
+#define	_MACHINE_CPUFUNC_H_
+
+#ifdef _KERNEL
+
+#include <machine/armreg.h>
+
+extern int64_t dcache_line_size;
+extern int64_t icache_line_size;
+extern int64_t idcache_line_size;
+extern int64_t dczva_line_size;
+
+void cpu_setttb(vaddr_t);
+void cpu_tlb_flush(void);
+void cpu_tlb_flush_asid(vaddr_t);
+void cpu_tlb_flush_all_asid(vaddr_t);
+void cpu_icache_sync_range(vaddr_t, vsize_t);
+void cpu_idcache_wbinv_range(vaddr_t, vsize_t);
+void cpu_dcache_wbinv_range(vaddr_t, vsize_t);
+void cpu_dcache_inv_range(vaddr_t, vsize_t);
+void cpu_dcache_wb_range(vaddr_t, vsize_t);
+
+#endif /* _KERNEL */
+#endif /* _MACHINE_CPUFUNC_H_ */
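With the header in place, other kernel code can reuse the pattern the
pmap.c hunks above establish. A hedged usage sketch (publish_pte() is a
hypothetical helper, not part of this commit):

	#include <machine/cpufunc.h>

	/* Publish an 8-byte page-table entry so hardware table walks see it. */
	void
	publish_pte(uint64_t *pl3, uint64_t pte)
	{
		*pl3 = pte;
		cpu_dcache_wb_range((vaddr_t)pl3, 8);	/* clean the entry to memory */
		__asm __volatile("dsb sy");		/* order before any TLB op */
	}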