/* $OpenBSD: locore.S,v 1.7 2017/02/03 10:20:42 patrick Exp $ */
/*-
* Copyright (c) 2012-2014 Andrew Turner
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD: head/sys/arm64/arm64/locore.S 282867 2015-05-13 18:57:03Z zbb $
*/
#include "assym.h"
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/armreg.h>
#include <machine/hypervisor.h>
#include <machine/param.h>
#include <machine/pte.h>
#define VIRT_BITS 39
#define DEVICE_MEM 0
#define NORMAL_UNCACHED 1
#define NORMAL_MEM 2
/*
* We assume:
* MMU on with an identity map, or off
* D-Cache: off
* I-Cache: on or off
* We are loaded at a 2MiB aligned address
*/
#define INIT_STACK_SIZE (PAGE_SIZE * 4)
.text
.globl _start
_start:
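	/*
	 * Preserve the bootloader arguments (x0-x2) in callee-saved
	 * registers; x2 carries the FDT address (see the argument
	 * block passed to initarm below).
	 */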
mov x21, x0
mov x22, x1
mov x23, x2
/* Drop to EL1 */
bl drop_to_el1
/*
* Disable the MMU. We may have entered the kernel with it on and
* will need to update the tables later. If this has been set up
* with anything other than a VA == PA map then this will fail,
* but in this case the code to find where we are running from
* would have also failed.
*/
dsb sy
mrs x2, sctlr_el1
bic x2, x2, SCTLR_M
msr sctlr_el1, x2
isb
/* Get the virt -> phys offset */
bl get_virt_delta
	/* Store the symbol table end (boot argument x0) in esym */
	adr	x0, .Lesym
	ldr	x0, [x0]
	sub	x0, x0, x29		/* VA -> PA */
	str	x21, [x0]
	/*
	 * At this point:
	 * x29 = VA - PA delta (subtract it from a linked virtual
	 *       address to get the physical address)
	 * x28 = Our physical load address
	 */
/* Create the page tables */
bl create_pagetables
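	/*
	 * Record the DC ZVA block size for the kernel's zeroing code,
	 * unless DCZID_EL0.DZP indicates DC ZVA is prohibited.
	 */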
	mrs	x0, DCZID_EL0
	tbnz	x0, 4, 1f		/* DZP set: DC ZVA prohibited */
	mov	x1, #1
	and	x0, x0, 0xf		/* BS: log2 of the block size in words */
	lsl	x1, x1, x0		/* block size = 2^BS */
	ldr	x0, =dczva_line_size
	// adjust virtual address to physical
	sub	x0, x0, x29
	str	x1, [x0]
1:
/*
* At this point:
* x27 = TTBR0 table
* x26 = TTBR1 table
*/
/* Enable the mmu */
bl start_mmu
	/*
	 * Jump to the virtual address space: .Lvirtdone holds the linked
	 * (virtual) address of virtdone, so this branch leaves the
	 * identity mapping behind.
	 */
	ldr	x15, .Lvirtdone
	br	x15
.Linitstack:
.xword initstack
.Linitstack_end:
.xword initstack_end
virtdone:
/* Set up the stack */
adr x25, .Linitstack_end
ldr x25, [x25]
mov sp, x25
	/* Carve out stack-aligned space for proc0's trapframe */
	mov	x8, #TRAPFRAME_SIZEOF
	sub	x8, x8, (STACKALIGNBYTES)
	and	x8, x8, ~(STACKALIGNBYTES)
	// pass base of kernel stack as proc0
	adr	x25, .Linitstack
	ldr	x25, [x25]
	sub	sp, sp, x8
/* Zero the BSS */
ldr x15, .Lbss
ldr x14, .Lend
1:
str xzr, [x15], #8
cmp x15, x14
b.lo 1b
/* Backup the module pointer */
mov x1, x0
/* Make the page table base a virtual address */
sub x26, x26, x29
// XXX - shouldn't this be 8 * 5 (struct grew from 4 -> 5)
sub sp, sp, #(64 * 4)
mov x0, sp
/* Negate the delta so it is VA -> PA */
neg x29, x29
str x1, [x0] /* modulep */
str x26, [x0, 8] /* kern_l1pt */
str x29, [x0, 16] /* kern_delta */
str x25, [x0, 24] /* kern_stack */
str x21, [x0, 32] /* ? (x0 arg on boot) */
str x22, [x0, 40] /* ? (x1 arg on boot) */
str x23, [x0, 48] /* fdt (x2 arg on boot) */
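	/*
	 * The stores above fill a temporary argument block on the stack;
	 * its address (sp, copied to x0 earlier) is what initarm receives.
	 */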
/* trace back starts here */
mov fp, #0
/* Branch to C code */
bl initarm
bl _C_LABEL(main)
/* We should not get here */
brk 0
.align 3
.Lvirtdone:
.quad virtdone
.Lbss:
.quad __bss_start
.Lstart:
.quad _start
.Lend:
.quad _end
.Lcpu_info_primary:
.quad _C_LABEL(cpu_info_primary)
/*
* If we are started in EL2, configure the required hypervisor
* registers and drop to EL1.
*/
drop_to_el1:
mrs x1, CurrentEL
lsr x1, x1, #2
cmp x1, #0x2
b.eq 1f
ret
1:
/* Configure the Hypervisor */
mov x2, #(HCR_RW)
msr hcr_el2, x2
/* Load the Virtualization Process ID Register */
mrs x2, midr_el1
msr vpidr_el2, x2
	/* Load the Virtualization Multiprocessor ID Register */
mrs x2, mpidr_el1
msr vmpidr_el2, x2
/* Set the bits that need to be 1 in sctlr_el1 */
ldr x2, .Lsctlr_res1
msr sctlr_el1, x2
	/* Don't trap FP/SIMD or trace accesses to EL2; keep the RES1 bits */
	mov	x2, #CPTR_RES1
	msr	cptr_el2, x2
	/* Don't trap CP15 accesses to EL2 */
	msr	hstr_el2, xzr
/* Hypervisor trap functions */
adr x2, hyp_vectors
msr vbar_el2, x2
	/* Return to EL1h with all exceptions masked */
	mov	x2, #(PSR_F | PSR_I | PSR_A | PSR_D | PSR_M_EL1h)
	msr	spsr_el2, x2
/* Configure GICv3 CPU interface */
mrs x2, id_aa64pfr0_el1
/* Extract GIC bits from the register */
ubfx x2, x2, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_BITS
/* GIC[3:0] == 0001 - GIC CPU interface via special regs. supported */
cmp x2, #(ID_AA64PFR0_GIC_CPUIF_EN >> ID_AA64PFR0_GIC_SHIFT)
b.ne 2f
mrs x2, icc_sre_el2
	orr	x2, x2, #ICC_SRE_EL2_EN	/* Enable access from non-secure EL1 */
orr x2, x2, #ICC_SRE_EL2_SRE /* Enable system registers */
msr icc_sre_el2, x2
2:
	/* Set the exception return address to our caller; eret drops us to EL1 */
msr elr_el2, x30
isb
eret
.align 3
.Lsctlr_res1:
.quad SCTLR_RES1
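/*
 * Each unused hypervisor vector below is an endless loop.  .align 7
 * preserves the architectural 0x80-byte spacing between vector entries
 * and .align 11 puts the table on the 2KiB boundary VBAR_EL2 requires.
 */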
#define VECT_EMPTY \
.align 7; \
1: b 1b
.align 11
hyp_vectors:
VECT_EMPTY /* Synchronous EL2t */
VECT_EMPTY /* IRQ EL2t */
VECT_EMPTY /* FIQ EL2t */
VECT_EMPTY /* Error EL2t */
VECT_EMPTY /* Synchronous EL2h */
VECT_EMPTY /* IRQ EL2h */
VECT_EMPTY /* FIQ EL2h */
VECT_EMPTY /* Error EL2h */
VECT_EMPTY /* Synchronous 64-bit EL1 */
VECT_EMPTY /* IRQ 64-bit EL1 */
VECT_EMPTY /* FIQ 64-bit EL1 */
VECT_EMPTY /* Error 64-bit EL1 */
VECT_EMPTY /* Synchronous 32-bit EL1 */
VECT_EMPTY /* IRQ 32-bit EL1 */
VECT_EMPTY /* FIQ 32-bit EL1 */
VECT_EMPTY /* Error 32-bit EL1 */
/*
* Get the delta between the physical address we were loaded to and the
* virtual address we expect to run from. This is used when building the
* initial page table.
*/
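/*
 * For example (addresses purely illustrative): if virt_map is linked at
 * VA 0xffffff8000000800 but executes from PA 0x0000000040000800, then
 * x29 = 0xffffff8000000800 - 0x0000000040000800 = 0xffffff7fc0000000,
 * and any linked address can be converted with "sub Xn, Xn, x29".
 */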
.globl get_virt_delta
get_virt_delta:
/* Load the physical address of virt_map */
adr x28, virt_map
/* Load the virtual address of virt_map stored in virt_map */
ldr x29, [x28]
	/* Find VA - PA, so that PA' = VA' - x29 for any linked address VA' */
sub x29, x29, x28
	and	x28, x28, #~0x0003ffff	// aligns down to 256KiB; should be 2MB?
ret
.align 3
virt_map:
.quad virt_map
/*
 * This builds the page tables containing the identity map, and the kernel
 * virtual map.
 *
 * It relies on:
 *  We were loaded to an address that is on a 2MiB boundary
 *  All the memory must not cross a 1GiB boundary
 *  x28 contains the physical address we were loaded from
 *
 * The tables are allocated, page aligned, in .data
 * The tables used are:
 *  - The identity (PA = VA) table (TTBR0)
 *  - The Kernel L1 table (TTBR1)
 *  - The L2 table for the kernel, linked from the TTBR1 L1 table
 */
.Lpagetable:
.xword pagetable
.Lpagetable_end:
.xword pagetable_end
.Lesym:
.xword esym
create_pagetables:
/* Save the Link register */
mov x5, x30
/* Clean the page table */
adr x6, .Lpagetable
ldr x6, [x6]
sub x6, x6, x29 // VA -> PA
mov x26, x6
adr x27, .Lpagetable_end
ldr x27, [x27]
sub x27, x27, x29 // VA -> PA
1:
stp xzr, xzr, [x6], #16
stp xzr, xzr, [x6], #16
stp xzr, xzr, [x6], #16
stp xzr, xzr, [x6], #16
cmp x6, x27
b.lo 1b
/*
* Build the TTBR1 maps.
*/
/* Find the size of the kernel */
adr x6, .Lstart
ldr x6, [x6]
sub x6, x6, x29
/* End is the symbol address */
adr x7, .Lesym
ldr x7, [x7]
sub x7, x7, x29
ldr x7, [x7]
sub x7, x7, x29
/* Find the end - begin */
sub x8, x7, x6
	/* Get the number of 2MiB L2 entries to allocate, rounded down */
	lsr	x10, x8, #(L2_SHIFT)
	/* Add 4 MiB (two entries) for the rounding above and the module data */
	add	x10, x10, #2
/* Create the kernel space L2 table */
mov x6, x26 // pagetable:
mov x7, #NORMAL_MEM
add x8, x28, x29
mov x9, x28
bl build_l2_block_pagetable
/* Move to the l1 table */
add x26, x26, #PAGE_SIZE*2 // pagetable_l1_ttbr1:
/* Link the l1 -> l2 table */
mov x9, x6
mov x6, x26
bl link_l1_pagetable
/*
* Build the TTBR0 maps.
*/
add x27, x26, #PAGE_SIZE * 2 // pagetable_l1_ttbr0:
mov x6, x27 /* The initial page table */
#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
/* Create a table for the UART */
mov x7, #DEVICE_MEM
mov x8, #(SOCDEV_VA) /* VA start */
mov x9, #(SOCDEV_PA) /* PA start */
mov x10, #1
bl build_l1_block_pagetable
#endif
/* Create the VA = PA map */
mov x7, #NORMAL_MEM // #NORMAL
mov x9, x27
mov x8, x9 /* VA start (== PA start) */
mov x10, #1
bl build_l1_block_pagetable
/* Create a mapping for the FDT */
mov x7, #NORMAL_MEM // #NORMAL
mov x9, x23
mov x8, x9 /* VA start (== PA start) */
mov x10, #1
bl build_l1_block_pagetable
/* Restore the Link register */
mov x30, x5
ret
/*
* Builds an L1 -> L2 table descriptor
*
* This is a link for a 1GiB block of memory with up to 2MiB regions mapped
* within it by build_l2_block_pagetable.
*
* x6 = L1 table
* x8 = Virtual Address
* x9 = L2 PA (trashed)
* x11, x12 and x13 are trashed
*/
link_l1_pagetable:
/*
* Link an L1 -> L2 table entry.
*/
/* Find the table index */
lsr x11, x8, #L1_SHIFT
and x11, x11, #Ln_ADDR_MASK
/* Build the L1 block entry */
mov x12, #L1_TABLE
/* Only use the output address bits */
lsr x9, x9, #12
orr x12, x12, x9, lsl #12
/* Store the entry */
str x12, [x6, x11, lsl #3]
ret
/*
 * Builds "count" 1 GiB page table entries
*
* x6 = L1 table
* x7 = Type (0 = Device, 1 = Normal)
* x8 = VA start
* x9 = PA start (trashed)
* x10 = Entry count
* x11, x12 and x13 are trashed
*/
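/*
 * For example, with the usual 4KiB-granule constants (L1_SHIFT = 30,
 * Ln_ADDR_MASK = 0x1ff), mapping VA = PA = 0x40000000 fills slot
 * (0x40000000 >> 30) & 0x1ff = 1 with
 * (0x40000000 & ~0x3fffffff) | attributes | L1_BLOCK.
 */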
build_l1_block_pagetable:
/*
* Build the L1 table entry.
*/
/* Find the table index */
lsr x11, x8, #L1_SHIFT
and x11, x11, #Ln_ADDR_MASK
/* Build the L1 block entry */
lsl x12, x7, #2
orr x12, x12, #L1_BLOCK
orr x12, x12, #(ATTR_AF)
/* Only use the output address bits */
lsr x9, x9, #L1_SHIFT
/* Set the physical address for this virtual address */
1: orr x13, x12, x9, lsl #L1_SHIFT
/* Store the entry */
str x13, [x6, x11, lsl #3]
sub x10, x10, #1
add x11, x11, #1
add x9, x9, #1
cbnz x10, 1b
ret
/*
 * Builds "count" 2 MiB page table entries
* x6 = L2 table
* x7 = Type (0 = Device, 1 = Normal)
* x8 = VA start
* x9 = PA start (trashed)
* x10 = Entry count
* x11, x12 and x13 are trashed
*/
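/*
 * Same arithmetic as build_l1_block_pagetable, at 2MiB granularity:
 * assuming the usual L2_SHIFT = 21, each iteration fills slot
 * (VA >> 21) & 0x1ff with (PA & ~0x1fffff) | attributes | L2_BLOCK,
 * then advances the slot index and physical address by one block.
 */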
build_l2_block_pagetable:
/*
* Build the L2 table entry.
*/
/* Find the table index */
lsr x11, x8, #L2_SHIFT
and x11, x11, #Ln_ADDR_MASK
/* Build the L2 block entry */
lsl x12, x7, #2
orr x12, x12, #L2_BLOCK
orr x12, x12, #(ATTR_AF)
/* Only use the output address bits */
lsr x9, x9, #L2_SHIFT
/* Set the physical address for this virtual address */
1: orr x13, x12, x9, lsl #L2_SHIFT
/* Store the entry */
str x13, [x6, x11, lsl #3]
sub x10, x10, #1
add x11, x11, #1
add x9, x9, #1
cbnz x10, 1b
ret
start_mmu:
dsb sy
/* Load the exception vectors */
ldr x2, =exception_vectors
msr vbar_el1, x2
/* Load ttbr0 and ttbr1 */
msr ttbr0_el1, x27
msr ttbr1_el1, x26
isb
/* Clear the Monitor Debug System control register */
msr mdscr_el1, xzr
/* Invalidate the TLB */
tlbi vmalle1is
ldr x2, mair
msr mair_el1, x2
	/* Set up TCR, copying the PARange bits from ID_AA64MMFR0_EL1 into IPS */
ldr x2, tcr
mrs x3, id_aa64mmfr0_el1
bfi x2, x3, #32, #3
msr tcr_el1, x2
	/* Set up SCTLR */
ldr x2, sctlr_set
ldr x3, sctlr_clear
mrs x1, sctlr_el1
bic x1, x1, x3 /* Clear the required bits */
orr x1, x1, x2 /* Set the required bits */
msr sctlr_el1, x1
isb
ret
.globl switch_mmu_kernel
switch_mmu_kernel:
dsb sy
/* Invalidate the TLB */
tlbi vmalle1is
/* Load ttbr1 (kernel) */
msr ttbr1_el1, x0
isb
ret
.align 3
mair:
	/* Attr0 = Device, Attr1 = Normal uncached, Attr2 = Normal write-back */
.quad MAIR_ATTR(0x00, 0) | MAIR_ATTR(0x44, 1) | MAIR_ATTR(0xff, 2)
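/*
 * The attribute indices above line up with the DEVICE_MEM,
 * NORMAL_UNCACHED and NORMAL_MEM constants used when building
 * the page table entries.
 */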
tcr:
.quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_ASID_16 | TCR_TG1_4K | \
TCR_CACHE_ATTRS | TCR_SMP_ATTRS)
sctlr_set:
/* Bits to set */
.quad (SCTLR_UCI | SCTLR_nTWE | SCTLR_nTWI | SCTLR_UCT | SCTLR_DZE | \
SCTLR_I | SCTLR_SED | SCTLR_C | SCTLR_M)
sctlr_clear:
/* Bits to clear */
.quad (SCTLR_EE | SCTLR_EOE | SCTLR_WXN | SCTLR_UMA | SCTLR_ITD | \
SCTLR_THEE | SCTLR_CP15BEN | SCTLR_SA0 | SCTLR_SA | SCTLR_A)
.globl abort
abort:
b abort
// The first entry in .data must be esym
// so that the bootloader can find it easily.
.data
.global _C_LABEL(esym)
_C_LABEL(esym): .xword _C_LABEL(end)
.global _C_LABEL(dczva_line_size)
_C_LABEL(dczva_line_size): .xword 0
//.section .init_pagetable
data_align_pad:
.space 32
.align 12 /* 4KiB aligned */
/*
* 3 initial tables (in the following order):
* L2 for kernel (High addresses)
* L1 for kernel
* L1 for user (Low addresses)
*/
.globl pagetable
pagetable:
.space PAGE_SIZE * 2 // allocate 2 pages for pmapvp2
pagetable_l1_ttbr1:
.space PAGE_SIZE * 2 // allocate 2 pages for pmapvp1
pagetable_l1_ttbr0:
.space PAGE_SIZE * 2 // allocate 2 pages, reused later in pmap
pagetable_end:
.text
#if 0
.globl init_pt_va
init_pt_va:
.quad pagetable /* XXX: Keep page tables VA */
#endif
.bss
.align 4
.globl initstack
initstack:
.space USPACE
initstack_end:
.text
ENTRY(sigcode)
	mov	x0, sp
	add	x0, x0, #SF_SC		/* point x0 at the saved sigcontext */
1:
mov x8, #SYS_sigreturn
svc 0
.globl _C_LABEL(sigcoderet)
_C_LABEL(sigcoderet):
/* sigreturn failed, exit */
mov x8, #SYS_exit
svc 0
b 1b
END(sigcode)
/* This may be copied to the stack, keep it 16-byte aligned */
.align 3
.global _C_LABEL(esigcode)
_C_LABEL(esigcode):
.globl sigfill
sigfill:
.word 0xa000f7f0 /* FIXME: illegal on all cpus? */
esigfill:
.data
.globl sigfillsiz
sigfillsiz:
.word esigfill - sigfill
.text