summaryrefslogtreecommitdiff
path: root/sys/arch/mips64/include/pmap.h
blob: 64e37c9981f21bb846df5cc4c2a997a36134d59b (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
/*      $OpenBSD: pmap.h,v 1.27 2012/04/06 20:11:18 miod Exp $ */

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)pmap.h	8.1 (Berkeley) 6/10/93
 */

#ifndef	_MIPS64_PMAP_H_
#define	_MIPS64_PMAP_H_

#ifdef	_KERNEL

#include <machine/pte.h>

/*
 * The user address space is currently limited to 2Gb (0x0 - 0x80000000).
 *
 * The user address space is mapped using a two level structure where
 * virtual address bits 30..22 are used to index into a segment table which
 * points to a page worth of PTEs (a 4096 byte page can hold 1024 PTEs).
 * Bits 21..12 are then used to index a PTE which describes a page within
 * a segment.
 *
 * Note: The kernel doesn't use the same data structures as user programs.
 * All the PTE entries are stored in a single array in Sysmap which is
 * dynamically allocated at boot time.
 */

/*
 * Size of second level page structs (page tables, and segment table) used
 * by this pmap.
 */

#define	PMAP_L2SHIFT		12
#define	PMAP_L2SIZE		(1UL << PMAP_L2SHIFT)	/* 4KB per L2 struct */

/*
 * Segment sizes
 */

/* -2 below is for log2(sizeof pt_entry_t) */
#define	SEGSHIFT		(PAGE_SHIFT + PMAP_L2SHIFT - 2)
#define NBSEG			(1UL << SEGSHIFT)	/* bytes of VA per segment */
#define	SEGOFSET		(NBSEG - 1)		/* offset-within-segment mask */

/* Round a virtual address down/up to a segment boundary. */
#define mips_trunc_seg(x)	((vaddr_t)(x) & ~SEGOFSET)
#define mips_round_seg(x)	(((vaddr_t)(x) + SEGOFSET) & ~SEGOFSET)
/* Return the PTE page slot covering va `v' in pmap `m' (may be NULL). */
#define pmap_segmap(m, v)	((m)->pm_segtab->seg_tab[((v) >> SEGSHIFT)])

/* Number of entries in the first-level segment table. */
#define PMAP_SEGTABSIZE		(PMAP_L2SIZE / sizeof(void *))

/*
 * First-level table of a user pmap: one pointer per segment, each
 * pointing to a page of PTEs (or NULL if the segment is unmapped).
 */
struct segtab {
	pt_entry_t	*seg_tab[PMAP_SEGTABSIZE];
};

/*
 * Per-CPU TLB address space tag state for a pmap.  When pma_asidgen
 * falls behind the CPU's current generation, pma_asid is stale and a
 * fresh ASID must be allocated before the pmap is activated.
 */
struct pmap_asid_info {
	u_int			pma_asid;	/* address space tag */
	u_int			pma_asidgen;	/* TLB PID generation number */
};

/*
 * Machine dependent pmap structure.
 *
 * pm_asid is a variable-length trailing array: the structure is
 * allocated with room for (x) pmap_asid_info records via
 * PMAP_SIZEOF(x) below (presumably one record per CPU — confirm
 * against the allocation site).
 */
typedef struct pmap {
	int			pm_count;	/* pmap reference count */
	simple_lock_data_t	pm_lock;	/* lock on pmap */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	struct segtab		*pm_segtab;	/* pointers to pages of PTEs */
	struct pmap_asid_info	pm_asid[1];	/* ASID information */
} *pmap_t;

/*
 * Compute the sizeof of a pmap structure.  Subtract one because one
 * ASID info structure is already included in the pmap structure itself.
 */
#define	PMAP_SIZEOF(x)							\
	(ALIGN(sizeof(struct pmap) +					\
	       (sizeof(struct pmap_asid_info) * ((x) - 1))))


/* flags for pv_entry */
#define	PV_UNCACHED	PG_PMAP0	/* Page is mapped uncached */
#define	PV_CACHED	PG_PMAP1	/* Page has been cached */
#define	PV_ATTR_MOD	PG_PMAP2	/* Page modified attribute */
#define	PV_ATTR_REF	PG_PMAP3	/* Page referenced attribute */
#define	PV_PRESERVE	(PV_ATTR_MOD | PV_ATTR_REF)	/* bits kept across updates */

extern	struct pmap *const kernel_pmap_ptr;

#define pmap_resident_count(pmap)       ((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
#define pmap_kernel()			(kernel_pmap_ptr)

#define	PMAP_STEAL_MEMORY		/* Enable 'stealing' during boot */

#define PMAP_PREFER(pa, va)		pmap_prefer(pa, va)

/* Cache-aliasing mask driving PMAP_PREFER; zero when no aliasing exists. */
extern vaddr_t pmap_prefer_mask;
/* pmap prefer alignment */
#define PMAP_PREFER_ALIGN()						\
	(pmap_prefer_mask ? pmap_prefer_mask + 1 : 0)
/* pmap prefer offset in alignment */
#define PMAP_PREFER_OFFSET(of)		((of) & pmap_prefer_mask)

/* All pmap updates take effect immediately on this port. */
#define	pmap_update(x)			do { /* nothing */ } while (0)

void	pmap_bootstrap(void);
int	pmap_is_page_ro( pmap_t, vaddr_t, pt_entry_t);
void	pmap_kenter_cache(vaddr_t va, paddr_t pa, vm_prot_t prot, int cache);
vaddr_t	pmap_prefer(vaddr_t, vaddr_t);
void	pmap_set_modify(vm_page_t);
void	pmap_page_cache(vm_page_t, int);

#define	pmap_collect(x)			do { /* nothing */ } while (0)
#define pmap_unuse_final(p)		do { /* nothing yet */ } while (0)
#define	pmap_remove_holes(map)		do { /* nothing */ } while (0)

/* Push an updated PTE into the TLB for user/kernel mappings. */
void pmap_update_user_page(pmap_t, vaddr_t, pt_entry_t);
#ifdef MULTIPROCESSOR
void pmap_update_kernel_page(vaddr_t, pt_entry_t);
#else
/* Uniprocessor: no cross-CPU shootdown needed, update the TLB directly. */
#define pmap_update_kernel_page(va, entry) tlb_update(va, entry)
#endif

/*
 * Most R5000 processors (and related families) have a silicon bug preventing
 * the ll/sc (and lld/scd) instructions from honouring the caching mode
 * when accessing XKPHYS addresses.
 *
 * Since pool memory is allocated with pmap_map_direct() if __HAVE_PMAP_DIRECT,
 * and many structures containing fields which will be used with
 * <machine/atomic.h> routines are allocated from pools, __HAVE_PMAP_DIRECT can
 * not be defined on systems which may use flawed processors.
 */
#if !defined(CPU_R5000) && !defined(CPU_RM7000)
#define	__HAVE_PMAP_DIRECT
/* Direct (unmanaged) mapping of a physical page, and its inverse. */
vaddr_t	pmap_map_direct(vm_page_t);
vm_page_t pmap_unmap_direct(vaddr_t);
#endif

#endif	/* _KERNEL */

#endif	/* !_MIPS64_PMAP_H_ */