Diffstat (limited to 'sys/arch/amd64')
-rw-r--r--  sys/arch/amd64/amd64/locore.S    4
-rw-r--r--  sys/arch/amd64/amd64/machdep.c  20
-rw-r--r--  sys/arch/amd64/amd64/mem.c      16
3 files changed, 4 insertions, 36 deletions
diff --git a/sys/arch/amd64/amd64/locore.S b/sys/arch/amd64/amd64/locore.S
index 30d452fe0ab..c0076f2d5c4 100644
--- a/sys/arch/amd64/amd64/locore.S
+++ b/sys/arch/amd64/amd64/locore.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.S,v 1.54 2012/11/10 09:45:05 mglocker Exp $ */
+/* $OpenBSD: locore.S,v 1.55 2014/10/09 04:18:09 tedu Exp $ */
/* $NetBSD: locore.S,v 1.13 2004/03/25 18:33:17 drochner Exp $ */
/*
@@ -392,7 +392,7 @@ bi_size_ok:
/* Find end of kernel image. */
movl $RELOC(end),%edi
-#if (NKSYMS || defined(DDB) || defined(LKM)) && !defined(SYMTAB_SPACE)
+#if (NKSYMS || defined(DDB)) && !defined(SYMTAB_SPACE)
/* Save the symbols (if loaded). */
movl RELOC(esym),%eax
testl %eax,%eax
diff --git a/sys/arch/amd64/amd64/machdep.c b/sys/arch/amd64/amd64/machdep.c
index f02b63e36ee..fde7f8d7c0b 100644
--- a/sys/arch/amd64/amd64/machdep.c
+++ b/sys/arch/amd64/amd64/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.192 2014/09/27 08:27:17 mlarkin Exp $ */
+/* $OpenBSD: machdep.c,v 1.193 2014/10/09 04:18:09 tedu Exp $ */
/* $NetBSD: machdep.c,v 1.3 2003/05/07 22:58:18 fvdl Exp $ */
/*-
@@ -201,12 +201,6 @@ int lid_suspend;
*/
int safepri = 0;
-#ifdef LKM
-vaddr_t lkm_start, lkm_end;
-static struct vm_map lkm_map_store;
-extern struct vm_map *lkm_map;
-#endif
-
struct vm_map *exec_map = NULL;
struct vm_map *phys_map = NULL;
@@ -339,12 +333,6 @@ cpu_startup(void)
phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
VM_PHYS_SIZE, 0, FALSE, NULL);
-#ifdef LKM
- uvm_map_setup(&lkm_map_store, lkm_start, lkm_end, VM_MAP_PAGEABLE);
- lkm_map_store.pmap = pmap_kernel();
- lkm_map = &lkm_map_store;
-#endif
-
printf("avail mem = %lu (%luMB)\n", ptoa((psize_t)uvmexp.free),
ptoa((psize_t)uvmexp.free)/1024/1024);
@@ -1399,12 +1387,6 @@ init_x86_64(paddr_t first_avail)
first_avail = round_page(first_avail);
kern_end = KERNBASE + first_avail;
-#ifdef LKM
- lkm_start = KERNTEXTOFF + first_avail;
- /* set it to the end of the jumpable region, should be safe enough */
- lkm_end = 0xffffffffffffffff;
-#endif
-
/*
* Now, load the memory clusters (which have already been
* flensed) into the VM system.
diff --git a/sys/arch/amd64/amd64/mem.c b/sys/arch/amd64/amd64/mem.c
index 4816c659cc3..d52d2baeb6b 100644
--- a/sys/arch/amd64/amd64/mem.c
+++ b/sys/arch/amd64/amd64/mem.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: mem.c,v 1.18 2014/09/14 14:17:23 jsg Exp $ */
+/* $OpenBSD: mem.c,v 1.19 2014/10/09 04:18:09 tedu Exp $ */
/*
* Copyright (c) 1988 University of Utah.
* Copyright (c) 1982, 1986, 1990, 1993
@@ -53,9 +53,6 @@
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/fcntl.h>
-#ifdef LKM
-#include <sys/lkm.h>
-#endif
#include <machine/cpu.h>
#include <machine/conf.h>
@@ -65,10 +62,6 @@
caddr_t zeropage;
extern int start, end, etext;
-#ifdef LKM
-extern vaddr_t lkm_start, lkm_end;
-#endif
-
/* open counter for aperture */
#ifdef APERTURE
static int ap_open_count = 0;
@@ -159,13 +152,6 @@ mmrw(dev_t dev, struct uio *uio, int flags)
if (v < (vaddr_t)&etext &&
uio->uio_rw == UIO_WRITE)
return EFAULT;
-#ifdef LKM
- } else if (v >= lkm_start && v < lkm_end) {
- if (!uvm_map_checkprot(lkm_map, v, v + c,
- uio->uio_rw == UIO_READ ?
- UVM_PROT_READ: UVM_PROT_WRITE))
- return (EFAULT);
-#endif
} else if ((!uvm_kernacc((caddr_t)v, c,
uio->uio_rw == UIO_READ ? B_READ : B_WRITE)) &&
(v < PMAP_DIRECT_BASE && v > PMAP_DIRECT_END))