author    Dale Rahn <drahn@cvs.openbsd.org>    2007-05-27 15:46:03 +0000
committer Dale Rahn <drahn@cvs.openbsd.org>    2007-05-27 15:46:03 +0000
commit    cf1cd27fe5706a55f9f3a6e9a6adc96706990372 (patch)
tree      b7ae4e6650354468e1d27809e33f62c6312b063f
parent    b650e4ca7dad913dcad26f227c0dfb90c78a4eb5 (diff)
Move powerpc to vm_page_md, 'throw it in' kettenis@
-rw-r--r--  sys/arch/macppc/include/vmparam.h    20
-rw-r--r--  sys/arch/macppc/macppc/dma.c         32
-rw-r--r--  sys/arch/powerpc/include/pmap.h      16
-rw-r--r--  sys/arch/powerpc/powerpc/pmap.c     235
4 files changed, 129 insertions(+), 174 deletions(-)
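
Note: this commit replaces the per-segment pmap_physseg arrays (pvent and
attrs, located through vm_physseg_find() on every lookup) with
machine-dependent per-page state embedded in struct vm_page: a struct
vm_page_md carrying the P->V list, plus PG_PMAP_MOD/REF/EXE bits kept in
pg_flags. A minimal sketch of the two lookup styles, assuming kernel
context and using only names that appear in the diff below:

	/* Before: parallel arrays per physical segment, one bsearch
	 * (vm_physseg_find) per lookup. */
	int bank, off;
	struct pted_pv_head *pvh = NULL;

	bank = vm_physseg_find(atop(pa), &off);
	if (bank != -1)
		pvh = &vm_physmem[bank].pmseg.pvent[off];

	/* After: the list head lives inside struct vm_page itself. */
	struct vm_page *pg = PHYS_TO_VM_PAGE(pa);

	if (pg != NULL)
		/* pg->mdpage.pv_list is the per-page P->V list head */
		LIST_INSERT_HEAD(&pg->mdpage.pv_list, pted, pted_pv_list);
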
diff --git a/sys/arch/macppc/include/vmparam.h b/sys/arch/macppc/include/vmparam.h
index 485b6877706..9ace5e9b546 100644
--- a/sys/arch/macppc/include/vmparam.h
+++ b/sys/arch/macppc/include/vmparam.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vmparam.h,v 1.17 2006/09/18 16:57:23 marco Exp $ */
+/* $OpenBSD: vmparam.h,v 1.18 2007/05/27 15:46:02 drahn Exp $ */
/* $NetBSD: vmparam.h,v 1.1 1996/09/30 16:34:38 ws Exp $ */
/*-
@@ -89,13 +89,6 @@ extern vaddr_t ppc_kvm_stolen;
#define VM_PHYS_SIZE (USRIOSIZE * PAGE_SIZE)
-#define __HAVE_PMAP_PHYSSEG
-struct pmap_physseg {
- struct pted_pv_head *pvent;
- char *attrs;
- /* NULL ??? */
-};
-
#define VM_PHYSSEG_MAX 32 /* actually we could have this many segments */
#define VM_PHYSSEG_STRAT VM_PSTRAT_BSEARCH
#define VM_PHYSSEG_NOADD /* can't add RAM after vm_mem_init */
@@ -103,4 +96,15 @@ struct pmap_physseg {
#define VM_NFREELIST 1
#define VM_FREELIST_DEFAULT 0
+#define __HAVE_VM_PAGE_MD
+struct pv_entry;
+struct vm_page_md {
+ LIST_HEAD(,pte_desc) pv_list;
+};
+
+#define VM_MDPAGE_INIT(pg) do { \
+ LIST_INIT(&((pg)->mdpage.pv_list)); \
+} while (0)
+
+
#endif
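
Note: defining __HAVE_VM_PAGE_MD makes uvm embed struct vm_page_md in
every struct vm_page as the mdpage member and invoke VM_MDPAGE_INIT()
when it initializes its page array; the pmap no longer allocates or
initializes the P->V heads itself. A minimal sketch of that per-page
setup (pages[] and npages are hypothetical names, kernel context
assumed):

	int i;

	for (i = 0; i < npages; i++) {
		/* expands to LIST_INIT(&pages[i].mdpage.pv_list) */
		VM_MDPAGE_INIT(&pages[i]);
	}
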
diff --git a/sys/arch/macppc/macppc/dma.c b/sys/arch/macppc/macppc/dma.c
index 7803b0804dd..c66dee85b48 100644
--- a/sys/arch/macppc/macppc/dma.c
+++ b/sys/arch/macppc/macppc/dma.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: dma.c,v 1.24 2005/12/17 07:31:26 miod Exp $ */
+/* $OpenBSD: dma.c,v 1.25 2007/05/27 15:46:02 drahn Exp $ */
/* $NetBSD: machdep.c,v 1.214 1996/11/10 03:16:17 thorpej Exp $ */
/*-
@@ -387,8 +387,36 @@ void
_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
bus_size_t len, int op)
{
+ int i;
+ bus_size_t minlen, wlen;
+ bus_addr_t pa, addr;
+ struct vm_page *pg;
+
+ for (i = 0; i < map->dm_nsegs && len != 0; i++) {
+ /* Find the beginning segment. */
+ if (offset >= map->dm_segs[i].ds_len) {
+ offset -= map->dm_segs[i].ds_len;
+ continue;
+ }
+
+ minlen = len < map->dm_segs[i].ds_len - offset ?
+ len : map->dm_segs[i].ds_len - offset;
- /* Nothing to do here. */
+ addr = map->dm_segs[i].ds_addr + offset;
+
+ switch (op) {
+ case BUS_DMASYNC_POSTWRITE:
+ for (pa = trunc_page(addr), wlen = 0;
+ pa < round_page(addr + minlen);
+ pa += PAGE_SIZE) {
+ pg = PHYS_TO_VM_PAGE(pa);
+ if (pg != NULL)
+ atomic_clearbits_int(&pg->pg_flags,
+ PG_PMAP_EXE);
+ }
+ }
+
+ }
}
/*
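
Note: _dmamap_sync() is no longer a no-op; on BUS_DMASYNC_POSTWRITE
(the device has written into memory) it clears PG_PMAP_EXE on every
page the transfer touched, so the next executable mapping of those
pages forces an icache sync. The page walk uses the usual
trunc_page/round_page idiom; a standalone sketch of just that idiom
(for_each_page is a hypothetical helper, not in the tree):

	/* Visit every vm_page overlapping [addr, addr + len).
	 * trunc_page rounds down and round_page rounds up, so partial
	 * pages at both ends are included. */
	void
	for_each_page(bus_addr_t addr, bus_size_t len,
	    void (*fn)(struct vm_page *))
	{
		paddr_t pa;
		struct vm_page *pg;

		for (pa = trunc_page(addr); pa < round_page(addr + len);
		    pa += PAGE_SIZE) {
			pg = PHYS_TO_VM_PAGE(pa);
			if (pg != NULL)		/* skip unmanaged memory */
				(*fn)(pg);
		}
	}
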
diff --git a/sys/arch/powerpc/include/pmap.h b/sys/arch/powerpc/include/pmap.h
index 1fad03dd0df..51dbf28f286 100644
--- a/sys/arch/powerpc/include/pmap.h
+++ b/sys/arch/powerpc/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.43 2007/05/03 18:40:19 miod Exp $ */
+/* $OpenBSD: pmap.h,v 1.44 2007/05/27 15:46:02 drahn Exp $ */
/* $NetBSD: pmap.h,v 1.1 1996/09/30 16:34:29 ws Exp $ */
/*-
@@ -98,17 +98,17 @@ typedef struct pmap *pmap_t;
extern struct pmap kernel_pmap_;
#define pmap_kernel() (&kernel_pmap_)
-boolean_t pteclrbits(paddr_t pa, u_int mask, u_int clear);
+boolean_t pteclrbits(struct vm_page *pg, u_int mask, u_int clear);
#define pmap_clear_modify(page) \
- (pteclrbits(VM_PAGE_TO_PHYS(page), PTE_CHG_32, TRUE))
+ (pteclrbits((page), PG_PMAP_MOD, TRUE))
#define pmap_clear_reference(page) \
- (pteclrbits(VM_PAGE_TO_PHYS(page), PTE_REF_32, TRUE))
+ (pteclrbits((page), PG_PMAP_REF, TRUE))
#define pmap_is_modified(page) \
- (pteclrbits(VM_PAGE_TO_PHYS(page), PTE_CHG_32, FALSE))
+ (pteclrbits((page), PG_PMAP_MOD, FALSE))
#define pmap_is_referenced(page) \
- (pteclrbits(VM_PAGE_TO_PHYS(page), PTE_REF_32, FALSE))
+ (pteclrbits((page), PG_PMAP_REF, FALSE))
#define pmap_unwire(pm, va)
#define pmap_phys_address(x) (x)
@@ -142,6 +142,10 @@ void pmap_proc_iflush(struct proc *proc, vaddr_t va, vsize_t len);
#define PMAP_STEAL_MEMORY
+#define PG_PMAP_MOD PG_PMAP0
+#define PG_PMAP_REF PG_PMAP1
+#define PG_PMAP_EXE PG_PMAP2
+
#endif /* _KERNEL */
#endif /* _LOCORE */
#endif /* _POWERPC_PMAP_H_ */
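
Note: the four MI reference/modify hooks now funnel into one helper
keyed on struct vm_page and the new software flags, instead of raw PTE
bits looked up by physical address. A sketch of the expansions (kernel
context assumed):

	boolean_t mod, ref;

	mod = pteclrbits(pg, PG_PMAP_MOD, FALSE);  /* pmap_is_modified */
	ref = pteclrbits(pg, PG_PMAP_REF, FALSE);  /* pmap_is_referenced */
	(void)pteclrbits(pg, PG_PMAP_MOD, TRUE);   /* pmap_clear_modify */
	(void)pteclrbits(pg, PG_PMAP_REF, TRUE);   /* pmap_clear_reference */
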
diff --git a/sys/arch/powerpc/powerpc/pmap.c b/sys/arch/powerpc/powerpc/pmap.c
index fa803856560..dbfd7ab5c75 100644
--- a/sys/arch/powerpc/powerpc/pmap.c
+++ b/sys/arch/powerpc/powerpc/pmap.c
@@ -1,7 +1,7 @@
-/* $OpenBSD: pmap.c,v 1.100 2007/05/13 03:45:59 drahn Exp $ */
+/* $OpenBSD: pmap.c,v 1.101 2007/05/27 15:46:02 drahn Exp $ */
/*
- * Copyright (c) 2001, 2002 Dale Rahn.
+ * Copyright (c) 2001, 2002, 2007 Dale Rahn.
* All rights reserved.
*
*
@@ -33,6 +33,7 @@
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>
+#include <sys/queue.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/pool.h>
@@ -48,15 +49,11 @@
#include <ddb/db_output.h>
struct pmap kernel_pmap_;
-static int npgs;
static struct mem_region *pmap_mem, *pmap_avail;
struct mem_region pmap_allocated[10];
int pmap_cnt_avail;
int pmap_cnt_allocated;
-
-void * pmap_pvh;
-void * pmap_attrib;
struct pte_64 *pmap_ptable64;
struct pte_32 *pmap_ptable32;
int pmap_ptab_cnt;
@@ -71,9 +68,6 @@ paddr_t zero_page;
paddr_t copy_src_page;
paddr_t copy_dst_page;
-/* P->V table */
-LIST_HEAD(pted_pv_head, pte_desc);
-
struct pte_desc {
/* Linked list of phys -> virt entries */
LIST_ENTRY(pte_desc) pted_pv_list;
@@ -112,7 +106,7 @@ void pmap_vp_destroy(pmap_t pm);
struct pte_desc *pmap_vp_lookup(pmap_t pm, vaddr_t va);
/* PV routines */
-int pmap_enter_pv(struct pte_desc *pted, struct pted_pv_head *);
+void pmap_enter_pv(struct pte_desc *pted, struct vm_page *);
void pmap_remove_pv(struct pte_desc *pted);
@@ -160,8 +154,6 @@ int pmap_initialized = 0;
int physmem;
int physmaxaddr;
-#define ATTRSHIFT 4
-
/* virtual to physical helpers */
static inline int
VP_SR(vaddr_t va)
@@ -289,31 +281,6 @@ pmap_vp_enter(pmap_t pm, vaddr_t va, struct pte_desc *pted)
pmap_simpleunlock_pm(pm);
}
-/*
- * HELPER FUNCTIONS
- */
-static inline struct pted_pv_head *
-pmap_find_pvh(paddr_t pa)
-{
- int bank, off;
- bank = vm_physseg_find(atop(pa), &off);
- if (bank != -1) {
- return &vm_physmem[bank].pmseg.pvent[off];
- }
- return NULL;
-}
-
-static inline char *
-pmap_find_attr(paddr_t pa)
-{
- int bank, off;
- bank = vm_physseg_find(atop(pa), &off);
- if (bank != -1) {
- return &vm_physmem[bank].pmseg.attrs[off];
- }
- return NULL;
-}
-
/* PTE manipulation/calculations */
static inline void
tlbie(vaddr_t va)
@@ -419,22 +386,15 @@ PTED_VALID(struct pte_desc *pted)
 * have too noticeable unnecessary ram consumption.
*/
-int
-pmap_enter_pv(struct pte_desc *pted, struct pted_pv_head *pvh)
+void
+pmap_enter_pv(struct pte_desc *pted, struct vm_page *pg)
{
- int first;
-
if (__predict_false(!pmap_initialized)) {
- return 0;
- }
- if (pvh == NULL) {
- return 0;
+ return;
}
- first = LIST_EMPTY(pvh);
- LIST_INSERT_HEAD(pvh, pted, pted_pv_list);
+ LIST_INSERT_HEAD(&(pg->mdpage.pv_list), pted, pted_pv_list);
pted->pted_va |= PTED_VA_MANAGED_M;
- return first;
}
void
@@ -443,29 +403,43 @@ pmap_remove_pv(struct pte_desc *pted)
LIST_REMOVE(pted, pted_pv_list);
}
+
+/* PTE_CHG_32 == PTE_CHG_64 */
+/* PTE_REF_32 == PTE_REF_64 */
+static __inline u_int
+pmap_pte2flags(u_int32_t pte)
+{
+ return (((pte & PTE_REF_32) ? PG_PMAP_REF : 0) |
+ ((pte & PTE_CHG_32) ? PG_PMAP_MOD : 0));
+}
+
+static __inline u_int
+pmap_flags2pte(u_int32_t flags)
+{
+ return (((flags & PG_PMAP_REF) ? PTE_REF_32 : 0) |
+ ((flags & PG_PMAP_MOD) ? PTE_CHG_32 : 0));
+}
+
void
pmap_attr_save(paddr_t pa, u_int32_t bits)
{
- int bank, pg;
- u_int8_t *attr;
+ struct vm_page *pg;
- bank = vm_physseg_find(atop(pa), &pg);
- if (bank == -1)
+ pg = PHYS_TO_VM_PAGE(pa);
+ if (pg == NULL)
return;
- attr = &vm_physmem[bank].pmseg.attrs[pg];
- *attr |= (u_int8_t)(bits >> ATTRSHIFT);
+
+ atomic_setbits_int(&pg->pg_flags, pmap_pte2flags(bits));
}
int
pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
{
struct pte_desc *pted;
- struct pted_pv_head *pvh;
+ struct vm_page *pg;
int s;
int need_sync = 0;
int cache;
- u_int8_t *pattr = NULL;
- int first_map = 0;
/* MP - Acquire lock for this pmap */
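
Note: pmap_pte2flags() and pmap_flags2pte() translate between the
hardware referenced/changed PTE bits and the software PG_PMAP_* page
flags; because PTE_REF_32 == PTE_REF_64 and PTE_CHG_32 == PTE_CHG_64,
one pair of helpers serves both PTE formats, and pmap_attr_save() can
promote bits with a single atomic_setbits_int(). A round-trip sketch:

	/* hardware bits -> pg_flags -> hardware bits; the helpers are
	 * inverse bit-for-bit maps over these two bits. */
	u_int32_t pte_lo = PTE_REF_32 | PTE_CHG_32;
	u_int flags = pmap_pte2flags(pte_lo);	/* PG_PMAP_REF|PG_PMAP_MOD */
	u_int back  = pmap_flags2pte(flags);	/* PTE_REF_32|PTE_CHG_32 */
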
@@ -488,8 +462,8 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
}
/* Calculate PTE */
- pvh = pmap_find_pvh(pa);
- if (pvh != NULL)
+ pg = PHYS_TO_VM_PAGE(pa);
+ if (pg != NULL)
cache = PMAP_CACHE_WB; /* managed memory is cacheable */
else
cache = PMAP_CACHE_CI;
@@ -499,26 +473,8 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
else
pmap_fill_pte32(pm, va, pa, pted, prot, flags, cache);
- if (pvh != NULL) {
- pattr = pmap_find_attr(pa); /* pattr only for managed mem */
- first_map = pmap_enter_pv(pted, pvh); /* only managed mem */
- }
-
- /*
- * We want to flush for executable pages which are not managed???
- * Always flush for the first mapping if it is executable.
- * If previous mappings exist, but this is the first EXE, sync.
- */
-
- if (prot & VM_PROT_EXECUTE) {
- need_sync = 1;
- if (pvh != NULL) {
- if (!first_map)
- need_sync =
- (*pattr & (PTE_EXE_32 >> ATTRSHIFT)) == 0;
- else if (pattr != NULL)
- *pattr = 0;
- }
+ if (pg != NULL) {
+ pmap_enter_pv(pted, pg); /* only managed mem */
}
/*
@@ -538,15 +494,18 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
if (pm->pm_sr[sn] & SR_NOEXEC)
pm->pm_sr[sn] &= ~SR_NOEXEC;
- if (pattr != NULL)
- *pattr |= (PTE_EXE_32 >> ATTRSHIFT);
+ if (pg != NULL) {
+ need_sync = ((pg->pg_flags & PG_PMAP_EXE) == 0);
+ atomic_setbits_int(&pg->pg_flags, PG_PMAP_EXE);
+ } else
+ need_sync = 1;
} else {
/*
* Should we be paranoid about writeable non-exec
* mappings ? if so, clear the exec tag
*/
- if ((prot & VM_PROT_WRITE) && (pattr != NULL))
- *pattr &= ~(PTE_EXE_32 >> ATTRSHIFT);
+ if ((prot & VM_PROT_WRITE) && (pg != NULL))
+ atomic_clearbits_int(&pg->pg_flags, PG_PMAP_EXE);
}
splx(s);
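
Note: with the attribute array gone, pmap_enter()'s icache decision
reads straight from pg_flags: a managed page is synced only on its
first executable mapping (PG_PMAP_EXE still clear) and then marked; an
unmanaged page is always synced; a writable non-executable mapping
drops PG_PMAP_EXE so a later executable mapping syncs again. The
decision, condensed from the hunks above:

	if (prot & VM_PROT_EXECUTE) {
		if (pg != NULL) {
			/* first exec mapping of this page? */
			need_sync = ((pg->pg_flags & PG_PMAP_EXE) == 0);
			atomic_setbits_int(&pg->pg_flags, PG_PMAP_EXE);
		} else
			need_sync = 1;	/* unmanaged: no history kept */
	} else if ((prot & VM_PROT_WRITE) && pg != NULL)
		atomic_clearbits_int(&pg->pg_flags, PG_PMAP_EXE);
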
@@ -712,7 +671,7 @@ _pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, int flags, int cache)
}
if (cache == PMAP_CACHE_DEFAULT) {
- if (pmap_find_pvh(pa) != NULL)
+ if (PHYS_TO_VM_PAGE(pa) != NULL)
cache = PMAP_CACHE_WB; /* managed memory is cacheable */
else
cache = PMAP_CACHE_CI;
@@ -980,30 +939,21 @@ pmap_fill_pte32(pmap_t pm, vaddr_t va, paddr_t pa, struct pte_desc *pted,
* ack, copied code in the pte flush code....
*/
int
-pteclrbits(paddr_t pa, u_int bit, u_int clear)
+pteclrbits(struct vm_page *pg, u_int flagbit, u_int clear)
{
- char *pattr;
u_int bits;
int s;
struct pte_desc *pted;
- struct pted_pv_head *pvh;
+ u_int ptebit = pmap_flags2pte(flagbit);
/* PTE_CHG_32 == PTE_CHG_64 */
- /* PTE_REG_32 == PTE_REG_64 */
-
- pattr = pmap_find_attr(pa);
-
- /* check if managed memory */
- if (pattr == NULL)
- return 0;
-
- pvh = pmap_find_pvh(pa);
+ /* PTE_REF_32 == PTE_REF_64 */
/*
* First try the attribute cache
*/
- bits = (*pattr << ATTRSHIFT) & bit;
- if ((bits == bit) && (clear == 0))
+ bits = pg->pg_flags & flagbit;
+ if ((bits == flagbit) && (clear == 0))
return bits;
/* cache did not contain all necessary bits,
@@ -1014,7 +964,7 @@ pteclrbits(paddr_t pa, u_int bit, u_int clear)
/* need lock for this pv */
s = splvm();
- LIST_FOREACH(pted, pvh, pted_pv_list) {
+ LIST_FOREACH(pted, &(pg->mdpage.pv_list), pted_pv_list) {
vaddr_t va = pted->pted_va & PAGE_MASK;
pmap_t pm = pted->pted_pmap;
struct pte_64 *ptp64;
@@ -1041,16 +991,16 @@ pteclrbits(paddr_t pa, u_int bit, u_int clear)
*/
if ((pted->p.pted_pte64.pte_hi |
(PTED_HID(pted) ? PTE_HID_64 : 0)) == ptp64->pte_hi) {
- bits |= ptp64->pte_lo & bit;
+ bits |= pmap_pte2flags(ptp64->pte_lo & ptebit);
if (clear) {
ptp64->pte_hi &= ~PTE_VALID_64;
__asm__ volatile ("sync");
tlbie(va);
tlbsync();
- ptp64->pte_lo &= ~bit;
+ ptp64->pte_lo &= ~ptebit;
__asm__ volatile ("sync");
ptp64->pte_hi |= PTE_VALID_64;
- } else if (bits == bit)
+ } else if (bits == flagbit)
break;
}
} else {
@@ -1069,25 +1019,30 @@ pteclrbits(paddr_t pa, u_int bit, u_int clear)
*/
if ((pted->p.pted_pte32.pte_hi |
(PTED_HID(pted) ? PTE_HID_32 : 0)) == ptp32->pte_hi) {
- bits |= ptp32->pte_lo & bit;
+ bits |= pmap_pte2flags(ptp32->pte_lo & ptebit);
if (clear) {
ptp32->pte_hi &= ~PTE_VALID_32;
__asm__ volatile ("sync");
tlbie(va);
tlbsync();
- ptp32->pte_lo &= ~bit;
+ ptp32->pte_lo &= ~ptebit;
__asm__ volatile ("sync");
ptp32->pte_hi |= PTE_VALID_32;
- } else if (bits == bit)
+ } else if (bits == flagbit)
break;
}
}
}
- if (clear)
- *pattr &= ~(bit >> ATTRSHIFT);
- else
- *pattr |= (bits >> ATTRSHIFT);
+ if (clear) {
+ /*
+ * this is done a second time, because while walking the list
+ * a bit could have been promoted via pmap_attr_save()
+ */
+ bits |= pg->pg_flags & flagbit;
+ atomic_clearbits_int(&pg->pg_flags, flagbit);
+ } else
+ atomic_setbits_int(&pg->pg_flags, bits);
splx(s);
return bits;
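
Note: in the clear path, pg_flags is read a second time before the
atomic clear because pmap_attr_save() runs from the PTE spill path and
can promote a bit after the initial read; folding the late arrival
into bits keeps the return value from losing it. The interleaving the
re-read covers, as a sketch:

	/*
	 *   pteclrbits()                        spill path
	 *   ------------                        ----------
	 *   bits = pg->pg_flags & flagbit;
	 *   ... walk pv list ...                pmap_attr_save(pa, ...)
	 *                                        -> atomic_setbits_int()
	 *   bits |= pg->pg_flags & flagbit;     <- sees the new bit
	 *   atomic_clearbits_int(&pg->pg_flags, flagbit);
	 */
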
@@ -1659,15 +1614,6 @@ pmap_bootstrap(u_int kernelstart, u_int kernelend)
tlbia();
- npgs = 0;
- for (mp = pmap_avail; mp->size; mp++) {
- npgs += btoc(mp->size);
- }
- /* Ok we lose a few pages from this allocation, but hopefully
- * not too many
- */
- pmap_pvh = pmap_steal_avail(sizeof(struct pted_pv_head *) * npgs, 4);
- pmap_attrib = pmap_steal_avail(sizeof(char) * npgs, 1);
pmap_avail_fixup();
for (mp = pmap_avail; mp->size; mp++) {
if (mp->start > 0x80000000)
@@ -2067,31 +2013,24 @@ pmap_page_ro32(pmap_t pm, vaddr_t va)
void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
- paddr_t pa = VM_PAGE_TO_PHYS(pg);
int s;
struct pte_desc *pted;
- struct pted_pv_head *pvh;
/* need to lock for this pv */
s = splvm();
- pvh = pmap_find_pvh(pa);
-
- /* nothing to do if not a managed page */
- if (pvh == NULL) {
- splx(s);
- return;
- }
if (prot == VM_PROT_NONE) {
- while (!LIST_EMPTY(pvh)) {
- pted = LIST_FIRST(pvh);
+ while (!LIST_EMPTY(&(pg->mdpage.pv_list))) {
+ pted = LIST_FIRST(&(pg->mdpage.pv_list));
pmap_remove_pg(pted->pted_pmap, pted->pted_va);
}
+ /* page is being reclaimed, sync icache next use */
+ atomic_clearbits_int(&pg->pg_flags, PG_PMAP_EXE);
splx(s);
return;
}
- LIST_FOREACH(pted, pvh, pted_pv_list) {
+ LIST_FOREACH(pted, &(pg->mdpage.pv_list), pted_pv_list) {
if (ppc_proc_is_64b)
pmap_page_ro64(pted->pted_pmap, pted->pted_va, prot);
else
@@ -2150,11 +2089,6 @@ pmap_real_memory(paddr_t *start, vsize_t *size)
void
pmap_init()
{
- vsize_t sz;
- struct pted_pv_head *pvh;
- char *attr;
- int i, bank;
-
pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmap", NULL);
pool_setlowat(&pmap_pmap_pool, 2);
pool_init(&pmap_vp_pool, sizeof(struct pmapvp), 0, 0, 0, "vp", NULL);
@@ -2163,22 +2097,6 @@ pmap_init()
NULL);
pool_setlowat(&pmap_pted_pool, 20);
- /* pmap_pvh and pmap_attr must be allocated 1-1 so that pmap_save_attr
- * is callable from pte_spill_r (with vm disabled)
- */
- pvh = (struct pted_pv_head *)pmap_pvh;
- for (i = npgs; i > 0; i--)
- LIST_INIT(pvh++);
- attr = pmap_attrib;
- bzero(pmap_attrib, npgs);
- pvh = (struct pted_pv_head *)pmap_pvh;
- for (bank = 0; bank < vm_nphysseg; bank++) {
- sz = vm_physmem[bank].end - vm_physmem[bank].start;
- vm_physmem[bank].pmseg.pvent = pvh;
- vm_physmem[bank].pmseg.attrs = attr;
- pvh += sz;
- attr += sz;
- }
pmap_initialized = 1;
}
@@ -2386,7 +2304,7 @@ pte_insert64(struct pte_desc *pted)
__asm volatile ("sync");
return;
}
- /* first just try fill of secondary hash */
+ /* try fill of secondary hash */
ptp64 = pmap_ptable64 + (idx ^ pmap_ptab_mask) * 8;
for (i = 0; i < 8; i++) {
if (ptp64[i].pte_hi & PTE_VALID_64)
@@ -2430,7 +2348,7 @@ pte_insert64(struct pte_desc *pted)
tlbsync();
pmap_attr_save(ptp64->pte_lo & PTE_RPGN_64,
- ptp64->pte_lo & (PTE_REF_32|PTE_CHG_32));
+ ptp64->pte_lo & (PTE_REF_64|PTE_CHG_64));
}
if (secondary)
@@ -2493,7 +2411,7 @@ pte_insert32(struct pte_desc *pted)
__asm volatile ("sync");
return;
}
- /* first just try fill of secondary hash */
+ /* try fill of secondary hash */
ptp32 = pmap_ptable32 + (idx ^ pmap_ptab_mask) * 8;
for (i = 0; i < 8; i++) {
if (ptp32[i].pte_hi & PTE_VALID_32)
@@ -2716,13 +2634,14 @@ int pmap_show_mappings(paddr_t pa);
int
pmap_show_mappings(paddr_t pa)
{
- struct pted_pv_head *pvh;
struct pte_desc *pted;
- pvh = pmap_find_pvh(pa);
- if (pvh == NULL) {
+ struct vm_page *pg;
+
+ pg = PHYS_TO_VM_PAGE(pa);
+ if (pg == NULL) {
db_printf("pa %x: unmanaged\n");
} else {
- LIST_FOREACH(pted, pvh, pted_pv_list) {
+ LIST_FOREACH(pted, &(pg->mdpage.pv_list), pted_pv_list) {
pmap_print_pted(pted, db_printf);
}
}