author    Chris Wilson <chris@chris-wilson.co.uk>  2011-12-10 22:45:25 +0000
committer Chris Wilson <chris@chris-wilson.co.uk>  2011-12-11 00:52:54 +0000
commit    051a18063df075536cb1ac0dc4dfc3c1306ab74e
tree      c485da7f3349fe814b863482a54d642ca9a4a92b
parent    735a15208dd600eefa3090f344186df9cac0462d
sna: Implement a VMA cache
A VMA cache appears unavoidable thanks to compiz and an excruciatingly slow GTT pagefault, though it does look like it will be ineffectual during everyday usage. Compiz (and presumably other compositing managers) appears to be undoing all the pagefault minimisation, as demonstrated on gen5 with large XPutImage.

It also appears that the CPU-to-memory bandwidth ratio plays a crucial role in determining whether going straight to the GTT or through the CPU cache is a win - so there is no trivial heuristic.

x11perf -putimage10 -putimage500:

On i5-2467m:
  Before:  bare: 1,150,000  2,410    compiz: 438,000  2,670
  After:   bare: 1,190,000  2,730    compiz: 437,000  2,690
  UXA:     bare:   658,000  2,670    compiz: 389,000  2,520

On i3-330m:
  Before:  bare: 537,000  1,080    compiz: 263,000  398
  After:   bare: 606,000  1,360    compiz: 203,000  985
  UXA:     bare: 294,000  1,070    compiz: 197,000  821

On pnv:
  Before:  bare: 179,000  213    compiz: 106,000  123
  After:   bare: 181,000  246    compiz: 103,000  197
  UXA:     bare: 114,000  312    compiz:  75,700  191

Reported-by: Michael Larabel <Michael@phoronix.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Diffstat (limited to 'src/sna/kgem_debug_gen7.c')
-rw-r--r--  src/sna/kgem_debug_gen7.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
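The change to the debug code below is mechanical, but it helps to sketch the idea it plugs into. Here is a minimal illustration of a VMA cache, assuming an LRU list of cached GTT mappings capped at a fixed count; every name in it (vma_bo, vma_cache, vma_cache_put, vma_cache_get) is a hypothetical stand-in for exposition, not kgem's actual implementation:

#include <stddef.h>
#include <sys/mman.h>

/* A buffer object with an optionally cached GTT mapping. */
struct vma_bo {
        void *ptr;           /* cached GTT mapping, NULL once evicted */
        size_t size;         /* length of the mapping */
        struct vma_bo *next; /* MRU-first linkage on the cache list */
};

struct vma_cache {
        struct vma_bo *mru;  /* most recently "unmapped" buffer first */
        int count, limit;    /* evict once more than limit VMAs are cached */
};

/* Unmap: park the mapping on the cache; evict the coldest if over budget. */
static void vma_cache_put(struct vma_cache *cache, struct vma_bo *bo)
{
        bo->next = cache->mru;
        cache->mru = bo;

        if (++cache->count > cache->limit) {
                struct vma_bo **link = &cache->mru, *victim;

                while ((*link)->next)      /* walk to the LRU tail */
                        link = &(*link)->next;
                victim = *link;
                *link = NULL;              /* unlink before releasing */
                munmap(victim->ptr, victim->size);
                victim->ptr = NULL;
                cache->count--;
        }
}

/* Map: reuse the cached mapping when present, else the caller must mmap. */
static void *vma_cache_get(struct vma_cache *cache, struct vma_bo *bo)
{
        struct vma_bo **link;

        for (link = &cache->mru; *link; link = &(*link)->next) {
                if (*link == bo) {
                        *link = bo->next;  /* mapping is live again */
                        cache->count--;
                        return bo->ptr;
                }
        }
        return NULL;
}

Bounding the number of cached mappings keeps address-space consumption in check while still absorbing the map/unmap churn that a compositor such as compiz generates.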
diff --git a/src/sna/kgem_debug_gen7.c b/src/sna/kgem_debug_gen7.c
index f6a49752..a33a918d 100644
--- a/src/sna/kgem_debug_gen7.c
+++ b/src/sna/kgem_debug_gen7.c
@@ -89,7 +89,7 @@ static void gen7_update_vertex_buffer(struct kgem *kgem, const uint32_t *data)
 
 	i = data[0] >> 26;
 	if (state.vb[i].current)
-		munmap(state.vb[i].base, state.vb[i].current->size);
+		kgem_bo_unmap(kgem, state.vb[i].base);
 	state.vb[i].current = bo;
 	state.vb[i].base = base;
 
@@ -130,7 +130,7 @@ static void gen7_update_dynamic_buffer(struct kgem *kgem, const uint32_t offset)
 	}
 
 	if (state.dynamic_state.current)
-		munmap(state.dynamic_state.base, state.dynamic_state.current->size);
+		kgem_bo_unmap(kgem, state.dynamic_state.base);
 	state.dynamic_state.current = bo;
 	state.dynamic_state.base = base;
 
@@ -306,7 +306,7 @@ static void finish_vertex_buffers(struct kgem *kgem)
 
	for (i = 0; i < ARRAY_SIZE(state.vb); i++)
 		if (state.vb[i].current)
-			munmap(state.vb[i].base, state.vb[i].current->size);
+			kgem_bo_unmap(kgem, state.vb[i].current);
 }
 
 static void finish_state(struct kgem *kgem)
@@ -314,7 +314,7 @@ static void finish_state(struct kgem *kgem)
 	finish_vertex_buffers(kgem);
 
 	if (state.dynamic_state.current)
-		munmap(state.dynamic_state.base, state.dynamic_state.current->size);
+		kgem_bo_unmap(kgem, state.dynamic_state.base);
 
 	memset(&state, 0, sizeof(state));
 }
@@ -482,7 +482,7 @@ static void
 put_reloc(struct kgem *kgem, struct reloc *r)
 {
 	if (r->bo != NULL)
-		munmap(r->base, r->bo->size);
+		kgem_bo_unmap(kgem, r->bo);
 }
 
 static const char *
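The five hunks above all apply the same substitution: the debug code stops calling munmap() on the mapping itself and instead returns it to kgem, whose VMA cache decides when the pages are really unmapped. Note that the call sites are not uniform: three pass the mapped address (state.vb[i].base, state.dynamic_state.base) while finish_vertex_buffers and put_reloc pass the buffer object (state.vb[i].current, r->bo); both forms appear verbatim in the commit. Restated against the hypothetical sketch earlier (finish_buffer is likewise an illustrative name, not kgem code):

/* Schematic restatement of the substitution, reusing the sketch types:
 * instead of unmapping at every call site, hand the mapping back. */
static void finish_buffer(struct vma_cache *cache, struct vma_bo *bo)
{
        if (bo != NULL)
                vma_cache_put(cache, bo);  /* was: munmap(bo->ptr, bo->size) */
}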