author    Eric Anholt <eric@anholt.net>    2009-10-06 16:30:08 -0700
committer Owain G. Ainsworth <oga@openbsd.org>    2010-02-28 23:35:15 +0000
commit    503357c5c0b28d60f7cc608887f0bbd270cf2b45 (patch)
tree      cd2f807b284b2d7f55c275fdedcfcf2d85760cba /src/i830_batchbuffer.h
parent    9f587cfdc59484fca2cf466679c526983114fd22 (diff)
Move to kernel coding style.
We've talked about doing this since the start of the project, putting it off until "some convenient time". Just after removing a third of the driver seems like a convenient time, when backporting's probably not happening much anyway.

(cherry picked from commit 8ae0e44e42db645abe6d385f561260d2ae4a1960)
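The conversion is mechanical: behaviour is unchanged, indentation moves to tabs, and the return type joins the function name on a single line, as in Linux kernel coding style. A minimal sketch of the pattern, using a hypothetical helper rather than code from this header:

/* Hypothetical type and helper, shown only to illustrate the style change. */
struct example_buf {
	int size;
	int used;
};

/* Old driver style split the declaration:
 *     static inline int
 *     example_space_left(struct example_buf *buf)
 * and indented the body with spaces.
 */

/* Kernel style: declaration on one line, body indented with a tab. */
static inline int example_space_left(struct example_buf *buf)
{
	return buf->size - buf->used;
}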
Diffstat (limited to 'src/i830_batchbuffer.h')
-rw-r--r--    src/i830_batchbuffer.h    140
1 file changed, 70 insertions(+), 70 deletions(-)
diff --git a/src/i830_batchbuffer.h b/src/i830_batchbuffer.h
index 4903b8c4..9f1c5b1d 100644
--- a/src/i830_batchbuffer.h
+++ b/src/i830_batchbuffer.h
@@ -37,63 +37,59 @@ void intel_batch_teardown(ScrnInfoPtr pScrn);
void intel_batch_flush(ScrnInfoPtr pScrn, Bool flushed);
void intel_batch_wait_last(ScrnInfoPtr pScrn);
-static inline int
-intel_batch_space(I830Ptr pI830)
+static inline int intel_batch_space(I830Ptr pI830)
{
- return (pI830->batch_bo->size - BATCH_RESERVED) - (pI830->batch_used);
+ return (pI830->batch_bo->size - BATCH_RESERVED) - (pI830->batch_used);
}
static inline void
intel_batch_require_space(ScrnInfoPtr pScrn, I830Ptr pI830, GLuint sz)
{
- assert(sz < pI830->batch_bo->size - 8);
- if (intel_batch_space(pI830) < sz)
- intel_batch_flush(pScrn, FALSE);
+ assert(sz < pI830->batch_bo->size - 8);
+ if (intel_batch_space(pI830) < sz)
+ intel_batch_flush(pScrn, FALSE);
}
-static inline void
-intel_batch_start_atomic(ScrnInfoPtr pScrn, unsigned int sz)
+static inline void intel_batch_start_atomic(ScrnInfoPtr pScrn, unsigned int sz)
{
- I830Ptr pI830 = I830PTR(pScrn);
+ I830Ptr pI830 = I830PTR(pScrn);
- assert(!pI830->in_batch_atomic);
- intel_batch_require_space(pScrn, pI830, sz * 4);
+ assert(!pI830->in_batch_atomic);
+ intel_batch_require_space(pScrn, pI830, sz * 4);
- pI830->in_batch_atomic = TRUE;
- pI830->batch_atomic_limit = pI830->batch_used + sz * 4;
+ pI830->in_batch_atomic = TRUE;
+ pI830->batch_atomic_limit = pI830->batch_used + sz * 4;
}
-static inline void
-intel_batch_end_atomic(ScrnInfoPtr pScrn)
+static inline void intel_batch_end_atomic(ScrnInfoPtr pScrn)
{
- I830Ptr pI830 = I830PTR(pScrn);
+ I830Ptr pI830 = I830PTR(pScrn);
- assert(pI830->in_batch_atomic);
- assert(pI830->batch_used <= pI830->batch_atomic_limit);
- pI830->in_batch_atomic = FALSE;
+ assert(pI830->in_batch_atomic);
+ assert(pI830->batch_used <= pI830->batch_atomic_limit);
+ pI830->in_batch_atomic = FALSE;
}
-static inline void
-intel_batch_emit_dword(I830Ptr pI830, uint32_t dword)
+static inline void intel_batch_emit_dword(I830Ptr pI830, uint32_t dword)
{
- assert(pI830->batch_ptr != NULL);
- assert(intel_batch_space(pI830) >= 4);
- *(uint32_t *)(pI830->batch_ptr + pI830->batch_used) = dword;
- pI830->batch_used += 4;
+ assert(pI830->batch_ptr != NULL);
+ assert(intel_batch_space(pI830) >= 4);
+ *(uint32_t *) (pI830->batch_ptr + pI830->batch_used) = dword;
+ pI830->batch_used += 4;
}
static inline void
-intel_batch_emit_reloc (I830Ptr pI830,
- dri_bo *bo,
- uint32_t read_domains,
- uint32_t write_domains,
- uint32_t delta)
+intel_batch_emit_reloc(I830Ptr pI830,
+ dri_bo * bo,
+ uint32_t read_domains,
+ uint32_t write_domains, uint32_t delta)
{
- assert(intel_batch_space(pI830) >= 4);
- *(uint32_t *)(pI830->batch_ptr + pI830->batch_used) = bo->offset + delta;
- dri_bo_emit_reloc(pI830->batch_bo, read_domains, write_domains, delta,
- pI830->batch_used, bo);
- pI830->batch_used += 4;
+ assert(intel_batch_space(pI830) >= 4);
+ *(uint32_t *) (pI830->batch_ptr + pI830->batch_used) =
+ bo->offset + delta;
+ dri_bo_emit_reloc(pI830->batch_bo, read_domains, write_domains, delta,
+ pI830->batch_used, bo);
+ pI830->batch_used += 4;
}
static inline void
@@ -101,17 +97,18 @@ intel_batch_emit_reloc_pixmap(I830Ptr pI830, PixmapPtr pPixmap,
uint32_t read_domains, uint32_t write_domain,
uint32_t delta)
{
- dri_bo *bo = i830_get_pixmap_bo(pPixmap);
- uint32_t offset;
- assert(pI830->batch_ptr != NULL);
- assert(intel_batch_space(pI830) >= 4);
- if (bo) {
- intel_batch_emit_reloc(pI830, bo, read_domains, write_domain, delta);
- return;
- }
- offset = intel_get_pixmap_offset(pPixmap);
- *(uint32_t *)(pI830->batch_ptr + pI830->batch_used) = offset + delta;
- pI830->batch_used += 4;
+ dri_bo *bo = i830_get_pixmap_bo(pPixmap);
+ uint32_t offset;
+ assert(pI830->batch_ptr != NULL);
+ assert(intel_batch_space(pI830) >= 4);
+ if (bo) {
+ intel_batch_emit_reloc(pI830, bo, read_domains,
+ write_domain, delta);
+ return;
+ }
+ offset = intel_get_pixmap_offset(pPixmap);
+ *(uint32_t *)(pI830->batch_ptr + pI830->batch_used) = offset + delta;
+ pI830->batch_used += 4;
}
#define OUT_BATCH(dword) intel_batch_emit_dword(pI830, dword)
@@ -135,33 +132,36 @@ union intfloat {
#define BEGIN_BATCH(n) \
do { \
- if (pI830->batch_emitting != 0) \
- FatalError("%s: BEGIN_BATCH called without closing " \
- "ADVANCE_BATCH\n", __FUNCTION__); \
- intel_batch_require_space(pScrn, pI830, (n) * 4); \
- pI830->batch_emitting = (n) * 4; \
- pI830->batch_emit_start = pI830->batch_used; \
+ if (pI830->batch_emitting != 0) \
+ FatalError("%s: BEGIN_BATCH called without closing " \
+ "ADVANCE_BATCH\n", __FUNCTION__); \
+ intel_batch_require_space(pScrn, pI830, (n) * 4); \
+ pI830->batch_emitting = (n) * 4; \
+ pI830->batch_emit_start = pI830->batch_used; \
} while (0)
#define ADVANCE_BATCH() do { \
- if (pI830->batch_emitting == 0) \
- FatalError("%s: ADVANCE_BATCH called with no matching " \
- "BEGIN_BATCH\n", __FUNCTION__); \
- if (pI830->batch_used > pI830->batch_emit_start + pI830->batch_emitting) \
- FatalError("%s: ADVANCE_BATCH: exceeded allocation %d/%d\n ", \
- __FUNCTION__, \
- pI830->batch_used - pI830->batch_emit_start, \
- pI830->batch_emitting); \
- if (pI830->batch_used < pI830->batch_emit_start + pI830->batch_emitting) \
- FatalError("%s: ADVANCE_BATCH: under-used allocation %d/%d\n ", \
- __FUNCTION__, \
- pI830->batch_used - pI830->batch_emit_start, \
- pI830->batch_emitting); \
- if ((pI830->batch_emitting > 8) && (I810_DEBUG & DEBUG_ALWAYS_SYNC)) { \
- /* Note: not actually syncing, just flushing each batch. */ \
- intel_batch_flush(pScrn, FALSE); \
- } \
- pI830->batch_emitting = 0; \
+ if (pI830->batch_emitting == 0) \
+ FatalError("%s: ADVANCE_BATCH called with no matching " \
+ "BEGIN_BATCH\n", __FUNCTION__); \
+ if (pI830->batch_used > \
+ pI830->batch_emit_start + pI830->batch_emitting) \
+ FatalError("%s: ADVANCE_BATCH: exceeded allocation %d/%d\n ", \
+ __FUNCTION__, \
+ pI830->batch_used - pI830->batch_emit_start, \
+ pI830->batch_emitting); \
+ if (pI830->batch_used < pI830->batch_emit_start + \
+ pI830->batch_emitting) \
+ FatalError("%s: ADVANCE_BATCH: under-used allocation %d/%d\n ", \
+ __FUNCTION__, \
+ pI830->batch_used - pI830->batch_emit_start, \
+ pI830->batch_emitting); \
+ if ((pI830->batch_emitting > 8) && \
+ (I810_DEBUG & DEBUG_ALWAYS_SYNC)) { \
+ /* Note: not actually syncing, just flushing each batch. */ \
+ intel_batch_flush(pScrn, FALSE); \
+ } \
+ pI830->batch_emitting = 0; \
} while (0)
#endif /* _INTEL_BATCHBUFFER_H */