#ifndef SNA_RENDER_INLINE_H
#define SNA_RENDER_INLINE_H

static inline bool need_tiling(struct sna *sna, int16_t width, int16_t height)
{
        /* Is the damage area too large to fit in the 3D pipeline,
         * and so do we need to split the operation up into tiles?
         */
        return (width > sna->render.max_3d_size ||
                height > sna->render.max_3d_size);
}

static inline bool need_redirect(struct sna *sna, PixmapPtr dst)
{
        /* Is the pixmap too large to render to? */
        return (dst->drawable.width > sna->render.max_3d_size ||
                dst->drawable.height > sna->render.max_3d_size);
}
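
/* Number of floats still unused in the render vertex staging buffer. */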
static inline int vertex_space(struct sna *sna)
{
        return ARRAY_SIZE(sna->render.vertex_data) - sna->render.vertex_used;
}
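
/* Append a single float to the vertex buffer. */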
static inline void vertex_emit(struct sna *sna, float v)
{
        assert(sna->render.vertex_used < ARRAY_SIZE(sna->render.vertex_data));
        sna->render.vertex_data[sna->render.vertex_used++] = v;
}
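
/* Append a pair of signed 16-bit coordinates, packed into the space of
 * a single float slot in the vertex buffer.
 */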
static inline void vertex_emit_2s(struct sna *sna, int16_t x, int16_t y)
{
        int16_t *v = (int16_t *)&sna->render.vertex_data[sna->render.vertex_used++];
        assert(sna->render.vertex_used <= ARRAY_SIZE(sna->render.vertex_data));
        v[0] = x;
        v[1] = y;
}
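
/* Pack two signed 16-bit coordinates into a single float-sized value. */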
static inline float pack_2s(int16_t x, int16_t y)
{
        union {
                struct sna_coordinate p;
                float f;
        } u;
        u.p.x = x;
        u.p.y = y;
        return u.f;
}
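
/* Illustrative sketch only: the variable names below (dst_x, dst_y,
 * src_x, src_y, sx, sy) are hypothetical and not part of this header.
 * A render backend might emit one vertex as a packed destination
 * coordinate followed by two floating-point texture coordinates:
 *
 *      vertex_emit_2s(sna, dst_x, dst_y);
 *      vertex_emit(sna, src_x * sx);
 *      vertex_emit(sna, src_y * sy);
 */

/* Number of dwords still unused in the current batch buffer. */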
static inline int batch_space(struct sna *sna)
{
        return KGEM_BATCH_SIZE(&sna->kgem) - sna->kgem.nbatch;
}
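
/* Append a single dword to the batch; the batch must stay below the
 * surface state allocated at kgem.surface, hence the assertion.
 */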
static inline void batch_emit(struct sna *sna, uint32_t dword)
{
        assert(sna->kgem.nbatch < sna->kgem.surface);
        sna->kgem.batch[sna->kgem.nbatch++] = dword;
}
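
/* Append a float to the batch by reinterpreting its bits as a dword. */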
static inline void batch_emit_float(struct sna *sna, float f)
{
        union {
                uint32_t dw;
                float f;
        } u;
        u.f = f;
        batch_emit(sna, u.dw);
}
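
/* Does the drawable currently have a GPU buffer object attached? */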
static inline Bool
is_gpu(DrawablePtr drawable)
{
        struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
        return priv && priv->gpu_bo;
}
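
/* Is the drawable held only in system memory, without a GPU buffer object? */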
static inline Bool
is_cpu(DrawablePtr drawable)
{
        struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
        return !priv || priv->gpu_bo == NULL;
}
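
/* Does the drawable have a GPU buffer object with damage recorded
 * against it, i.e. the GPU copy holds modifications?
 */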
static inline Bool
is_dirty_gpu(DrawablePtr drawable)
{
        struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
        return priv && priv->gpu_bo && priv->gpu_damage;
}
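
/* Is the pixmap no larger than a single 4 KiB page and not already
 * dirty on the GPU? Such pixmaps are presumably cheaper to keep on
 * the CPU than to migrate.
 */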
static inline Bool
too_small(DrawablePtr drawable)
{
        return ((uint32_t)drawable->width * drawable->height * drawable->bitsPerPixel <= 8*4096) &&
                !is_dirty_gpu(drawable);
}
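
/* Does the picture wrap a drawable that currently has a GPU buffer object? */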
static inline Bool
picture_is_gpu(PicturePtr picture)
{
        if (!picture || !picture->pDrawable)
                return FALSE;
        return is_gpu(picture->pDrawable);
}
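
/* Are the two drawables close enough in depth for a BLT copy, allowing
 * the alpha channel to be discarded when copying 32-bit onto 24-bit?
 */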
static inline Bool sna_blt_compare_depth(DrawablePtr src, DrawablePtr dst)
{
        if (src->depth == dst->depth)
                return TRUE;

        /* Also allow for the alpha to be discarded on a copy */
        if (src->bitsPerPixel != dst->bitsPerPixel)
                return FALSE;

        if (dst->depth == 24 && src->depth == 32)
                return TRUE;

        /* Note that a depth-16 pixmap is r5g6b5, not x1r5g5b5. */
        return FALSE;
}
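
/* Take a new reference on the bo backing the cached alpha gradient. */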
static inline struct kgem_bo *
sna_render_get_alpha_gradient(struct sna *sna)
{
        return kgem_bo_reference(sna->render.alpha_cache.cache_bo);
}

#endif /* SNA_RENDER_INLINE_H */