#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H

#include <assert.h>
#include <stdint.h>
#include "intel_bufmgr.h"
#define BATCH_SZ 4096
#define BATCH_RESERVED 16
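
/* Commands are assembled into the local buffer[] through ptr; the
 * drm_intel_bo is the kernel-visible object the accumulated commands
 * are uploaded to when the batch is flushed.
 */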
struct intel_batchbuffer {
        drm_intel_bufmgr *bufmgr;
        uint32_t devid;

        drm_intel_bo *bo;

        uint8_t buffer[BATCH_SZ];
        uint8_t *ptr;
        uint8_t *state;
};
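
/* Allocates a batchbuffer for the device identified by devid (the
 * chipset PCI device id); release it with intel_batchbuffer_free().
 */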
struct intel_batchbuffer *intel_batchbuffer_alloc(drm_intel_bufmgr *bufmgr,
                                                  uint32_t devid);
void intel_batchbuffer_free(struct intel_batchbuffer *batch);
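
/* Flushing submits the accumulated commands with an execbuffer call
 * and resets the batch for reuse; the _on_ring and _with_context
 * variants select the target ring or a hardware context respectively.
 */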
void intel_batchbuffer_flush(struct intel_batchbuffer *batch);
void intel_batchbuffer_flush_on_ring(struct intel_batchbuffer *batch, int ring);
void intel_batchbuffer_flush_with_context(struct intel_batchbuffer *batch,
                                          drm_intel_context *context);
void intel_batchbuffer_reset(struct intel_batchbuffer *batch);
void intel_batchbuffer_data(struct intel_batchbuffer *batch,
                            const void *data, unsigned int bytes);
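
/* Emits the relocated address of "buffer" (plus the byte offset
 * "delta") as the next batch dword and records a relocation entry so
 * the kernel can patch the address at execution time.  read_domains
 * and write_domain take I915_GEM_DOMAIN_* values; fenced selects a
 * fence-register relocation for tiled buffers.
 */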
void intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
                                  drm_intel_bo *buffer,
                                  uint32_t delta,
                                  uint32_t read_domains,
                                  uint32_t write_domain,
                                  int fenced);

/* Inline functions - might actually be better off with these
 * non-inlined. Certainly better off switching all command packets to
 * be passed as structs rather than dwords, but that's a little bit of
 * work...
 */
#pragma GCC diagnostic ignored "-Winline"

static inline int
intel_batchbuffer_space(struct intel_batchbuffer *batch)
{
        return (BATCH_SZ - BATCH_RESERVED) - (batch->ptr - batch->buffer);
}

static inline void
intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, uint32_t dword)
{
        assert(intel_batchbuffer_space(batch) >= 4);
        *(uint32_t *) (batch->ptr) = dword;
        batch->ptr += 4;
}

static inline void
intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
                                unsigned int sz)
{
        assert(sz < BATCH_SZ - BATCH_RESERVED);
        /* compare as int so a negative space value forces a flush */
        if (intel_batchbuffer_space(batch) < (int)sz)
                intel_batchbuffer_flush(batch);
}

/* Here are the crusty old macros, to be removed:
 */
#define BATCH_LOCALS

#define BEGIN_BATCH(n) do { \
        intel_batchbuffer_require_space(batch, (n)*4); \
} while (0)

#define OUT_BATCH(d) intel_batchbuffer_emit_dword(batch, d)

#define OUT_RELOC_FENCED(buf, read_domains, write_domain, delta) do { \
        assert((delta) >= 0); \
        intel_batchbuffer_emit_reloc(batch, buf, delta, \
                                     read_domains, write_domain, 1); \
} while (0)

#define OUT_RELOC(buf, read_domains, write_domain, delta) do { \
        assert((delta) >= 0); \
        intel_batchbuffer_emit_reloc(batch, buf, delta, \
                                     read_domains, write_domain, 0); \
} while (0)

#define ADVANCE_BATCH() do { \
} while (0)
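
/* A typical emission sequence, as a sketch: "batch" is the local
 * variable name the macros assume, "dst" stands in for some
 * drm_intel_bo the command writes, and CMD/LEN are whatever opcode and
 * dword count the packet calls for.
 *
 *      BEGIN_BATCH(3);
 *      OUT_BATCH(CMD | (LEN - 2));
 *      OUT_BATCH(0);
 *      OUT_RELOC(dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
 *      ADVANCE_BATCH();
 *      intel_batchbuffer_flush(batch);
 */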
void
intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch);

void intel_copy_bo(struct intel_batchbuffer *batch,
                   drm_intel_bo *dst_bo, drm_intel_bo *src_bo,
                   int width, int height);
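
/* execbuffer2 carries the hardware context id in the rsvd1 field of
 * struct drm_i915_gem_execbuffer2; these helpers pack and unpack it.
 */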
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
        (eb2).rsvd1 = (context) & I915_EXEC_CONTEXT_ID_MASK
#define i915_execbuffer2_get_context_id(eb2) \
        ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)

#endif /* INTEL_BATCHBUFFER_H */