author    Jonathan Gray <jsg@cvs.openbsd.org>  2019-11-13 06:33:05 +0000
committer Jonathan Gray <jsg@cvs.openbsd.org>  2019-11-13 06:33:05 +0000
commit    e8347fb2e113e21a2550ee9bcead303a7f503c3f (patch)
tree      faca30057507c19457c9ddcfcdaa84ec224bde05 /sys/dev/pci
parent    7f8736ee3b9ed8de7e75a603401298785e52ca23 (diff)
drm/i915/cmdparser: Add support for backward jumps
From Jon Bloomfield. 6e53c71a69138059c8a4dcd1f9a2967c85fede64 in linux 4.19.y/4.19.84, f8c08d8faee5567803c8c533865296ca30286bbf in mainline linux.
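In outline: until now the parser returned -EACCES as soon as it saw MI_BATCH_BUFFER_START, so a batch containing any jump could never pass software parsing. With this change the parser records the dword index of every command it accepts in a per-context bitmap (jump_whitelist); when it meets a BB_START it checks that the target falls inside the batch and lands on one of those recorded command boundaries, then rewrites the operand to the matching offset in the shadow copy. Below is a minimal userspace model of that validation; the helper names and the hand-rolled bitmap are illustrative stand-ins for the kernel's set_bit()/test_bit(), not the driver API.

/*
 * Userspace model of the jump-whitelist check (illustrative only; the
 * kernel uses set_bit()/test_bit() on ctx->jump_whitelist).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define NWORDS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void mark_cmd(unsigned long *wl, uint32_t idx)
{
	wl[idx / BITS_PER_LONG] |= 1UL << (idx % BITS_PER_LONG);
}

static bool cmd_seen(const unsigned long *wl, uint32_t idx)
{
	return wl[idx / BITS_PER_LONG] & (1UL << (idx % BITS_PER_LONG));
}

/* The shape of check_bbstart()'s target validation. */
static bool jump_ok(const unsigned long *wl, uint64_t jump_target,
		    uint64_t batch_start, uint32_t batch_len)
{
	uint64_t off = jump_target - batch_start; /* underflow wraps huge */

	if (off >= batch_len)	/* catches both directions, see below */
		return false;
	return cmd_seen(wl, (uint32_t)off / sizeof(uint32_t));
}

int main(void)
{
	uint64_t batch_start = 0x100000;
	uint32_t batch_len = 64;		/* 16 dwords */
	unsigned long wl[NWORDS(16)] = { 0 };

	/* Pretend the parser accepted commands at dword indices 0, 3, 5. */
	mark_cmd(wl, 0);
	mark_cmd(wl, 3);
	mark_cmd(wl, 5);

	/* dword 3: a known command boundary, allowed */
	printf("%d\n", jump_ok(wl, batch_start + 12, batch_start, batch_len));
	/* dword 4: inside a longer command, rejected */
	printf("%d\n", jump_ok(wl, batch_start + 16, batch_start, batch_len));
	/* before the batch: offset underflows, rejected */
	printf("%d\n", jump_ok(wl, batch_start - 4, batch_start, batch_len));
	return 0;
}

The single unsigned comparison does the work the comment in check_bbstart() describes: a forward target past the batch compares large directly, and a backward target before batch_start underflows to a value far above any u32 batch_len, so one >= test rejects both too large and too small.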
Diffstat (limited to 'sys/dev/pci')
-rw-r--r--  sys/dev/pci/drm/i915/i915_cmd_parser.c     | 151
-rw-r--r--  sys/dev/pci/drm/i915/i915_drv.h            |   9
-rw-r--r--  sys/dev/pci/drm/i915/i915_gem_context.c    |   5
-rw-r--r--  sys/dev/pci/drm/i915/i915_gem_context.h    |   6
-rw-r--r--  sys/dev/pci/drm/i915/i915_gem_execbuffer.c |  34
5 files changed, 179 insertions, 26 deletions
diff --git a/sys/dev/pci/drm/i915/i915_cmd_parser.c b/sys/dev/pci/drm/i915/i915_cmd_parser.c
index 379cd055476..4a31d7d38b1 100644
--- a/sys/dev/pci/drm/i915/i915_cmd_parser.c
+++ b/sys/dev/pci/drm/i915/i915_cmd_parser.c
@@ -481,6 +481,19 @@ static const struct drm_i915_cmd_descriptor gen9_blt_cmds[] = {
.reg = { .offset = 1, .mask = 0x007FFFFC } ),
CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
.reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
+
+ /*
+ * We allow BB_START but apply further checks. We just sanitize the
+ * basic fields here.
+ */
+#define MI_BB_START_OPERAND_MASK GENMASK(SMI-1, 0)
+#define MI_BB_START_OPERAND_EXPECT (MI_BATCH_PPGTT_HSW | 1)
+ CMD( MI_BATCH_BUFFER_START_GEN8, SMI, !F, 0xFF, B,
+ .bits = {{
+ .offset = 0,
+ .mask = MI_BB_START_OPERAND_MASK,
+ .expected = MI_BB_START_OPERAND_EXPECT,
+ }}, ),
};
static const struct drm_i915_cmd_descriptor noop_desc =
@@ -1292,15 +1305,113 @@ static bool check_cmd(const struct intel_engine_cs *engine,
return true;
}
+static int check_bbstart(const struct i915_gem_context *ctx,
+ u32 *cmd, u32 offset, u32 length,
+ u32 batch_len,
+ u64 batch_start,
+ u64 shadow_batch_start)
+{
+ u64 jump_offset, jump_target;
+ u32 target_cmd_offset, target_cmd_index;
+
+ /* For igt compatibility on older platforms */
+ if (CMDPARSER_USES_GGTT(ctx->i915)) {
+ DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n");
+ return -EACCES;
+ }
+
+ if (length != 3) {
+ DRM_DEBUG("CMD: Recursive BB_START with bad length(%u)\n",
+ length);
+ return -EINVAL;
+ }
+
+ jump_target = *(u64*)(cmd+1);
+ jump_offset = jump_target - batch_start;
+
+ /*
+ * Any underflow of jump_target is guaranteed to be outside the range
+ * of a u32, so >= test catches both too large and too small
+ */
+ if (jump_offset >= batch_len) {
+ DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n",
+ jump_target);
+ return -EINVAL;
+ }
+
+ /*
+ * This cannot overflow a u32 because we already checked jump_offset
+ * is within the BB, and the batch_len is a u32
+ */
+ target_cmd_offset = lower_32_bits(jump_offset);
+ target_cmd_index = target_cmd_offset / sizeof(u32);
+
+ *(u64*)(cmd + 1) = shadow_batch_start + target_cmd_offset;
+
+ if (target_cmd_index == offset)
+ return 0;
+
+ if (ctx->jump_whitelist_cmds <= target_cmd_index) {
+ DRM_DEBUG("CMD: Rejecting BB_START - truncated whitelist array\n");
+ return -EINVAL;
+ } else if (!test_bit(target_cmd_index, ctx->jump_whitelist)) {
+ DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
+ jump_target);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void init_whitelist(struct i915_gem_context *ctx, u32 batch_len)
+{
+ const u32 batch_cmds = DIV_ROUND_UP(batch_len, sizeof(u32));
+ const u32 exact_size = BITS_TO_LONGS(batch_cmds);
+ u32 next_size = BITS_TO_LONGS(roundup_pow_of_two(batch_cmds));
+ unsigned long *next_whitelist;
+
+ if (CMDPARSER_USES_GGTT(ctx->i915))
+ return;
+
+ if (batch_cmds <= ctx->jump_whitelist_cmds) {
+ memset(ctx->jump_whitelist, 0, exact_size * sizeof(u32));
+ return;
+ }
+
+again:
+ next_whitelist = kcalloc(next_size, sizeof(long), GFP_KERNEL);
+ if (next_whitelist) {
+ kfree(ctx->jump_whitelist);
+ ctx->jump_whitelist = next_whitelist;
+ ctx->jump_whitelist_cmds =
+ next_size * BITS_PER_BYTE * sizeof(long);
+ return;
+ }
+
+ if (next_size > exact_size) {
+ next_size = exact_size;
+ goto again;
+ }
+
+ DRM_DEBUG("CMD: Failed to extend whitelist. BB_START may be disallowed\n");
+ memset(ctx->jump_whitelist, 0,
+ BITS_TO_LONGS(ctx->jump_whitelist_cmds) * sizeof(u32));
+
+ return;
+}
+
#define LENGTH_BIAS 2
/**
* i915_parse_cmds() - parse a submitted batch buffer for privilege violations
+ * @ctx: the context in which the batch is to execute
* @engine: the engine on which the batch is to execute
* @batch_obj: the batch buffer in question
- * @shadow_batch_obj: copy of the batch buffer in question
+ * @batch_start: Canonical base address of batch
* @batch_start_offset: byte offset in the batch at which execution starts
* @batch_len: length of the commands in batch_obj
+ * @shadow_batch_obj: copy of the batch buffer in question
+ * @shadow_batch_start: Canonical base address of shadow_batch_obj
*
* Parses the specified batch buffer looking for privilege violations as
* described in the overview.
@@ -1308,13 +1419,17 @@ static bool check_cmd(const struct intel_engine_cs *engine,
* Return: non-zero if the parser finds violations or otherwise fails; -EACCES
* if the batch appears legal but should use hardware parsing
*/
-int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+
+int intel_engine_cmd_parser(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
struct drm_i915_gem_object *batch_obj,
- struct drm_i915_gem_object *shadow_batch_obj,
+ u64 batch_start,
u32 batch_start_offset,
- u32 batch_len)
+ u32 batch_len,
+ struct drm_i915_gem_object *shadow_batch_obj,
+ u64 shadow_batch_start)
{
- u32 *cmd, *batch_end;
+ u32 *cmd, *batch_end, offset = 0;
struct drm_i915_cmd_descriptor default_desc = noop_desc;
const struct drm_i915_cmd_descriptor *desc = &default_desc;
bool needs_clflush_after = false;
@@ -1328,6 +1443,8 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
return PTR_ERR(cmd);
}
+ init_whitelist(ctx, batch_len);
+
/*
* We use the batch length as size because the shadow object is as
* large or larger and copy_batch() will write MI_NOPs to the extra
@@ -1348,16 +1465,6 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
goto err;
}
- /*
- * We don't try to handle BATCH_BUFFER_START because it adds
- * non-trivial complexity. Instead we abort the scan and return
- * and error to indicate that the batch is unsafe.
- */
- if (desc->cmd.value == MI_BATCH_BUFFER_START) {
- ret = -EACCES;
- goto err;
- }
-
if (desc->flags & CMD_DESC_FIXED)
length = desc->length.fixed;
else
@@ -1377,7 +1484,21 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
goto err;
}
+ if (desc->cmd.value == MI_BATCH_BUFFER_START) {
+ ret = check_bbstart(ctx, cmd, offset, length,
+ batch_len, batch_start,
+ shadow_batch_start);
+
+ if (ret)
+ goto err;
+ break;
+ }
+
+ if (ctx->jump_whitelist_cmds > offset)
+ set_bit(offset, ctx->jump_whitelist);
+
cmd += length;
+ offset += length;
if (cmd >= batch_end) {
DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
ret = -EINVAL;
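The descriptor-table side of this is the new gen9 BB_START entry near the top of the diff above: rather than being rejected on sight, the command is admitted and run through the parser's usual .bits rule, in which the dword at the given offset is ANDed with a mask and must equal an expected value. Here the mask covers every operand bit below the opcode field, so only PPGTT addressing plus the fixed length field may be set. A schematic of that evaluation; the SMI shift (23) and the MI_BATCH_PPGTT_HSW bit position are assumptions from my reading of the i915 headers, not values quoted in this hunk:

/*
 * Schematic of a .bits descriptor rule as check_cmd() applies it.
 * Bit positions below are assumed for illustration: the MI opcode
 * field starts at bit 23 (SMI), MI_BATCH_PPGTT_HSW is bit 8.
 */
#include <stdbool.h>
#include <stdint.h>

#define GENMASK32(h, l)	((~0u >> (31 - (h))) & ~((1u << (l)) - 1u))

#define SMI			23		/* assumed */
#define MI_BATCH_PPGTT_HSW	(1u << 8)	/* assumed */

#define MI_BB_START_OPERAND_MASK	GENMASK32(SMI - 1, 0)
#define MI_BB_START_OPERAND_EXPECT	(MI_BATCH_PPGTT_HSW | 1)

/* dword0 passes only if all masked bits match the expected pattern. */
static bool bb_start_operands_ok(uint32_t dword0)
{
	return (dword0 & MI_BB_START_OPERAND_MASK) ==
	       MI_BB_START_OPERAND_EXPECT;
}

The trailing | 1 pins the command's length field, admitting only the 3-dword form that check_bbstart() insists on with its length != 3 test; commands surviving this masked comparison then have their jump target validated there.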
diff --git a/sys/dev/pci/drm/i915/i915_drv.h b/sys/dev/pci/drm/i915/i915_drv.h
index fe434f86e99..230414ee3f7 100644
--- a/sys/dev/pci/drm/i915/i915_drv.h
+++ b/sys/dev/pci/drm/i915/i915_drv.h
@@ -3523,11 +3523,14 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
-int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+int intel_engine_cmd_parser(struct i915_gem_context *cxt,
+ struct intel_engine_cs *engine,
struct drm_i915_gem_object *batch_obj,
- struct drm_i915_gem_object *shadow_batch_obj,
+ u64 user_batch_start,
u32 batch_start_offset,
- u32 batch_len);
+ u32 batch_len,
+ struct drm_i915_gem_object *shadow_batch_obj,
+ u64 shadow_batch_start);
/* i915_perf.c */
extern void i915_perf_init(struct drm_i915_private *dev_priv);
diff --git a/sys/dev/pci/drm/i915/i915_gem_context.c b/sys/dev/pci/drm/i915/i915_gem_context.c
index 56d3b77c544..fbc7d4db006 100644
--- a/sys/dev/pci/drm/i915/i915_gem_context.c
+++ b/sys/dev/pci/drm/i915/i915_gem_context.c
@@ -128,6 +128,8 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
i915_ppgtt_put(ctx->ppgtt);
+ kfree(ctx->jump_whitelist);
+
for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
struct intel_context *ce = &ctx->__engine[n];
@@ -351,6 +353,9 @@ __create_hw_context(struct drm_i915_private *dev_priv,
else
ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;
+ ctx->jump_whitelist = NULL;
+ ctx->jump_whitelist_cmds = 0;
+
return ctx;
err_pid:
diff --git a/sys/dev/pci/drm/i915/i915_gem_context.h b/sys/dev/pci/drm/i915/i915_gem_context.h
index 51573258de2..75fdf081cec 100644
--- a/sys/dev/pci/drm/i915/i915_gem_context.h
+++ b/sys/dev/pci/drm/i915/i915_gem_context.h
@@ -187,6 +187,12 @@ struct i915_gem_context {
/** remap_slice: Bitmask of cache lines that need remapping */
u8 remap_slice;
+ /** jump_whitelist: Bit array for tracking cmds during cmdparsing */
+ unsigned long *jump_whitelist;
+
+ /** jump_whitelist_cmds: No of cmd slots available */
+ u32 jump_whitelist_cmds;
+
/** handles_vma: rbtree to look up our context specific obj/vma for
* the user handle. (user handles are per fd, but the binding is
* per vm, which may be one per context or shared with the global GTT)
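These two fields back the bitmap managed by init_whitelist() in the first file of this diff: the allocation is lazy and sized in longs, first trying a power-of-two size so the buffer can be reused across batches, then retrying at the exact size if that allocation fails, and jump_whitelist_cmds records how many command slots (bits) the result provides. A userspace model of that sizing logic, with calloc()/free() standing in for kcalloc()/kfree() and illustrative names throughout:

/* Userspace model of init_whitelist()'s sizing (names illustrative). */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define BITS_PER_LONG	 (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct ctx_model {
	unsigned long *jump_whitelist;
	uint32_t jump_whitelist_cmds;	/* usable bit slots */
};

static uint32_t round_up_pow2(uint32_t n)	/* smallest power of two >= n */
{
	uint32_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static void init_whitelist_model(struct ctx_model *ctx, uint32_t batch_len)
{
	uint32_t batch_cmds = (batch_len + 3) / 4;	/* dword commands */
	size_t exact_size = BITS_TO_LONGS(batch_cmds);
	size_t next_size = BITS_TO_LONGS(round_up_pow2(batch_cmds));
	unsigned long *next_whitelist;

	if (batch_cmds <= ctx->jump_whitelist_cmds) {
		/* Existing bitmap is big enough: clear and reuse it. */
		memset(ctx->jump_whitelist, 0,
		       exact_size * sizeof(unsigned long));
		return;
	}

again:
	next_whitelist = calloc(next_size, sizeof(unsigned long));
	if (next_whitelist) {
		free(ctx->jump_whitelist);
		ctx->jump_whitelist = next_whitelist;
		/* Slot count = allocated longs times bits per long. */
		ctx->jump_whitelist_cmds = next_size * BITS_PER_LONG;
		return;
	}

	/* Power-of-two allocation failed: retry with the exact size. */
	if (next_size > exact_size) {
		next_size = exact_size;
		goto again;
	}
	/* Still failed: keep the old, smaller bitmap; jumps beyond its
	 * end will be rejected by check_bbstart()'s bounds test. */
}

One caveat in the kernel hunk itself: its memset() calls size the clears in sizeof(u32) units although the array holds longs, which under-clears the bitmap on 64-bit; upstream later changed those clears to bitmap_zero(). The model clears the full long-sized span, which is the evident intent.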
diff --git a/sys/dev/pci/drm/i915/i915_gem_execbuffer.c b/sys/dev/pci/drm/i915/i915_gem_execbuffer.c
index 3f9aa19f108..02866c349c4 100644
--- a/sys/dev/pci/drm/i915/i915_gem_execbuffer.c
+++ b/sys/dev/pci/drm/i915/i915_gem_execbuffer.c
@@ -1939,7 +1939,6 @@ shadow_batch_pin(struct i915_execbuffer *eb, struct drm_i915_gem_object *obj)
if (CMDPARSER_USES_GGTT(dev_priv)) {
flags = PIN_GLOBAL;
vm = &dev_priv->ggtt.vm;
- eb->batch_flags |= I915_DISPATCH_SECURE;
} else if (eb->vm->has_read_only) {
flags = PIN_USER;
vm = eb->vm;
@@ -1956,6 +1955,8 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
{
struct drm_i915_gem_object *shadow_batch_obj;
struct i915_vma *vma;
+ u64 batch_start;
+ u64 shadow_batch_start;
int err;
shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
@@ -1963,12 +1964,27 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
if (IS_ERR(shadow_batch_obj))
return ERR_CAST(shadow_batch_obj);
- err = intel_engine_cmd_parser(eb->engine,
+ vma = shadow_batch_pin(eb, shadow_batch_obj);
+ if (IS_ERR(vma))
+ goto out;
+
+ batch_start = gen8_canonical_addr(eb->batch->node.start) +
+ eb->batch_start_offset;
+
+ shadow_batch_start = gen8_canonical_addr(vma->node.start);
+
+ err = intel_engine_cmd_parser(eb->ctx,
+ eb->engine,
eb->batch->obj,
- shadow_batch_obj,
+ batch_start,
eb->batch_start_offset,
- eb->batch_len);
+ eb->batch_len,
+ shadow_batch_obj,
+ shadow_batch_start);
+
if (err) {
+ i915_vma_unpin(vma);
+
/*
* Unsafe GGTT-backed buffers can still be submitted safely
* as non-secure.
@@ -1980,12 +1996,9 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
vma = NULL;
else
vma = ERR_PTR(err);
- goto out;
- }
- vma = shadow_batch_pin(eb, shadow_batch_obj);
- if (IS_ERR(vma))
goto out;
+ }
eb->vma[eb->buffer_count] = i915_vma_get(vma);
eb->flags[eb->buffer_count] =
@@ -1994,7 +2007,12 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
eb->buffer_count++;
eb->batch_start_offset = 0;
eb->batch = vma;
+
/* eb->batch_len unchanged */
+
+ if (CMDPARSER_USES_GGTT(eb->i915))
+ eb->batch_flags |= I915_DISPATCH_SECURE;
+
out:
i915_gem_object_unpin_pages(shadow_batch_obj);
return vma;
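On the execbuffer side the shadow buffer is now pinned before parsing, so both the user batch's and the shadow's GPU virtual addresses are known in time to rewrite BB_START operands, and both are passed through gen8_canonical_addr() because gen8+ batch addresses must be in canonical form. A sketch of what canonicalization means, assuming the usual 48-bit layout in which bits 63:48 must replicate bit 47 (gen8_canonical_addr() is a pre-existing i915 helper, not something this patch adds):

/* Canonical form: replicate bit 47 through bits 63:48 (sign extension). */
#include <stdint.h>
#include <stdio.h>

#define GEN8_HIGH_ADDRESS_BIT 47

static uint64_t canonical_addr_model(uint64_t address)
{
	int shift = 63 - GEN8_HIGH_ADDRESS_BIT;

	/* Shift bit 47 into the sign position, then arithmetic-shift
	 * back so it fills the upper bits. */
	return (uint64_t)((int64_t)(address << shift) >> shift);
}

int main(void)
{
	/* Low addresses pass through; bit 47 set extends into the top. */
	printf("%llx\n", (unsigned long long)canonical_addr_model(0x100000));
	printf("%llx\n",
	    (unsigned long long)canonical_addr_model(0x800000000000ULL));
	return 0;
}

Note also where I915_DISPATCH_SECURE went: it is no longer set when pinning the shadow, but only at the end of eb_parse() once parsing has succeeded, so a GGTT-backed shadow is never dispatched as secure for a batch the parser rejected.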