Commit 6bf32af

Rob Clark authored and committed
drm/msm: Lazily create context VM
In the next commit, a way for userspace to opt-in to userspace managed VM is added.
For this to work, we need to defer creation of the VM until it is needed.

Signed-off-by: Rob Clark <[email protected]>
Signed-off-by: Rob Clark <[email protected]>
Tested-by: Antonino Maniscalco <[email protected]>
Reviewed-by: Antonino Maniscalco <[email protected]>
Patchwork: https://patchwork.freedesktop.org/patch/661490/
1 parent 5b5582c commit 6bf32af

5 files changed: +43 −14 lines changed

drivers/gpu/drm/msm/adreno/a6xx_gpu.c

Lines changed: 2 additions & 1 deletion
@@ -112,6 +112,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
 {
 	bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1;
 	struct msm_context *ctx = submit->queue->ctx;
+	struct drm_gpuvm *vm = msm_context_vm(submit->dev, ctx);
 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	phys_addr_t ttbr;
 	u32 asid;
@@ -120,7 +121,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
 	if (ctx->seqno == ring->cur_ctx_seqno)
 		return;
 
-	if (msm_iommu_pagetable_params(to_msm_vm(ctx->vm)->mmu, &ttbr, &asid))
+	if (msm_iommu_pagetable_params(to_msm_vm(vm)->mmu, &ttbr, &asid))
 		return;
 
 	if (adreno_gpu->info->family >= ADRENO_7XX_GEN1) {

drivers/gpu/drm/msm/adreno/adreno_gpu.c

Lines changed: 8 additions & 6 deletions
@@ -369,6 +369,8 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx,
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	struct drm_device *drm = gpu->dev;
+	/* Note ctx can be NULL when called from rd_open(): */
+	struct drm_gpuvm *vm = ctx ? msm_context_vm(drm, ctx) : NULL;
 
 	/* No pointer params yet */
 	if (*len != 0)
@@ -414,23 +416,23 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx,
 		*value = 0;
 		return 0;
 	case MSM_PARAM_FAULTS:
-		if (ctx->vm)
-			*value = gpu->global_faults + to_msm_vm(ctx->vm)->faults;
+		if (vm)
+			*value = gpu->global_faults + to_msm_vm(vm)->faults;
 		else
 			*value = gpu->global_faults;
 		return 0;
 	case MSM_PARAM_SUSPENDS:
 		*value = gpu->suspend_count;
 		return 0;
 	case MSM_PARAM_VA_START:
-		if (ctx->vm == gpu->vm)
+		if (vm == gpu->vm)
 			return UERR(EINVAL, drm, "requires per-process pgtables");
-		*value = ctx->vm->mm_start;
+		*value = vm->mm_start;
 		return 0;
 	case MSM_PARAM_VA_SIZE:
-		if (ctx->vm == gpu->vm)
+		if (vm == gpu->vm)
 			return UERR(EINVAL, drm, "requires per-process pgtables");
-		*value = ctx->vm->mm_range;
+		*value = vm->mm_range;
 		return 0;
 	case MSM_PARAM_HIGHEST_BANK_BIT:
 		*value = adreno_gpu->ubwc_config.highest_bank_bit;

drivers/gpu/drm/msm/msm_drv.c

Lines changed: 24 additions & 5 deletions
@@ -218,10 +218,29 @@ static void load_gpu(struct drm_device *dev)
 	mutex_unlock(&init_lock);
 }
 
+/**
+ * msm_context_vm - lazily create the context's VM
+ *
+ * @dev: the drm device
+ * @ctx: the context
+ *
+ * The VM is lazily created, so that userspace has a chance to opt-in to having
+ * a userspace managed VM before the VM is created.
+ *
+ * Note that this does not return a reference to the VM. Once the VM is created,
+ * it exists for the lifetime of the context.
+ */
+struct drm_gpuvm *msm_context_vm(struct drm_device *dev, struct msm_context *ctx)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	if (!ctx->vm)
+		ctx->vm = msm_gpu_create_private_vm(priv->gpu, current);
+	return ctx->vm;
+}
+
 static int context_init(struct drm_device *dev, struct drm_file *file)
 {
 	static atomic_t ident = ATOMIC_INIT(0);
-	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_context *ctx;
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -234,7 +253,6 @@ static int context_init(struct drm_device *dev, struct drm_file *file)
 	kref_init(&ctx->ref);
 	msm_submitqueue_init(dev, ctx);
 
-	ctx->vm = msm_gpu_create_private_vm(priv->gpu, current);
 	file->driver_priv = ctx;
 
 	ctx->seqno = atomic_inc_return(&ident);
@@ -413,7 +431,7 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
 	 * Don't pin the memory here - just get an address so that userspace can
 	 * be productive
 	 */
-	return msm_gem_get_iova(obj, ctx->vm, iova);
+	return msm_gem_get_iova(obj, msm_context_vm(dev, ctx), iova);
 }
 
 static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
@@ -422,18 +440,19 @@ static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_context *ctx = file->driver_priv;
+	struct drm_gpuvm *vm = msm_context_vm(dev, ctx);
 
 	if (!priv->gpu)
 		return -EINVAL;
 
 	/* Only supported if per-process address space is supported: */
-	if (priv->gpu->vm == ctx->vm)
+	if (priv->gpu->vm == vm)
 		return UERR(EOPNOTSUPP, dev, "requires per-process pgtables");
 
 	if (should_fail(&fail_gem_iova, obj->size))
 		return -ENOMEM;
 
-	return msm_gem_set_iova(obj, ctx->vm, iova);
+	return msm_gem_set_iova(obj, vm, iova);
 }
 
 static int msm_ioctl_gem_info_set_metadata(struct drm_gem_object *obj,

drivers/gpu/drm/msm/msm_gem_submit.c

Lines changed: 1 addition & 1 deletion
@@ -64,7 +64,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
 
 	kref_init(&submit->ref);
 	submit->dev = dev;
-	submit->vm = queue->ctx->vm;
+	submit->vm = msm_context_vm(dev, queue->ctx);
 	submit->gpu = gpu;
 	submit->cmd = (void *)&submit->bos[nr_bos];
 	submit->queue = queue;

drivers/gpu/drm/msm/msm_gpu.h

Lines changed: 8 additions & 1 deletion
@@ -364,7 +364,12 @@ struct msm_context {
 	 */
 	bool closed;
 
-	/** @vm: the per-process GPU address-space */
+	/**
+	 * @vm:
+	 *
+	 * The per-process GPU address-space. Do not access directly, use
+	 * msm_context_vm().
+	 */
 	struct drm_gpuvm *vm;
 
 	/** @kref: the reference count */
@@ -449,6 +454,8 @@ struct msm_context {
 	atomic64_t ctx_mem;
 };
 
+struct drm_gpuvm *msm_context_vm(struct drm_device *dev, struct msm_context *ctx);
+
 /**
  * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
 *
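With the prototype exported here and the doc-comment warning against direct access, the calling convention the rest of the commit follows is simply to route VM lookups through the helper. The lines below are a condensed paraphrase of the msm_gem_submit.c hunk above, shown side by side for clarity; they are not additional code from the patch.

	/* Before: the VM was created eagerly in context_init(), so a direct
	 * dereference was always valid. */
	submit->vm = queue->ctx->vm;

	/* After: the VM may not exist yet, so go through the lazy accessor,
	 * which creates it on first use. */
	submit->vm = msm_context_vm(dev, queue->ctx);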
