gpuvm: remove gem.gpuva.lock_dep_map

Since all users of gem.gpuva.lock_dep_map now rely on the mutex directly
in gpuva, we may remove it. Whether the mutex is used is now tracked by
a flag in gpuvm rather than by whether lock_dep_map is null.

Note that a GEM object may not be pushed to multiple gpuvms that
disagree on the value of this new flag. But that's okay because a single
driver should use the same locking scheme everywhere, and a GEM object
is driver specific (when a GEM is exported with prime, a new GEM object
instance is created from the backing dma-buf).

The flag is present even with CONFIG_LOCKDEP=n because the intent is
that the flag will also cause vm_bo cleanup to become deferred. However,
that will happen in a follow-up patch.

Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
Link: https://lore.kernel.org/r/20250827-gpuva-mutex-in-gem-v3-3-bd89f5a82c0d@google.com
[ Use lockdep_is_held() instead of lock_is_held(). - Danilo ]
Signed-off-by: Danilo Krummrich <dakr@kernel.org>
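
For reference, a driver opts into the mutex-based scheme when it creates its VM,
as the panthor change below does. A minimal sketch under assumed names
(my_vm_init, my_gpuvm_ops and the VA range are illustrative, not part of this
patch):

#include <drm/drm_gpuvm.h>

/* The driver's op table, assumed to be defined elsewhere. */
extern const struct drm_gpuvm_ops my_gpuvm_ops;

static void my_vm_init(struct drm_gpuvm *vm, struct drm_device *drm,
		       struct drm_gem_object *r_obj)
{
	/*
	 * DRM_GPUVM_IMMEDIATE_MODE makes GPUVM expect gem->gpuva.lock
	 * (rather than the GEM's dma-resv lock) to protect the gpuva
	 * list of every GEM object mapped in this VM. Panthor combines
	 * it with DRM_GPUVM_RESV_PROTECTED; plain immediate mode is
	 * shown here.
	 */
	drm_gpuvm_init(vm, "my-VM", DRM_GPUVM_IMMEDIATE_MODE, drm, r_obj,
		       0, 1ull << 40, 0, 0, &my_gpuvm_ops);
}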

drivers/gpu/drm/drm_gpuvm.c

@@ -497,8 +497,7 @@
* DRM GPUVM also does not take care of the locking of the backing
* &drm_gem_object buffers GPU VA lists and &drm_gpuvm_bo abstractions by
* itself; drivers are responsible to enforce mutual exclusion using either the
* GEMs dma_resv lock or alternatively a driver specific external lock. For the
* latter see also drm_gem_gpuva_set_lock().
* GEMs dma_resv lock or the GEMs gpuva.lock mutex.
*
* However, DRM GPUVM contains lockdep checks to ensure callers of its API hold
* the corresponding lock whenever the &drm_gem_objects GPU VA list is accessed
@@ -1582,7 +1581,7 @@ drm_gpuvm_bo_destroy(struct kref *kref)
drm_gpuvm_bo_list_del(vm_bo, extobj, lock);
drm_gpuvm_bo_list_del(vm_bo, evict, lock);
drm_gem_gpuva_assert_lock_held(obj);
drm_gem_gpuva_assert_lock_held(gpuvm, obj);
list_del(&vm_bo->list.entry.gem);
if (ops && ops->vm_bo_free)
@@ -1603,7 +1602,8 @@ drm_gpuvm_bo_destroy(struct kref *kref)
* If the reference count drops to zero, the &gpuvm_bo is destroyed, which
* includes removing it from the GEMs gpuva list. Hence, if a call to this
* function can potentially let the reference count drop to zero the caller must
* hold the dma-resv or driver specific GEM gpuva lock.
* hold the lock that the GEM uses for its gpuva list (either the GEM's
* dma-resv or gpuva.lock mutex).
*
* This function may only be called from non-atomic context.
*
@@ -1627,7 +1627,7 @@ __drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
{
struct drm_gpuvm_bo *vm_bo;
drm_gem_gpuva_assert_lock_held(obj);
drm_gem_gpuva_assert_lock_held(gpuvm, obj);
drm_gem_for_each_gpuvm_bo(vm_bo, obj)
if (vm_bo->vm == gpuvm)
return vm_bo;
@@ -1686,7 +1686,7 @@ drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
if (!vm_bo)
return ERR_PTR(-ENOMEM);
drm_gem_gpuva_assert_lock_held(obj);
drm_gem_gpuva_assert_lock_held(gpuvm, obj);
list_add_tail(&vm_bo->list.entry.gem, &obj->gpuva.list);
return vm_bo;
@@ -1722,7 +1722,7 @@ drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *__vm_bo)
return vm_bo;
}
drm_gem_gpuva_assert_lock_held(obj);
drm_gem_gpuva_assert_lock_held(gpuvm, obj);
list_add_tail(&__vm_bo->list.entry.gem, &obj->gpuva.list);
return __vm_bo;
@@ -1894,8 +1894,7 @@ EXPORT_SYMBOL_GPL(drm_gpuva_remove);
* reference of the latter is taken.
*
* This function expects the caller to protect the GEM's GPUVA list against
* concurrent access using either the GEMs dma_resv lock or a driver specific
* lock set through drm_gem_gpuva_set_lock().
* concurrent access using either the GEM's dma-resv or gpuva.lock mutex.
*/
void
drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo)
@@ -1910,7 +1909,7 @@ drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo)
va->vm_bo = drm_gpuvm_bo_get(vm_bo);
drm_gem_gpuva_assert_lock_held(obj);
drm_gem_gpuva_assert_lock_held(gpuvm, obj);
list_add_tail(&va->gem.entry, &vm_bo->list.gpuva);
}
EXPORT_SYMBOL_GPL(drm_gpuva_link);
@@ -1930,8 +1929,7 @@ EXPORT_SYMBOL_GPL(drm_gpuva_link);
* the latter is dropped.
*
* This function expects the caller to protect the GEM's GPUVA list against
* concurrent access using either the GEMs dma_resv lock or a driver specific
* lock set through drm_gem_gpuva_set_lock().
* concurrent access using either the GEM's dma-resv or gpuva.lock mutex.
*/
void
drm_gpuva_unlink(struct drm_gpuva *va)
@@ -1942,7 +1940,7 @@ drm_gpuva_unlink(struct drm_gpuva *va)
if (unlikely(!obj))
return;
drm_gem_gpuva_assert_lock_held(obj);
drm_gem_gpuva_assert_lock_held(va->vm, obj);
list_del_init(&va->gem.entry);
va->vm_bo = NULL;
@@ -2943,8 +2941,8 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_prefetch_ops_create);
* After the caller finished processing the returned &drm_gpuva_ops, they must
* be freed with &drm_gpuva_ops_free.
*
* It is the callers responsibility to protect the GEMs GPUVA list against
* concurrent access using the GEMs dma_resv lock.
* This function expects the caller to protect the GEM's GPUVA list against
* concurrent access using either the GEM's dma-resv or gpuva.lock mutex.
*
* Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
*/
@@ -2956,7 +2954,7 @@ drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo)
struct drm_gpuva *va;
int ret;
drm_gem_gpuva_assert_lock_held(vm_bo->obj);
drm_gem_gpuva_assert_lock_held(vm_bo->vm, vm_bo->obj);
ops = kzalloc(sizeof(*ops), GFP_KERNEL);
if (!ops)

drivers/gpu/drm/panthor/panthor_gem.c

@@ -245,7 +245,6 @@ struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t
obj->base.base.funcs = &panthor_gem_funcs;
obj->base.map_wc = !ptdev->coherent;
drm_gem_gpuva_set_lock(&obj->base.base, &obj->base.base.gpuva.lock);
mutex_init(&obj->label.lock);
panthor_gem_debugfs_bo_init(obj);

drivers/gpu/drm/panthor/panthor_mmu.c

@@ -2420,8 +2420,9 @@ panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
* to be handled the same way user VMAs are.
*/
drm_gpuvm_init(&vm->base, for_mcu ? "panthor-MCU-VM" : "panthor-GPU-VM",
DRM_GPUVM_RESV_PROTECTED, &ptdev->base, dummy_gem,
min_va, va_range, 0, 0, &panthor_gpuvm_ops);
DRM_GPUVM_RESV_PROTECTED | DRM_GPUVM_IMMEDIATE_MODE,
&ptdev->base, dummy_gem, min_va, va_range, 0, 0,
&panthor_gpuvm_ops);
drm_gem_object_put(dummy_gem);
return vm;

include/drm/drm_gem.h

@@ -399,6 +399,12 @@ struct drm_gem_object {
/**
* @gpuva: Fields used by GPUVM to manage mappings pointing to this GEM object.
*
* When DRM_GPUVM_IMMEDIATE_MODE is set, this list is protected by the
* mutex. Otherwise, the list is protected by the GEMs &dma_resv lock.
*
* Note that all entries in this list must agree on whether
* DRM_GPUVM_IMMEDIATE_MODE is set.
*/
struct {
/**
@@ -412,17 +418,14 @@ struct drm_gem_object {
/**
* @gpuva.lock: lock protecting access to &drm_gem_object.gpuva.list
* when the resv lock can't be used.
* when DRM_GPUVM_IMMEDIATE_MODE is used.
*
* Should only be used when the VM is being modified in a fence
* signalling path, otherwise you should use &drm_gem_object.resv to
* protect accesses to &drm_gem_object.gpuva.list.
* Only used when DRM_GPUVM_IMMEDIATE_MODE is set. It should be
* safe to take this mutex during the fence signalling path, so
* do not allocate memory while holding this lock. Otherwise,
* the &dma_resv lock should be used.
*/
struct mutex lock;
#ifdef CONFIG_LOCKDEP
struct lockdep_map *lock_dep_map;
#endif
} gpuva;
/**
@@ -607,26 +610,12 @@ static inline bool drm_gem_is_imported(const struct drm_gem_object *obj)
}
#ifdef CONFIG_LOCKDEP
/**
* drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list.
* @obj: the &drm_gem_object
* @lock: the lock used to protect the gpuva list. The locking primitive
* must contain a dep_map field.
*
* Call this if you're not proctecting access to the gpuva list with the
* dma-resv lock, but with a custom lock.
*/
#define drm_gem_gpuva_set_lock(obj, lock) \
if (!WARN((obj)->gpuva.lock_dep_map, \
"GEM GPUVA lock should be set only once.")) \
(obj)->gpuva.lock_dep_map = &(lock)->dep_map
#define drm_gem_gpuva_assert_lock_held(obj) \
lockdep_assert((obj)->gpuva.lock_dep_map ? \
lock_is_held((obj)->gpuva.lock_dep_map) : \
#define drm_gem_gpuva_assert_lock_held(gpuvm, obj) \
lockdep_assert(drm_gpuvm_immediate_mode(gpuvm) ? \
lockdep_is_held(&(obj)->gpuva.lock) : \
dma_resv_held((obj)->resv))
#else
#define drm_gem_gpuva_set_lock(obj, lock) do {} while (0)
#define drm_gem_gpuva_assert_lock_held(obj) do {} while (0)
#define drm_gem_gpuva_assert_lock_held(gpuvm, obj) do {} while (0)
#endif
/**

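With lock_dep_map gone, the assert above keys off the VM's flag, so in
immediate mode a driver satisfies drm_gem_gpuva_assert_lock_held() by holding
the GEM's gpuva.lock mutex around link/unlink. A sketch of an unmap path under
that scheme (my_va_unmap and its surroundings are illustrative only):

#include <linux/mutex.h>
#include <drm/drm_gem.h>
#include <drm/drm_gpuvm.h>

static void my_va_unmap(struct drm_gpuva *va)
{
	struct drm_gem_object *obj = va->gem.obj;

	/* drm_gpuva_remove() only touches the VM tree, not the GEM's gpuva list. */
	drm_gpuva_remove(va);

	if (obj) {
		/*
		 * In DRM_GPUVM_IMMEDIATE_MODE, drm_gpuva_unlink() asserts
		 * that gem->gpuva.lock is held. Do not allocate memory
		 * while holding it, since this may run in the fence
		 * signalling path.
		 */
		mutex_lock(&obj->gpuva.lock);
		drm_gpuva_unlink(va);
		mutex_unlock(&obj->gpuva.lock);
	}
}
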
include/drm/drm_gpuvm.h

@@ -196,10 +196,20 @@ enum drm_gpuvm_flags {
*/
DRM_GPUVM_RESV_PROTECTED = BIT(0),
/**
* @DRM_GPUVM_IMMEDIATE_MODE: use the locking scheme for GEMs designed
* for modifying the GPUVM during the fence signalling path
*
* When set, gpuva.lock is used to protect gpuva.list in all GEM
* objects associated with this GPUVM. Otherwise, the GEMs dma-resv is
* used.
*/
DRM_GPUVM_IMMEDIATE_MODE = BIT(1),
/**
* @DRM_GPUVM_USERBITS: user defined bits
*/
DRM_GPUVM_USERBITS = BIT(1),
DRM_GPUVM_USERBITS = BIT(2),
};
/**
@@ -369,6 +379,19 @@ drm_gpuvm_resv_protected(struct drm_gpuvm *gpuvm)
return gpuvm->flags & DRM_GPUVM_RESV_PROTECTED;
}
/**
* drm_gpuvm_immediate_mode() - indicates whether &DRM_GPUVM_IMMEDIATE_MODE is
* set
* @gpuvm: the &drm_gpuvm
*
* Returns: true if &DRM_GPUVM_IMMEDIATE_MODE is set, false otherwise.
*/
static inline bool
drm_gpuvm_immediate_mode(struct drm_gpuvm *gpuvm)
{
return gpuvm->flags & DRM_GPUVM_IMMEDIATE_MODE;
}
/**
* drm_gpuvm_resv() - returns the &drm_gpuvm's &dma_resv
* @gpuvm__: the &drm_gpuvm
@@ -742,9 +765,10 @@ drm_gpuvm_bo_gem_evict(struct drm_gem_object *obj, bool evict)
{
struct drm_gpuvm_bo *vm_bo;
drm_gem_gpuva_assert_lock_held(obj);
drm_gem_for_each_gpuvm_bo(vm_bo, obj)
drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
drm_gem_gpuva_assert_lock_held(vm_bo->vm, obj);
drm_gpuvm_bo_evict(vm_bo, evict);
}
}
void drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo);
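
Because the flag is queryable via drm_gpuvm_immediate_mode(), a driver that
supports both schemes could select the gpuva-list lock at runtime. A
hypothetical helper pair, not part of this patch:

#include <linux/dma-resv.h>
#include <linux/mutex.h>
#include <drm/drm_gem.h>
#include <drm/drm_gpuvm.h>

/* Hypothetical helpers: take whichever lock protects obj->gpuva.list. */
static void my_gpuva_list_lock(struct drm_gpuvm *gpuvm,
			       struct drm_gem_object *obj)
{
	if (drm_gpuvm_immediate_mode(gpuvm))
		mutex_lock(&obj->gpuva.lock);
	else
		/* Return value ignored: with a NULL ww ctx this behaves like mutex_lock(). */
		dma_resv_lock(obj->resv, NULL);
}

static void my_gpuva_list_unlock(struct drm_gpuvm *gpuvm,
				 struct drm_gem_object *obj)
{
	if (drm_gpuvm_immediate_mode(gpuvm))
		mutex_unlock(&obj->gpuva.lock);
	else
		dma_resv_unlock(obj->resv);
}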