dma-fence: Use a flag for 64-bit seqnos

With the goal of reducing the need for drivers to touch (and dereference)
fence->ops, we move the 64-bit seqnos flag from struct dma_fence_ops to
the fence->flags.

Drivers which were setting this flag are changed to use the new
dma_fence_init64() instead of dma_fence_init().

v2:
 * Streamlined init and added kerneldoc.
 * Rebase for amdgpu userq which landed since.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Reviewed-by: Christian König <christian.koenig@amd.com> # v1
Signed-off-by: Tvrtko Ursulin <tursulin@ursulin.net>
Link: https://lore.kernel.org/r/20250515095004.28318-3-tvrtko.ursulin@igalia.com
This commit is contained in:
Tvrtko Ursulin 2025-05-15 10:49:57 +01:00 committed by Tvrtko Ursulin
parent 22b929b252
commit bf33a0003d
6 changed files with 58 additions and 35 deletions

View File

@ -218,7 +218,6 @@ static void dma_fence_chain_set_deadline(struct dma_fence *fence,
} }
const struct dma_fence_ops dma_fence_chain_ops = { const struct dma_fence_ops dma_fence_chain_ops = {
.use_64bit_seqno = true,
.get_driver_name = dma_fence_chain_get_driver_name, .get_driver_name = dma_fence_chain_get_driver_name,
.get_timeline_name = dma_fence_chain_get_timeline_name, .get_timeline_name = dma_fence_chain_get_timeline_name,
.enable_signaling = dma_fence_chain_enable_signaling, .enable_signaling = dma_fence_chain_enable_signaling,
@ -262,8 +261,8 @@ void dma_fence_chain_init(struct dma_fence_chain *chain,
seqno = max(prev->seqno, seqno); seqno = max(prev->seqno, seqno);
} }
dma_fence_init(&chain->base, &dma_fence_chain_ops, dma_fence_init64(&chain->base, &dma_fence_chain_ops, &chain->lock,
&chain->lock, context, seqno); context, seqno);
/* /*
* Chaining dma_fence_chain container together is only allowed through * Chaining dma_fence_chain container together is only allowed through

View File

@ -989,6 +989,25 @@ void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq)
} }
EXPORT_SYMBOL(dma_fence_describe); EXPORT_SYMBOL(dma_fence_describe);
/*
 * Common initialization shared by dma_fence_init() and dma_fence_init64();
 * the two entry points differ only in the initial @flags value passed in
 * (0UL vs BIT(DMA_FENCE_FLAG_SEQNO64_BIT)).
 */
static void
__dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
		 spinlock_t *lock, u64 context, u64 seqno, unsigned long flags)
{
	/* A lock and the mandatory name callbacks are required for every fence. */
	BUG_ON(!lock);
	BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);

	kref_init(&fence->refcount);
	fence->ops = ops;
	INIT_LIST_HEAD(&fence->cb_list);
	fence->lock = lock;
	fence->context = context;
	fence->seqno = seqno;
	fence->flags = flags;
	fence->error = 0;

	/* Emit the tracepoint only once the fence is fully initialized. */
	trace_dma_fence_init(fence);
}
/** /**
* dma_fence_init - Initialize a custom fence. * dma_fence_init - Initialize a custom fence.
* @fence: the fence to initialize * @fence: the fence to initialize
@ -1008,18 +1027,30 @@ void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
spinlock_t *lock, u64 context, u64 seqno) spinlock_t *lock, u64 context, u64 seqno)
{ {
BUG_ON(!lock); __dma_fence_init(fence, ops, lock, context, seqno, 0UL);
BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);
kref_init(&fence->refcount);
fence->ops = ops;
INIT_LIST_HEAD(&fence->cb_list);
fence->lock = lock;
fence->context = context;
fence->seqno = seqno;
fence->flags = 0UL;
fence->error = 0;
trace_dma_fence_init(fence);
} }
EXPORT_SYMBOL(dma_fence_init); EXPORT_SYMBOL(dma_fence_init);
/**
 * dma_fence_init64 - Initialize a custom fence with 64-bit seqno support.
 * @fence: the fence to initialize
 * @ops: the dma_fence_ops for operations on this fence
 * @lock: the irqsafe spinlock to use for locking this fence
 * @context: the execution context this fence is run on
 * @seqno: a linear increasing sequence number for this context
 *
 * Behaves like dma_fence_init() but additionally sets
 * DMA_FENCE_FLAG_SEQNO64_BIT in &dma_fence.flags, marking the fence as
 * using the full 64-bit sequence number space for comparisons.
 *
 * Initializes an allocated fence, the caller doesn't have to keep its
 * refcount after committing with this fence, but it will need to hold a
 * refcount again if &dma_fence_ops.enable_signaling gets called.
 *
 * Context and seqno are used for easy comparison between fences, allowing
 * to check which fence is later by simply using dma_fence_later().
 */
void
dma_fence_init64(struct dma_fence *fence, const struct dma_fence_ops *ops,
		 spinlock_t *lock, u64 context, u64 seqno)
{
	const unsigned long seqno64 = BIT(DMA_FENCE_FLAG_SEQNO64_BIT);

	__dma_fence_init(fence, ops, lock, context, seqno, seqno64);
}
EXPORT_SYMBOL(dma_fence_init64);

View File

@ -134,7 +134,6 @@ static bool amdgpu_eviction_fence_enable_signaling(struct dma_fence *f)
} }
static const struct dma_fence_ops amdgpu_eviction_fence_ops = { static const struct dma_fence_ops amdgpu_eviction_fence_ops = {
.use_64bit_seqno = true,
.get_driver_name = amdgpu_eviction_fence_get_driver_name, .get_driver_name = amdgpu_eviction_fence_get_driver_name,
.get_timeline_name = amdgpu_eviction_fence_get_timeline_name, .get_timeline_name = amdgpu_eviction_fence_get_timeline_name,
.enable_signaling = amdgpu_eviction_fence_enable_signaling, .enable_signaling = amdgpu_eviction_fence_enable_signaling,
@ -160,9 +159,9 @@ amdgpu_eviction_fence_create(struct amdgpu_eviction_fence_mgr *evf_mgr)
ev_fence->evf_mgr = evf_mgr; ev_fence->evf_mgr = evf_mgr;
get_task_comm(ev_fence->timeline_name, current); get_task_comm(ev_fence->timeline_name, current);
spin_lock_init(&ev_fence->lock); spin_lock_init(&ev_fence->lock);
dma_fence_init(&ev_fence->base, &amdgpu_eviction_fence_ops, dma_fence_init64(&ev_fence->base, &amdgpu_eviction_fence_ops,
&ev_fence->lock, evf_mgr->ev_fence_ctx, &ev_fence->lock, evf_mgr->ev_fence_ctx,
atomic_inc_return(&evf_mgr->ev_fence_seq)); atomic_inc_return(&evf_mgr->ev_fence_seq));
return ev_fence; return ev_fence;
} }

View File

@ -239,8 +239,8 @@ static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
fence = &userq_fence->base; fence = &userq_fence->base;
userq_fence->fence_drv = fence_drv; userq_fence->fence_drv = fence_drv;
dma_fence_init(fence, &amdgpu_userq_fence_ops, &userq_fence->lock, dma_fence_init64(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
fence_drv->context, seq); fence_drv->context, seq);
amdgpu_userq_fence_driver_get(fence_drv); amdgpu_userq_fence_driver_get(fence_drv);
dma_fence_get(fence); dma_fence_get(fence);
@ -334,7 +334,6 @@ static void amdgpu_userq_fence_release(struct dma_fence *f)
} }
static const struct dma_fence_ops amdgpu_userq_fence_ops = { static const struct dma_fence_ops amdgpu_userq_fence_ops = {
.use_64bit_seqno = true,
.get_driver_name = amdgpu_userq_fence_get_driver_name, .get_driver_name = amdgpu_userq_fence_get_driver_name,
.get_timeline_name = amdgpu_userq_fence_get_timeline_name, .get_timeline_name = amdgpu_userq_fence_get_timeline_name,
.signaled = amdgpu_userq_fence_signaled, .signaled = amdgpu_userq_fence_signaled,

View File

@ -71,7 +71,6 @@ static void amdgpu_tlb_fence_work(struct work_struct *work)
} }
static const struct dma_fence_ops amdgpu_tlb_fence_ops = { static const struct dma_fence_ops amdgpu_tlb_fence_ops = {
.use_64bit_seqno = true,
.get_driver_name = amdgpu_tlb_fence_get_driver_name, .get_driver_name = amdgpu_tlb_fence_get_driver_name,
.get_timeline_name = amdgpu_tlb_fence_get_timeline_name .get_timeline_name = amdgpu_tlb_fence_get_timeline_name
}; };
@ -101,8 +100,8 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev, struct amdgpu_vm *vm
INIT_WORK(&f->work, amdgpu_tlb_fence_work); INIT_WORK(&f->work, amdgpu_tlb_fence_work);
spin_lock_init(&f->lock); spin_lock_init(&f->lock);
dma_fence_init(&f->base, &amdgpu_tlb_fence_ops, &f->lock, dma_fence_init64(&f->base, &amdgpu_tlb_fence_ops, &f->lock,
vm->tlb_fence_context, atomic64_read(&vm->tlb_seq)); vm->tlb_fence_context, atomic64_read(&vm->tlb_seq));
/* TODO: We probably need a separate wq here */ /* TODO: We probably need a separate wq here */
dma_fence_get(&f->base); dma_fence_get(&f->base);

View File

@ -98,6 +98,7 @@ struct dma_fence {
}; };
enum dma_fence_flag_bits { enum dma_fence_flag_bits {
DMA_FENCE_FLAG_SEQNO64_BIT,
DMA_FENCE_FLAG_SIGNALED_BIT, DMA_FENCE_FLAG_SIGNALED_BIT,
DMA_FENCE_FLAG_TIMESTAMP_BIT, DMA_FENCE_FLAG_TIMESTAMP_BIT,
DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
@ -125,14 +126,6 @@ struct dma_fence_cb {
* *
*/ */
struct dma_fence_ops { struct dma_fence_ops {
/**
* @use_64bit_seqno:
*
* True if this dma_fence implementation uses 64bit seqno, false
* otherwise.
*/
bool use_64bit_seqno;
/** /**
* @get_driver_name: * @get_driver_name:
* *
@ -263,6 +256,9 @@ struct dma_fence_ops {
void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
spinlock_t *lock, u64 context, u64 seqno); spinlock_t *lock, u64 context, u64 seqno);
void dma_fence_init64(struct dma_fence *fence, const struct dma_fence_ops *ops,
spinlock_t *lock, u64 context, u64 seqno);
void dma_fence_release(struct kref *kref); void dma_fence_release(struct kref *kref);
void dma_fence_free(struct dma_fence *fence); void dma_fence_free(struct dma_fence *fence);
void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq); void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq);
@ -455,7 +451,7 @@ static inline bool __dma_fence_is_later(struct dma_fence *fence, u64 f1, u64 f2)
* 32bit sequence numbers. Use a 64bit compare when the driver says to * 32bit sequence numbers. Use a 64bit compare when the driver says to
* do so. * do so.
*/ */
if (fence->ops->use_64bit_seqno) if (test_bit(DMA_FENCE_FLAG_SEQNO64_BIT, &fence->flags))
return f1 > f2; return f1 > f2;
return (int)(lower_32_bits(f1) - lower_32_bits(f2)) > 0; return (int)(lower_32_bits(f1) - lower_32_bits(f2)) > 0;