mirror of https://github.com/torvalds/linux.git
io_uring/zcrx: move io_zcrx_scrub() and dependencies up
In preparation for adding zcrx ifq exporting and importing, move
io_zcrx_scrub() and its dependencies up the file to be closer to
io_close_queue().

Signed-off-by: David Wei <dw@davidwei.uk>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 39c9676f78
commit 742cb2e14e
io_uring/zcrx.c

@@ -544,6 +544,48 @@ static void io_put_zcrx_ifq(struct io_zcrx_ifq *ifq)
 	io_zcrx_ifq_free(ifq);
 }
 
+static void io_zcrx_return_niov_freelist(struct net_iov *niov)
+{
+	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
+
+	spin_lock_bh(&area->freelist_lock);
+	area->freelist[area->free_count++] = net_iov_idx(niov);
+	spin_unlock_bh(&area->freelist_lock);
+}
+
+static void io_zcrx_return_niov(struct net_iov *niov)
+{
+	netmem_ref netmem = net_iov_to_netmem(niov);
+
+	if (!niov->desc.pp) {
+		/* copy fallback allocated niovs */
+		io_zcrx_return_niov_freelist(niov);
+		return;
+	}
+	page_pool_put_unrefed_netmem(niov->desc.pp, netmem, -1, false);
+}
+
+static void io_zcrx_scrub(struct io_zcrx_ifq *ifq)
+{
+	struct io_zcrx_area *area = ifq->area;
+	int i;
+
+	if (!area)
+		return;
+
+	/* Reclaim back all buffers given to the user space. */
+	for (i = 0; i < area->nia.num_niovs; i++) {
+		struct net_iov *niov = &area->nia.niovs[i];
+		int nr;
+
+		if (!atomic_read(io_get_user_counter(niov)))
+			continue;
+		nr = atomic_xchg(io_get_user_counter(niov), 0);
+		if (nr && !page_pool_unref_netmem(net_iov_to_netmem(niov), nr))
+			io_zcrx_return_niov(niov);
+	}
+}
+
 struct io_mapped_region *io_zcrx_get_region(struct io_ring_ctx *ctx,
 					    unsigned int id)
 {
@@ -684,48 +726,6 @@ static struct net_iov *__io_zcrx_get_free_niov(struct io_zcrx_area *area)
 	return &area->nia.niovs[niov_idx];
 }
 
-static void io_zcrx_return_niov_freelist(struct net_iov *niov)
-{
-	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
-
-	spin_lock_bh(&area->freelist_lock);
-	area->freelist[area->free_count++] = net_iov_idx(niov);
-	spin_unlock_bh(&area->freelist_lock);
-}
-
-static void io_zcrx_return_niov(struct net_iov *niov)
-{
-	netmem_ref netmem = net_iov_to_netmem(niov);
-
-	if (!niov->desc.pp) {
-		/* copy fallback allocated niovs */
-		io_zcrx_return_niov_freelist(niov);
-		return;
-	}
-	page_pool_put_unrefed_netmem(niov->desc.pp, netmem, -1, false);
-}
-
-static void io_zcrx_scrub(struct io_zcrx_ifq *ifq)
-{
-	struct io_zcrx_area *area = ifq->area;
-	int i;
-
-	if (!area)
-		return;
-
-	/* Reclaim back all buffers given to the user space. */
-	for (i = 0; i < area->nia.num_niovs; i++) {
-		struct net_iov *niov = &area->nia.niovs[i];
-		int nr;
-
-		if (!atomic_read(io_get_user_counter(niov)))
-			continue;
-		nr = atomic_xchg(io_get_user_counter(niov), 0);
-		if (nr && !page_pool_unref_netmem(net_iov_to_netmem(niov), nr))
-			io_zcrx_return_niov(niov);
-	}
-}
-
 void io_unregister_zcrx_ifqs(struct io_ring_ctx *ctx)
 {
 	struct io_zcrx_ifq *ifq;
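Aside from the move, the most interesting piece of the relocated code is the reclaim pattern in io_zcrx_scrub(): it atomically steals the count of references user space still holds (atomic_xchg to 0), then drops that many page pool references in one call, freeing the buffer only if the count reaches zero. Below is a minimal userspace C sketch of that pattern, not kernel code; struct buf, pool_unref() and scrub() are hypothetical stand-ins, not io_uring or page pool APIs.

/*
 * Userspace sketch of the io_zcrx_scrub() reclaim pattern:
 * steal the user-held reference count atomically, then drop
 * that many pool references in one go.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct buf {
	atomic_int user_refs;	/* references handed out to user space */
	atomic_int pool_refs;	/* references held against the pool */
};

/* Drop @nr pool references; returns true once the buffer is free. */
static bool pool_unref(struct buf *b, int nr)
{
	/* fetch_sub returns the old value; old == nr means new == 0 */
	return atomic_fetch_sub(&b->pool_refs, nr) == nr;
}

static void scrub(struct buf *bufs, int n)
{
	for (int i = 0; i < n; i++) {
		struct buf *b = &bufs[i];
		int nr;

		if (!atomic_load(&b->user_refs))
			continue;	/* nothing handed out, skip */
		/* xchg claims all outstanding user refs exactly once */
		nr = atomic_exchange(&b->user_refs, 0);
		if (nr && pool_unref(b, nr))
			printf("buf %d fully reclaimed\n", i);
	}
}

int main(void)
{
	struct buf bufs[2] = {
		{ .user_refs = 3, .pool_refs = 3 },	/* 3 refs with user */
		{ .user_refs = 0, .pool_refs = 0 },	/* idle */
	};

	scrub(bufs, 2);
	return 0;
}

The atomic_exchange (atomic_xchg in the kernel code) is what makes the steal race-free: a plain read-then-store could race with a concurrent return of a buffer and drop the same references twice, whereas the exchange guarantees each outstanding reference is reclaimed exactly once.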