net: add helper to pre-check if PP for an Rx queue will be unreadable

mlx5 pokes into the rxq state to check if the queue has a memory
provider, and therefore whether it may produce unreadable mem.
Add a helper for doing this in the page pool API. fbnic will want
a similar thing (tho, for a slightly different reason).

Reviewed-by: Mina Almasry <almasrymina@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Link: https://patch.msgid.link/20250901211214.1027927-11-kuba@kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
This commit is contained in:
Jakub Kicinski 2025-09-01 14:12:10 -07:00 committed by Paolo Abeni
parent 709da681f4
commit 3ceb08838b
4 changed files with 24 additions and 8 deletions

View File

@ -780,13 +780,6 @@ static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
bitmap_free(rq->mpwqe.shampo->bitmap); bitmap_free(rq->mpwqe.shampo->bitmap);
} }
/* True when user space bound a memory provider (mp) to this RQ's rxq,
 * i.e. the queue may hand out unreadable buffers and SHAMPO headers
 * need their own readable page pool.
 */
static bool mlx5_rq_needs_separate_hd_pool(struct mlx5e_rq *rq)
{
	return !!__netif_get_rx_queue(rq->netdev, rq->ix)->mp_params.mp_ops;
}
static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev, static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
struct mlx5e_params *params, struct mlx5e_params *params,
struct mlx5e_rq_param *rqp, struct mlx5e_rq_param *rqp,
@ -825,7 +818,7 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
hd_pool_size = (rq->mpwqe.shampo->hd_per_wqe * wq_size) / hd_pool_size = (rq->mpwqe.shampo->hd_per_wqe * wq_size) /
MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
if (mlx5_rq_needs_separate_hd_pool(rq)) { if (netif_rxq_has_unreadable_mp(rq->netdev, rq->ix)) {
/* Separate page pool for shampo headers */ /* Separate page pool for shampo headers */
struct page_pool_params pp_params = { }; struct page_pool_params pp_params = { };

View File

@ -151,6 +151,8 @@ struct netdev_queue_mgmt_ops {
int idx); int idx);
}; };
bool netif_rxq_has_unreadable_mp(struct net_device *dev, int idx);
/** /**
* DOC: Lockless queue stopping / waking helpers. * DOC: Lockless queue stopping / waking helpers.
* *

View File

@ -505,6 +505,18 @@ static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
page_pool_update_nid(pool, new_nid); page_pool_update_nid(pool, new_nid);
} }
/**
* page_pool_is_unreadable() - will allocated buffers be unreadable for the CPU
* @pool: queried page pool
*
* Check if page pool will return buffers which are unreadable to the CPU /
* kernel. This will only be the case if user space bound a memory provider (mp)
* which returns unreadable memory to the queue served by the page pool.
* If %PP_FLAG_ALLOW_UNREADABLE_NETMEM was set but there is no mp bound
* this helper will return false. See also netif_rxq_has_unreadable_mp().
*
* Return: true if memory allocated by the page pool may be unreadable
*/
static inline bool page_pool_is_unreadable(struct page_pool *pool) static inline bool page_pool_is_unreadable(struct page_pool *pool)
{ {
return !!pool->mp_ops; return !!pool->mp_ops;

View File

@ -9,6 +9,15 @@
#include "page_pool_priv.h" #include "page_pool_priv.h"
/**
 * netif_rxq_has_unreadable_mp() - check if an Rx queue may produce
 *	unreadable memory
 * @dev: netdev owning the queue
 * @idx: index of the Rx queue to query
 *
 * A queue produces unreadable memory only when user space has bound a
 * memory provider (mp) to it. See also page_pool_is_unreadable().
 *
 * Return: true if a memory provider is bound to the queue.
 */
bool netif_rxq_has_unreadable_mp(struct net_device *dev, int idx)
{
	struct netdev_rx_queue *rxq;

	rxq = __netif_get_rx_queue(dev, idx);
	return !!rxq->mp_params.mp_ops;
}
EXPORT_SYMBOL(netif_rxq_has_unreadable_mp);
int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx) int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
{ {
struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx); struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);