mirror of https://github.com/torvalds/linux.git
RDMA v6.18 first rc pull request
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:

 - Memory leak in bnxt GSI qp path
 - Failure in irdma registering large MRs
 - Failure to clean out the right CQ table entry in irdma
 - Invalid vf_id in some cases
 - Incorrect error unwind in EFA CQ create
 - hns doesn't use the optimal cq/qp relationships for its HW banks
 - hns reports the wrong SGE size to userspace for its QPs
 - Corruption of the hns work queue entries in some cases

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  MAINTAINERS: Update irdma maintainers
  RDMA/irdma: Fix vf_id size to u16 to avoid overflow
  RDMA/hns: Remove an extra blank line
  RDMA/hns: Fix wrong WQE data when QP wraps around
  RDMA/hns: Fix the modification of max_send_sge
  RDMA/hns: Fix recv CQ and QP cache affinity
  RDMA/uverbs: Fix umem release in UVERBS_METHOD_CQ_CREATE
  RDMA/irdma: Set irdma_cq cq_num field during CQ create
  RDMA/irdma: Fix SD index calculation
  RDMA/bnxt_re: Fix a potential memory leak in destroy_gsi_sqp

commit 17d85f33a8

@@ -12521,6 +12521,7 @@ F: include/linux/avf/virtchnl.h
 F:      include/linux/net/intel/*/
 
 INTEL ETHERNET PROTOCOL DRIVER FOR RDMA
+M:      Krzysztof Czurylo <krzysztof.czurylo@intel.com>
 M:      Tatyana Nikolova <tatyana.e.nikolova@intel.com>
 L:      linux-rdma@vger.kernel.org
 S:      Supported

@@ -206,6 +206,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
         return ret;
 
 err_free:
+        ib_umem_release(umem);
         rdma_restrack_put(&cq->res);
         kfree(cq);
 err_event_file:

@@ -913,7 +913,7 @@ void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
         spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
 }
 
-static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
+static void bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
 {
         struct bnxt_re_qp *gsi_sqp;
         struct bnxt_re_ah *gsi_sah;
@@ -933,10 +933,9 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
 
         ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
         rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
-        if (rc) {
+        if (rc)
                 ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
-                goto fail;
-        }
         bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
 
         /* remove from active qp list */
@@ -951,10 +950,6 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
         rdev->gsi_ctx.gsi_sqp = NULL;
         rdev->gsi_ctx.gsi_sah = NULL;
         rdev->gsi_ctx.sqp_tbl = NULL;
-
-        return 0;
-fail:
-        return rc;
 }
 
 static void bnxt_re_del_unique_gid(struct bnxt_re_dev *rdev)

@@ -1216,13 +1216,13 @@ int efa_create_cq_umem(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
         if (umem->length < cq->size) {
                 ibdev_dbg(&dev->ibdev, "External memory too small\n");
                 err = -EINVAL;
-                goto err_free_mem;
+                goto err_out;
         }
 
         if (!ib_umem_is_contiguous(umem)) {
                 ibdev_dbg(&dev->ibdev, "Non contiguous CQ unsupported\n");
                 err = -EINVAL;
-                goto err_free_mem;
+                goto err_out;
         }
 
         cq->cpu_addr = NULL;
@@ -1251,7 +1251,7 @@ int efa_create_cq_umem(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 
         err = efa_com_create_cq(&dev->edev, &params, &result);
         if (err)
-                goto err_free_mem;
+                goto err_free_mapped;
 
         resp.db_off = result.db_off;
         resp.cq_idx = result.cq_idx;
@@ -1299,12 +1299,10 @@ int efa_create_cq_umem(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
         efa_cq_user_mmap_entries_remove(cq);
 err_destroy_cq:
         efa_destroy_cq_idx(dev, cq->cq_idx);
-err_free_mem:
-        if (umem)
-                ib_umem_release(umem);
-        else
-                efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, DMA_FROM_DEVICE);
-
+err_free_mapped:
+        if (!umem)
+                efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
+                                DMA_FROM_DEVICE);
 err_out:
         atomic64_inc(&dev->stats.create_cq_err);
         return err;

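With the umem release moved into the uverbs CQ-create unwind (see the UVERBS_METHOD_CQ_CREATE hunk earlier), the EFA unwind above no longer touches the umem at all: early validation failures jump straight to err_out, and only the kernel-mapped buffer is freed under the renamed err_free_mapped label. A small standalone sketch of the same goto-unwind shape, in plain C with made-up resource names (not EFA code):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical two-step setup: each failure jumps only to a label
 * that undoes work which has actually been done so far. */
static int setup(void)
{
        char *queue_buf, *doorbell_map;
        int err;

        queue_buf = malloc(64);
        if (!queue_buf) {
                err = -1;
                goto err_out;           /* nothing to undo yet */
        }

        doorbell_map = malloc(64);
        if (!doorbell_map) {
                err = -1;
                goto err_free_buf;      /* undo only the first step */
        }

        /* ... later failures would jump to a deeper label ... */
        free(doorbell_map);
        free(queue_buf);
        return 0;

err_free_buf:
        free(queue_buf);
err_out:
        fprintf(stderr, "setup failed: %d\n", err);
        return err;
}

int main(void)
{
        return setup() ? 1 : 0;
}
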
@@ -30,6 +30,7 @@
  * SOFTWARE.
  */
 
+#include <linux/pci.h>
 #include <rdma/ib_umem.h>
 #include <rdma/uverbs_ioctl.h>
 #include "hns_roce_device.h"
@@ -37,6 +38,43 @@
 #include "hns_roce_hem.h"
 #include "hns_roce_common.h"
 
+void hns_roce_put_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx)
+{
+        struct hns_roce_dev *hr_dev = to_hr_dev(uctx->ibucontext.device);
+        struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+
+        if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
+                return;
+
+        mutex_lock(&cq_table->bank_mutex);
+        cq_table->ctx_num[uctx->cq_bank_id]--;
+        mutex_unlock(&cq_table->bank_mutex);
+}
+
+void hns_roce_get_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx)
+{
+        struct hns_roce_dev *hr_dev = to_hr_dev(uctx->ibucontext.device);
+        struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+        u32 least_load = cq_table->ctx_num[0];
+        u8 bankid = 0;
+        u8 i;
+
+        if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
+                return;
+
+        mutex_lock(&cq_table->bank_mutex);
+        for (i = 1; i < HNS_ROCE_CQ_BANK_NUM; i++) {
+                if (cq_table->ctx_num[i] < least_load) {
+                        least_load = cq_table->ctx_num[i];
+                        bankid = i;
+                }
+        }
+        cq_table->ctx_num[bankid]++;
+        mutex_unlock(&cq_table->bank_mutex);
+
+        uctx->cq_bank_id = bankid;
+}
+
 static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank)
 {
         u32 least_load = bank[0].inuse;
@@ -55,7 +93,21 @@ static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank)
         return bankid;
 }
 
-static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+static u8 select_cq_bankid(struct hns_roce_dev *hr_dev,
+                           struct hns_roce_bank *bank, struct ib_udata *udata)
+{
+        struct hns_roce_ucontext *uctx = udata ?
+                rdma_udata_to_drv_context(udata, struct hns_roce_ucontext,
+                                          ibucontext) : NULL;
+
+        if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+                return uctx ? uctx->cq_bank_id : 0;
+
+        return get_least_load_bankid_for_cq(bank);
+}
+
+static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+                     struct ib_udata *udata)
 {
         struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
         struct hns_roce_bank *bank;
@@ -63,7 +115,7 @@ static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
         int id;
 
         mutex_lock(&cq_table->bank_mutex);
-        bankid = get_least_load_bankid_for_cq(cq_table->bank);
+        bankid = select_cq_bankid(hr_dev, cq_table->bank, udata);
         bank = &cq_table->bank[bankid];
 
         id = ida_alloc_range(&bank->ida, bank->min, bank->max, GFP_KERNEL);
@@ -396,7 +448,7 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
                 goto err_cq_buf;
         }
 
-        ret = alloc_cqn(hr_dev, hr_cq);
+        ret = alloc_cqn(hr_dev, hr_cq, udata);
         if (ret) {
                 ibdev_err(ibdev, "failed to alloc CQN, ret = %d.\n", ret);
                 goto err_cq_db;

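The new hns_roce_get_cq_bankid_for_uctx()/hns_roce_put_cq_bankid_for_uctx() helpers above keep a per-bank count of user contexts and pin each new context to the least-loaded bank on HIP09 and newer, so a context's CQs stay affine to one hardware bank. A compilable sketch of just the least-load selection, with an illustrative bank count and array rather than real driver state:

#include <stdio.h>
#include <stdint.h>

#define NUM_BANKS 4     /* stand-in for HNS_ROCE_CQ_BANK_NUM */

/* Pick the bank with the fewest user contexts, mirroring the
 * least-load loop in hns_roce_get_cq_bankid_for_uctx() above. */
static uint8_t pick_least_loaded(const uint32_t ctx_num[NUM_BANKS])
{
        uint32_t least = ctx_num[0];
        uint8_t bankid = 0;
        uint8_t i;

        for (i = 1; i < NUM_BANKS; i++) {
                if (ctx_num[i] < least) {
                        least = ctx_num[i];
                        bankid = i;
                }
        }
        return bankid;
}

int main(void)
{
        uint32_t ctx_num[NUM_BANKS] = { 3, 1, 2, 1 };
        uint8_t id = pick_least_loaded(ctx_num);

        ctx_num[id]++;  /* the chosen bank now hosts one more context */
        printf("assigned bank %u, load now %u\n",
               (unsigned int)id, (unsigned int)ctx_num[id]);
        return 0;
}
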
@@ -217,6 +217,7 @@ struct hns_roce_ucontext {
         struct mutex page_mutex;
         struct hns_user_mmap_entry *db_mmap_entry;
         u32 config;
+        u8 cq_bank_id;
 };
 
 struct hns_roce_pd {
@@ -495,6 +496,7 @@ struct hns_roce_cq_table {
         struct hns_roce_hem_table table;
         struct hns_roce_bank bank[HNS_ROCE_CQ_BANK_NUM];
         struct mutex bank_mutex;
+        u32 ctx_num[HNS_ROCE_CQ_BANK_NUM];
 };
 
 struct hns_roce_srq_table {
@@ -1305,5 +1307,7 @@ hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
                                 size_t length,
                                 enum hns_roce_mmap_type mmap_type);
 bool check_sl_valid(struct hns_roce_dev *hr_dev, u8 sl);
+void hns_roce_put_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx);
+void hns_roce_get_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx);
 
 #endif /* _HNS_ROCE_DEVICE_H */

@@ -165,6 +165,8 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
         hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ,
                      to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
         hr_reg_clear(fseg, FRMR_BLK_MODE);
+        hr_reg_clear(fseg, FRMR_BLOCK_SIZE);
+        hr_reg_clear(fseg, FRMR_ZBVA);
 }
 
 static void set_atomic_seg(const struct ib_send_wr *wr,
@@ -339,9 +341,6 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
         int j = 0;
         int i;
 
-        hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
-                     (*sge_ind) & (qp->sge.sge_cnt - 1));
-
         hr_reg_write(rc_sq_wqe, RC_SEND_WQE_INLINE,
                      !!(wr->send_flags & IB_SEND_INLINE));
         if (wr->send_flags & IB_SEND_INLINE)
@@ -586,6 +585,9 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
         hr_reg_write(rc_sq_wqe, RC_SEND_WQE_CQE,
                      (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
 
+        hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
+                     curr_idx & (qp->sge.sge_cnt - 1));
+
         if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
             wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
                 if (msg_len != ATOMIC_WR_LEN)
@@ -734,6 +736,9 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
                 owner_bit =
                         ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
 
+                /* RC and UD share the same DirectWQE field layout */
+                ((struct hns_roce_v2_rc_send_wqe *)wqe)->byte_4 = 0;
+
                 /* Corresponding to the QP type, wqe process separately */
                 if (ibqp->qp_type == IB_QPT_RC)
                         ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
@@ -7048,7 +7053,6 @@ static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
                 goto error_failed_roce_init;
         }
 
-
         handle->priv = hr_dev;
 
         return 0;

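Two pieces of the hunks above address the WQE-corruption report: the MSG_START_SGE_IDX field is now written in set_rc_wqe() from the current index, and byte_4 of each WQE is zeroed before it is rebuilt, so data left over from the previous lap around the ring cannot leak into a new entry. The owner bit computed from sq.head flips each time the send queue wraps, which is how the hardware tells fresh WQEs from stale ones. A standalone illustration of that owner-bit arithmetic (a depth of 8 is just an example):

#include <stdio.h>

int main(void)
{
        const unsigned int wqe_cnt = 8; /* power-of-two SQ depth, example only */
        const unsigned int shift = 3;   /* ilog2(wqe_cnt) */
        unsigned int head;

        /* Same arithmetic as owner_bit in hns_roce_v2_post_send():
         * the bit inverts every wqe_cnt slots, i.e. on each wrap of the
         * producer index. */
        for (head = 0; head < 3 * wqe_cnt; head += 4) {
                unsigned int owner_bit = (~((head >> shift) & 0x1)) & 0x1;

                printf("head=%2u owner_bit=%u\n", head, owner_bit);
        }
        return 0;
}
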
@@ -425,6 +425,8 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
         if (ret)
                 goto error_fail_copy_to_udata;
 
+        hns_roce_get_cq_bankid_for_uctx(context);
+
         return 0;
 
 error_fail_copy_to_udata:
@@ -447,6 +449,8 @@ static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
         struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
         struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device);
 
+        hns_roce_put_cq_bankid_for_uctx(context);
+
         if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
             hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
                 mutex_destroy(&context->page_mutex);

@@ -662,7 +662,6 @@ static int set_user_sq_size(struct hns_roce_dev *hr_dev,
 
         hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
         hr_qp->sq.wqe_cnt = cnt;
-        cap->max_send_sge = hr_qp->sq.max_gs;
 
         return 0;
 }
@@ -744,7 +743,6 @@ static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
 
         /* sync the parameters of kernel QP to user's configuration */
         cap->max_send_wr = cnt;
-        cap->max_send_sge = hr_qp->sq.max_gs;
 
         return 0;
 }

@@ -71,7 +71,7 @@ int irdma_hmc_init_pble(struct irdma_sc_dev *dev,
 static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc,
                           struct sd_pd_idx *idx)
 {
-        idx->sd_idx = (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
+        idx->sd_idx = pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
         idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
         idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD);
 }

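The hunk above drops a (u32) cast that truncated next_fpm_addr before the division; since the FPM address is a 64-bit offset, casting first produced a wrong SD index once the offset grew past 4 GB, which is what broke registration of large MRs. A small userspace illustration of the truncation (the block size constant is made up, not the real IRDMA_HMC_DIRECT_BP_SIZE):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const uint64_t bp_size = 4ULL * 1024 * 1024;            /* made-up block size */
        const uint64_t next_fpm_addr = 6ULL * 1024 * 1024 * 1024; /* 6 GB offset */

        /* Casting the 64-bit offset to u32 before dividing wraps it to 2 GB. */
        uint32_t truncated = (uint32_t)next_fpm_addr / bp_size;
        /* Dividing in 64 bits first, as the fix does, keeps the full offset. */
        uint32_t full = next_fpm_addr / bp_size;

        printf("sd_idx with cast=%u, without cast=%u\n", truncated, full);
        return 0;
}
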
@@ -706,7 +706,7 @@ struct irdma_sc_dev {
         u32 vchnl_ver;
         u16 num_vfs;
         u16 hmc_fn_id;
-        u8 vf_id;
+        u16 vf_id;
         bool privileged:1;
         bool vchnl_up:1;
         bool ceq_valid:1;

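Widening vf_id from u8 to u16 in struct irdma_sc_dev matters because a VF id above 255 silently wraps when stored in an 8-bit field. A tiny demonstration of that wraparound (the value 300 is only an example):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t hw_vf_id = 300;                /* example id that does not fit in 8 bits */
        uint8_t narrow = (uint8_t)hw_vf_id;     /* wraps to 44: behaviour of the old u8 field */
        uint16_t wide = hw_vf_id;               /* keeps 300: the field after the fix */

        printf("u8 copy=%u, u16 copy=%u\n",
               (unsigned int)narrow, (unsigned int)wide);
        return 0;
}
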
@@ -2503,6 +2503,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
         spin_lock_init(&iwcq->lock);
         INIT_LIST_HEAD(&iwcq->resize_list);
         INIT_LIST_HEAD(&iwcq->cmpl_generated);
+        iwcq->cq_num = cq_num;
         info.dev = dev;
         ukinfo->cq_size = max(entries, 4);
         ukinfo->cq_id = cq_num;

@@ -140,7 +140,7 @@ struct irdma_srq {
 struct irdma_cq {
         struct ib_cq ibcq;
         struct irdma_sc_cq sc_cq;
-        u16 cq_num;
+        u32 cq_num;
         bool user_mode;
         atomic_t armed;
         enum irdma_cmpl_notify last_notify;