Skip to content

Commit bd4ee70

Browse files
Konstantin Taranov authored and rleon committed
RDMA/mana_ib: UD/GSI QP creation for kernel
Implement UD/GSI QPs for the kernel. Allow create/modify/destroy for such QPs. Signed-off-by: Konstantin Taranov <[email protected]> Link: https://patch.msgid.link/[email protected] Reviewed-by: Shiraz Saleem <[email protected]> Reviewed-by: Long Li <[email protected]> Signed-off-by: Leon Romanovsky <[email protected]>
1 parent 7f5192a commit bd4ee70

File tree

1 file changed

+115
-0
lines changed
  • drivers/infiniband/hw/mana

1 file changed

+115
-0
lines changed

drivers/infiniband/hw/mana/qp.c

Lines changed: 115 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -398,6 +398,52 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
398398
return err;
399399
}
400400

401+
/* Size in bytes of one work-queue entry: the GDMA WQE header plus the
 * inline OOB area plus the scatter/gather list, rounded up to the
 * hardware work-queue basic-unit granularity.
 */
static u32 mana_ib_wqe_size(u32 sge, u32 oob_size)
{
	u32 raw_size = sizeof(struct gdma_wqe) + oob_size + sge * sizeof(struct gdma_sge);

	return ALIGN(raw_size, GDMA_WQE_BU_SIZE);
}
407+
408+
/* Compute the buffer size in bytes for one queue of a QP, from the caller's
 * requested capacities. Returns 0 for QP types that have no kernel-managed
 * queues here. Result is a power of two, rounded up to a MANA page.
 */
static u32 mana_ib_queue_size(struct ib_qp_init_attr *attr, u32 queue_type)
{
	u32 num_wr, wqe_size;

	switch (attr->qp_type) {
	case IB_QPT_UD:
	case IB_QPT_GSI:
		if (queue_type == MANA_UD_SEND_QUEUE) {
			num_wr = attr->cap.max_send_wr;
			wqe_size = mana_ib_wqe_size(attr->cap.max_send_sge, INLINE_OOB_LARGE_SIZE);
		} else {
			num_wr = attr->cap.max_recv_wr;
			wqe_size = mana_ib_wqe_size(attr->cap.max_recv_sge, INLINE_OOB_SMALL_SIZE);
		}
		break;
	default:
		return 0;
	}

	return MANA_PAGE_ALIGN(roundup_pow_of_two(num_wr * wqe_size));
}
428+
429+
/* Map a mana_ib UD queue index to the corresponding GDMA hardware queue
 * type; GDMA_INVALID_QUEUE for QP types not handled here.
 */
static enum gdma_queue_type mana_ib_queue_type(struct ib_qp_init_attr *attr, u32 queue_type)
{
	switch (attr->qp_type) {
	case IB_QPT_UD:
	case IB_QPT_GSI:
		return queue_type == MANA_UD_SEND_QUEUE ? GDMA_SQ : GDMA_RQ;
	default:
		return GDMA_INVALID_QUEUE;
	}
}
446+
401447
static int mana_table_store_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
402448
{
403449
refcount_set(&qp->refcount, 1);
@@ -490,6 +536,51 @@ static int mana_ib_create_rc_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
490536
return err;
491537
}
492538

539+
/* Create a kernel-level UD/GSI QP: allocate the send and receive kernel
 * queues, then create the hardware QP object over them.
 * Returns 0 on success or a negative errno; on failure all queues
 * created so far are torn down.
 */
static int mana_ib_create_ud_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
				struct ib_qp_init_attr *attr, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev = container_of(ibpd->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct gdma_context *gc = mdev_to_gc(mdev);
	u32 doorbell, queue_size;
	int i, err;

	/* udata != NULL means a userspace caller; only kernel UD QPs are
	 * implemented here.
	 */
	if (udata) {
		ibdev_dbg(&mdev->ib_dev, "User-level UD QPs are not supported\n");
		return -EOPNOTSUPP;
	}

	/* Allocate one kernel queue per UD queue type (send and receive). */
	for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; ++i) {
		queue_size = mana_ib_queue_size(attr, i);
		err = mana_ib_create_kernel_queue(mdev, queue_size, mana_ib_queue_type(attr, i),
						  &qp->ud_qp.queues[i]);
		if (err) {
			ibdev_err(&mdev->ib_dev, "Failed to create queue %d, err %d\n",
				  i, err);
			goto destroy_queues;
		}
	}
	doorbell = gc->mana_ib.doorbell;

	err = mana_ib_gd_create_ud_qp(mdev, qp, attr, doorbell, attr->qp_type);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create ud qp %d\n", err);
		goto destroy_queues;
	}
	/* Expose the receive queue id as the QP number. */
	qp->ibqp.qp_num = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].id;
	qp->port = attr->port_num;

	/* Propagate the hardware-assigned ids back into the kernel queue
	 * memory descriptors.
	 */
	for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; ++i)
		qp->ud_qp.queues[i].kmem->id = qp->ud_qp.queues[i].id;

	return 0;

destroy_queues:
	/* Unwind only the queues created before the failure (indices < i). */
	while (i-- > 0)
		mana_ib_destroy_queue(mdev, &qp->ud_qp.queues[i]);
	return err;
}
583+
493584
int mana_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
494585
struct ib_udata *udata)
495586
{
@@ -503,6 +594,9 @@ int mana_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
503594
return mana_ib_create_qp_raw(ibqp, ibqp->pd, attr, udata);
504595
case IB_QPT_RC:
505596
return mana_ib_create_rc_qp(ibqp, ibqp->pd, attr, udata);
597+
case IB_QPT_UD:
598+
case IB_QPT_GSI:
599+
return mana_ib_create_ud_qp(ibqp, ibqp->pd, attr, udata);
506600
default:
507601
ibdev_dbg(ibqp->device, "Creating QP type %u not supported\n",
508602
attr->qp_type);
@@ -579,6 +673,8 @@ int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
579673
{
580674
switch (ibqp->qp_type) {
581675
case IB_QPT_RC:
676+
case IB_QPT_UD:
677+
case IB_QPT_GSI:
582678
return mana_ib_gd_modify_qp(ibqp, attr, attr_mask, udata);
583679
default:
584680
ibdev_dbg(ibqp->device, "Modify QP type %u not supported", ibqp->qp_type);
@@ -652,6 +748,22 @@ static int mana_ib_destroy_rc_qp(struct mana_ib_qp *qp, struct ib_udata *udata)
652748
return 0;
653749
}
654750

751+
static int mana_ib_destroy_ud_qp(struct mana_ib_qp *qp, struct ib_udata *udata)
752+
{
753+
struct mana_ib_dev *mdev =
754+
container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
755+
int i;
756+
757+
/* Ignore return code as there is not much we can do about it.
758+
* The error message is printed inside.
759+
*/
760+
mana_ib_gd_destroy_ud_qp(mdev, qp);
761+
for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; ++i)
762+
mana_ib_destroy_queue(mdev, &qp->ud_qp.queues[i]);
763+
764+
return 0;
765+
}
766+
655767
int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
656768
{
657769
struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
@@ -665,6 +777,9 @@ int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
665777
return mana_ib_destroy_qp_raw(qp, udata);
666778
case IB_QPT_RC:
667779
return mana_ib_destroy_rc_qp(qp, udata);
780+
case IB_QPT_UD:
781+
case IB_QPT_GSI:
782+
return mana_ib_destroy_ud_qp(qp, udata);
668783
default:
669784
ibdev_dbg(ibqp->device, "Unexpected QP type %u\n",
670785
ibqp->qp_type);

0 commit comments

Comments
 (0)