about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author	Stefan Metzmacher <metze@samba.org>	2025-08-06 19:35:56 +0200
committer	Steve French <stfrench@microsoft.com>	2025-09-28 18:29:51 -0500
commit	4c564f03e23b1ea55b77679192a4cb07f5b3e7f3 (patch)
tree	a8bf60e88845e767221738c028dd6232ba7e12b9
parent	smb: server: make use of common smbdirect.h (diff)
downloadlinux-4c564f03e23b1ea55b77679192a4cb07f5b3e7f3.tar.gz
linux-4c564f03e23b1ea55b77679192a4cb07f5b3e7f3.zip
smb: server: make use of common smbdirect_socket
This is the next step in the direction of a common smbdirect
layer. Currently only structures are shared, but that will
change over time until everything is shared.

Cc: Steve French <smfrench@gmail.com>
Cc: Tom Talpey <tom@talpey.com>
Cc: Long Li <longli@microsoft.com>
Cc: Namjae Jeon <linkinjeon@kernel.org>
Cc: Hyunchul Lee <hyc.lee@gmail.com>
Cc: Meetakshi Setiya <meetakshisetiyaoss@gmail.com>
Cc: linux-cifs@vger.kernel.org
Cc: samba-technical@lists.samba.org
Signed-off-by: Stefan Metzmacher <metze@samba.org>
Acked-by: Namjae Jeon <linkinjeon@kernel.org>
Signed-off-by: Steve French <stfrench@microsoft.com>
-rw-r--r--	fs/smb/server/transport_rdma.c	241
1 file changed, 133 insertions, 108 deletions
diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
index 72ae3c9714a6..e2d8ac087344 100644
--- a/fs/smb/server/transport_rdma.c
+++ b/fs/smb/server/transport_rdma.c
@@ -25,6 +25,7 @@
#include "../common/smb2status.h"
#include "../common/smbdirect/smbdirect.h"
#include "../common/smbdirect/smbdirect_pdu.h"
+#include "../common/smbdirect/smbdirect_socket.h"
#include "transport_rdma.h"
#define SMB_DIRECT_PORT_IWARP 5445
@@ -89,26 +90,14 @@ static struct smb_direct_listener {
static struct workqueue_struct *smb_direct_wq;
-enum smb_direct_status {
- SMB_DIRECT_CS_NEW = 0,
- SMB_DIRECT_CS_CONNECTED,
- SMB_DIRECT_CS_DISCONNECTING,
- SMB_DIRECT_CS_DISCONNECTED,
-};
-
struct smb_direct_transport {
struct ksmbd_transport transport;
- enum smb_direct_status status;
+ struct smbdirect_socket socket;
+
bool full_packet_received;
wait_queue_head_t wait_status;
- struct rdma_cm_id *cm_id;
- struct ib_cq *send_cq;
- struct ib_cq *recv_cq;
- struct ib_pd *pd;
- struct ib_qp *qp;
-
int max_send_size;
int max_recv_size;
int max_fragmented_send_size;
@@ -271,8 +260,10 @@ smb_direct_recvmsg *get_free_recvmsg(struct smb_direct_transport *t)
static void put_recvmsg(struct smb_direct_transport *t,
struct smb_direct_recvmsg *recvmsg)
{
+ struct smbdirect_socket *sc = &t->socket;
+
if (likely(recvmsg->sge.length != 0)) {
- ib_dma_unmap_single(t->cm_id->device,
+ ib_dma_unmap_single(sc->ib.dev,
recvmsg->sge.addr,
recvmsg->sge.length,
DMA_FROM_DEVICE);
@@ -316,17 +307,20 @@ static void smb_direct_disconnect_rdma_work(struct work_struct *work)
struct smb_direct_transport *t =
container_of(work, struct smb_direct_transport,
disconnect_work);
+ struct smbdirect_socket *sc = &t->socket;
- if (t->status == SMB_DIRECT_CS_CONNECTED) {
- t->status = SMB_DIRECT_CS_DISCONNECTING;
- rdma_disconnect(t->cm_id);
+ if (sc->status == SMBDIRECT_SOCKET_CONNECTED) {
+ sc->status = SMBDIRECT_SOCKET_DISCONNECTING;
+ rdma_disconnect(sc->rdma.cm_id);
}
}
static void
smb_direct_disconnect_rdma_connection(struct smb_direct_transport *t)
{
- if (t->status == SMB_DIRECT_CS_CONNECTED)
+ struct smbdirect_socket *sc = &t->socket;
+
+ if (sc->status == SMBDIRECT_SOCKET_CONNECTED)
queue_work(smb_direct_wq, &t->disconnect_work);
}
@@ -334,8 +328,9 @@ static void smb_direct_send_immediate_work(struct work_struct *work)
{
struct smb_direct_transport *t = container_of(work,
struct smb_direct_transport, send_immediate_work);
+ struct smbdirect_socket *sc = &t->socket;
- if (t->status != SMB_DIRECT_CS_CONNECTED)
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED)
return;
smb_direct_post_send_data(t, NULL, NULL, 0, 0);
@@ -344,19 +339,23 @@ static void smb_direct_send_immediate_work(struct work_struct *work)
static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
{
struct smb_direct_transport *t;
+ struct smbdirect_socket *sc;
struct ksmbd_conn *conn;
t = kzalloc(sizeof(*t), KSMBD_DEFAULT_GFP);
if (!t)
return NULL;
+ sc = &t->socket;
- t->cm_id = cm_id;
+ sc->rdma.cm_id = cm_id;
cm_id->context = t;
t->initiator_depth = SMB_DIRECT_CM_INITIATOR_DEPTH;
t->responder_resources = 1;
- t->status = SMB_DIRECT_CS_NEW;
+ sc->ib.dev = sc->rdma.cm_id->device;
+
+ sc->status = SMBDIRECT_SOCKET_CREATED;
init_waitqueue_head(&t->wait_status);
spin_lock_init(&t->reassembly_queue_lock);
@@ -400,6 +399,7 @@ static void smb_direct_free_transport(struct ksmbd_transport *kt)
static void free_transport(struct smb_direct_transport *t)
{
+ struct smbdirect_socket *sc = &t->socket;
struct smb_direct_recvmsg *recvmsg;
wake_up_interruptible(&t->wait_send_credits);
@@ -412,11 +412,11 @@ static void free_transport(struct smb_direct_transport *t)
disable_work_sync(&t->post_recv_credits_work);
disable_work_sync(&t->send_immediate_work);
- if (t->qp) {
- ib_drain_qp(t->qp);
- ib_mr_pool_destroy(t->qp, &t->qp->rdma_mrs);
- t->qp = NULL;
- rdma_destroy_qp(t->cm_id);
+ if (sc->ib.qp) {
+ ib_drain_qp(sc->ib.qp);
+ ib_mr_pool_destroy(sc->ib.qp, &sc->ib.qp->rdma_mrs);
+ sc->ib.qp = NULL;
+ rdma_destroy_qp(sc->rdma.cm_id);
}
ksmbd_debug(RDMA, "drain the reassembly queue\n");
@@ -433,14 +433,14 @@ static void free_transport(struct smb_direct_transport *t)
} while (recvmsg);
t->reassembly_data_length = 0;
- if (t->send_cq)
- ib_free_cq(t->send_cq);
- if (t->recv_cq)
- ib_free_cq(t->recv_cq);
- if (t->pd)
- ib_dealloc_pd(t->pd);
- if (t->cm_id)
- rdma_destroy_id(t->cm_id);
+ if (sc->ib.send_cq)
+ ib_free_cq(sc->ib.send_cq);
+ if (sc->ib.recv_cq)
+ ib_free_cq(sc->ib.recv_cq);
+ if (sc->ib.pd)
+ ib_dealloc_pd(sc->ib.pd);
+ if (sc->rdma.cm_id)
+ rdma_destroy_id(sc->rdma.cm_id);
smb_direct_destroy_pools(t);
ksmbd_conn_free(KSMBD_TRANS(t)->conn);
@@ -463,14 +463,15 @@ static struct smb_direct_sendmsg
static void smb_direct_free_sendmsg(struct smb_direct_transport *t,
struct smb_direct_sendmsg *msg)
{
+ struct smbdirect_socket *sc = &t->socket;
int i;
if (msg->num_sge > 0) {
- ib_dma_unmap_single(t->cm_id->device,
+ ib_dma_unmap_single(sc->ib.dev,
msg->sge[0].addr, msg->sge[0].length,
DMA_TO_DEVICE);
for (i = 1; i < msg->num_sge; i++)
- ib_dma_unmap_page(t->cm_id->device,
+ ib_dma_unmap_page(sc->ib.dev,
msg->sge[i].addr, msg->sge[i].length,
DMA_TO_DEVICE);
}
@@ -525,9 +526,11 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct smb_direct_recvmsg *recvmsg;
struct smb_direct_transport *t;
+ struct smbdirect_socket *sc;
recvmsg = container_of(wc->wr_cqe, struct smb_direct_recvmsg, cqe);
t = recvmsg->transport;
+ sc = &t->socket;
if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
put_recvmsg(t, recvmsg);
@@ -556,7 +559,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
}
t->negotiation_requested = true;
t->full_packet_received = true;
- t->status = SMB_DIRECT_CS_CONNECTED;
+ sc->status = SMBDIRECT_SOCKET_CONNECTED;
enqueue_reassembly(t, recvmsg, 0);
wake_up_interruptible(&t->wait_status);
return;
@@ -647,17 +650,18 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
static int smb_direct_post_recv(struct smb_direct_transport *t,
struct smb_direct_recvmsg *recvmsg)
{
+ struct smbdirect_socket *sc = &t->socket;
struct ib_recv_wr wr;
int ret;
- recvmsg->sge.addr = ib_dma_map_single(t->cm_id->device,
+ recvmsg->sge.addr = ib_dma_map_single(sc->ib.dev,
recvmsg->packet, t->max_recv_size,
DMA_FROM_DEVICE);
- ret = ib_dma_mapping_error(t->cm_id->device, recvmsg->sge.addr);
+ ret = ib_dma_mapping_error(sc->ib.dev, recvmsg->sge.addr);
if (ret)
return ret;
recvmsg->sge.length = t->max_recv_size;
- recvmsg->sge.lkey = t->pd->local_dma_lkey;
+ recvmsg->sge.lkey = sc->ib.pd->local_dma_lkey;
recvmsg->cqe.done = recv_done;
wr.wr_cqe = &recvmsg->cqe;
@@ -665,10 +669,10 @@ static int smb_direct_post_recv(struct smb_direct_transport *t,
wr.sg_list = &recvmsg->sge;
wr.num_sge = 1;
- ret = ib_post_recv(t->qp, &wr, NULL);
+ ret = ib_post_recv(sc->ib.qp, &wr, NULL);
if (ret) {
pr_err("Can't post recv: %d\n", ret);
- ib_dma_unmap_single(t->cm_id->device,
+ ib_dma_unmap_single(sc->ib.dev,
recvmsg->sge.addr, recvmsg->sge.length,
DMA_FROM_DEVICE);
recvmsg->sge.length = 0;
@@ -687,9 +691,10 @@ static int smb_direct_read(struct ksmbd_transport *t, char *buf,
u32 data_length, remaining_data_length, data_offset;
int rc;
struct smb_direct_transport *st = smb_trans_direct_transfort(t);
+ struct smbdirect_socket *sc = &st->socket;
again:
- if (st->status != SMB_DIRECT_CS_CONNECTED) {
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
pr_err("disconnected\n");
return -ENOTCONN;
}
@@ -798,7 +803,7 @@ read_rfc1002_done:
ksmbd_debug(RDMA, "wait_event on more data\n");
rc = wait_event_interruptible(st->wait_reassembly_queue,
st->reassembly_data_length >= size ||
- st->status != SMB_DIRECT_CS_CONNECTED);
+ sc->status != SMBDIRECT_SOCKET_CONNECTED);
if (rc)
return -EINTR;
@@ -900,10 +905,11 @@ static int manage_credits_prior_sending(struct smb_direct_transport *t)
static int smb_direct_post_send(struct smb_direct_transport *t,
struct ib_send_wr *wr)
{
+ struct smbdirect_socket *sc = &t->socket;
int ret;
atomic_inc(&t->send_pending);
- ret = ib_post_send(t->qp, wr, NULL);
+ ret = ib_post_send(sc->ib.qp, wr, NULL);
if (ret) {
pr_err("failed to post send: %d\n", ret);
if (atomic_dec_and_test(&t->send_pending))
@@ -968,6 +974,7 @@ static int wait_for_credits(struct smb_direct_transport *t,
wait_queue_head_t *waitq, atomic_t *total_credits,
int needed)
{
+ struct smbdirect_socket *sc = &t->socket;
int ret;
do {
@@ -977,9 +984,9 @@ static int wait_for_credits(struct smb_direct_transport *t,
atomic_add(needed, total_credits);
ret = wait_event_interruptible(*waitq,
atomic_read(total_credits) >= needed ||
- t->status != SMB_DIRECT_CS_CONNECTED);
+ sc->status != SMBDIRECT_SOCKET_CONNECTED);
- if (t->status != SMB_DIRECT_CS_CONNECTED)
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED)
return -ENOTCONN;
else if (ret < 0)
return ret;
@@ -1017,6 +1024,7 @@ static int smb_direct_create_header(struct smb_direct_transport *t,
int size, int remaining_data_length,
struct smb_direct_sendmsg **sendmsg_out)
{
+ struct smbdirect_socket *sc = &t->socket;
struct smb_direct_sendmsg *sendmsg;
struct smbdirect_data_transfer *packet;
int header_length;
@@ -1056,11 +1064,11 @@ static int smb_direct_create_header(struct smb_direct_transport *t,
header_length =
offsetof(struct smbdirect_data_transfer, padding);
- sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device,
+ sendmsg->sge[0].addr = ib_dma_map_single(sc->ib.dev,
(void *)packet,
header_length,
DMA_TO_DEVICE);
- ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr);
+ ret = ib_dma_mapping_error(sc->ib.dev, sendmsg->sge[0].addr);
if (ret) {
smb_direct_free_sendmsg(t, sendmsg);
return ret;
@@ -1068,7 +1076,7 @@ static int smb_direct_create_header(struct smb_direct_transport *t,
sendmsg->num_sge = 1;
sendmsg->sge[0].length = header_length;
- sendmsg->sge[0].lkey = t->pd->local_dma_lkey;
+ sendmsg->sge[0].lkey = sc->ib.pd->local_dma_lkey;
*sendmsg_out = sendmsg;
return 0;
@@ -1122,10 +1130,11 @@ static int post_sendmsg(struct smb_direct_transport *t,
struct smb_direct_send_ctx *send_ctx,
struct smb_direct_sendmsg *msg)
{
+ struct smbdirect_socket *sc = &t->socket;
int i;
for (i = 0; i < msg->num_sge; i++)
- ib_dma_sync_single_for_device(t->cm_id->device,
+ ib_dma_sync_single_for_device(sc->ib.dev,
msg->sge[i].addr, msg->sge[i].length,
DMA_TO_DEVICE);
@@ -1161,6 +1170,7 @@ static int smb_direct_post_send_data(struct smb_direct_transport *t,
struct kvec *iov, int niov,
int remaining_data_length)
{
+ struct smbdirect_socket *sc = &t->socket;
int i, j, ret;
struct smb_direct_sendmsg *msg;
int data_length;
@@ -1186,7 +1196,7 @@ static int smb_direct_post_send_data(struct smb_direct_transport *t,
int sg_cnt;
sg_init_table(sg, SMB_DIRECT_MAX_SEND_SGES - 1);
- sg_cnt = get_mapped_sg_list(t->cm_id->device,
+ sg_cnt = get_mapped_sg_list(sc->ib.dev,
iov[i].iov_base, iov[i].iov_len,
sg, SMB_DIRECT_MAX_SEND_SGES - 1,
DMA_TO_DEVICE);
@@ -1197,7 +1207,7 @@ static int smb_direct_post_send_data(struct smb_direct_transport *t,
} else if (sg_cnt + msg->num_sge > SMB_DIRECT_MAX_SEND_SGES) {
pr_err("buffer not fitted into sges\n");
ret = -E2BIG;
- ib_dma_unmap_sg(t->cm_id->device, sg, sg_cnt,
+ ib_dma_unmap_sg(sc->ib.dev, sg, sg_cnt,
DMA_TO_DEVICE);
goto err;
}
@@ -1206,7 +1216,7 @@ static int smb_direct_post_send_data(struct smb_direct_transport *t,
sge = &msg->sge[msg->num_sge];
sge->addr = sg_dma_address(&sg[j]);
sge->length = sg_dma_len(&sg[j]);
- sge->lkey = t->pd->local_dma_lkey;
+ sge->lkey = sc->ib.pd->local_dma_lkey;
msg->num_sge++;
}
}
@@ -1226,6 +1236,7 @@ static int smb_direct_writev(struct ksmbd_transport *t,
bool need_invalidate, unsigned int remote_key)
{
struct smb_direct_transport *st = smb_trans_direct_transfort(t);
+ struct smbdirect_socket *sc = &st->socket;
size_t remaining_data_length;
size_t iov_idx;
size_t iov_ofs;
@@ -1235,7 +1246,7 @@ static int smb_direct_writev(struct ksmbd_transport *t,
struct smb_direct_send_ctx send_ctx;
int error = 0;
- if (st->status != SMB_DIRECT_CS_CONNECTED)
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED)
return -ENOTCONN;
//FIXME: skip RFC1002 header..
@@ -1367,7 +1378,9 @@ static void smb_direct_free_rdma_rw_msg(struct smb_direct_transport *t,
struct smb_direct_rdma_rw_msg *msg,
enum dma_data_direction dir)
{
- rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port,
+ struct smbdirect_socket *sc = &t->socket;
+
+ rdma_rw_ctx_destroy(&msg->rw_ctx, sc->ib.qp, sc->ib.qp->port,
msg->sgt.sgl, msg->sgt.nents, dir);
sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
kfree(msg);
@@ -1407,6 +1420,7 @@ static int smb_direct_rdma_xmit(struct smb_direct_transport *t,
unsigned int desc_len,
bool is_read)
{
+ struct smbdirect_socket *sc = &t->socket;
struct smb_direct_rdma_rw_msg *msg, *next_msg;
int i, ret;
DECLARE_COMPLETION_ONSTACK(completion);
@@ -1416,7 +1430,7 @@ static int smb_direct_rdma_xmit(struct smb_direct_transport *t,
int credits_needed;
unsigned int desc_buf_len, desc_num = 0;
- if (t->status != SMB_DIRECT_CS_CONNECTED)
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED)
return -ENOTCONN;
if (buf_len > t->max_rdma_rw_size)
@@ -1486,7 +1500,7 @@ static int smb_direct_rdma_xmit(struct smb_direct_transport *t,
goto out;
}
- ret = rdma_rw_ctx_init(&msg->rw_ctx, t->qp, t->qp->port,
+ ret = rdma_rw_ctx_init(&msg->rw_ctx, sc->ib.qp, sc->ib.qp->port,
msg->sgt.sgl,
get_buf_page_count(desc_buf, desc_buf_len),
0,
@@ -1507,11 +1521,11 @@ static int smb_direct_rdma_xmit(struct smb_direct_transport *t,
/* concatenate work requests of rdma_rw_ctxs */
first_wr = NULL;
list_for_each_entry_reverse(msg, &msg_list, list) {
- first_wr = rdma_rw_ctx_wrs(&msg->rw_ctx, t->qp, t->qp->port,
+ first_wr = rdma_rw_ctx_wrs(&msg->rw_ctx, sc->ib.qp, sc->ib.qp->port,
&msg->cqe, first_wr);
}
- ret = ib_post_send(t->qp, first_wr, NULL);
+ ret = ib_post_send(sc->ib.qp, first_wr, NULL);
if (ret) {
pr_err("failed to post send wr for RDMA R/W: %d\n", ret);
goto out;
@@ -1552,20 +1566,22 @@ static int smb_direct_rdma_read(struct ksmbd_transport *t,
static void smb_direct_disconnect(struct ksmbd_transport *t)
{
struct smb_direct_transport *st = smb_trans_direct_transfort(t);
+ struct smbdirect_socket *sc = &st->socket;
- ksmbd_debug(RDMA, "Disconnecting cm_id=%p\n", st->cm_id);
+ ksmbd_debug(RDMA, "Disconnecting cm_id=%p\n", sc->rdma.cm_id);
smb_direct_disconnect_rdma_work(&st->disconnect_work);
wait_event_interruptible(st->wait_status,
- st->status == SMB_DIRECT_CS_DISCONNECTED);
+ sc->status == SMBDIRECT_SOCKET_DISCONNECTED);
free_transport(st);
}
static void smb_direct_shutdown(struct ksmbd_transport *t)
{
struct smb_direct_transport *st = smb_trans_direct_transfort(t);
+ struct smbdirect_socket *sc = &st->socket;
- ksmbd_debug(RDMA, "smb-direct shutdown cm_id=%p\n", st->cm_id);
+ ksmbd_debug(RDMA, "smb-direct shutdown cm_id=%p\n", sc->rdma.cm_id);
smb_direct_disconnect_rdma_work(&st->disconnect_work);
}
@@ -1574,28 +1590,29 @@ static int smb_direct_cm_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event)
{
struct smb_direct_transport *t = cm_id->context;
+ struct smbdirect_socket *sc = &t->socket;
ksmbd_debug(RDMA, "RDMA CM event. cm_id=%p event=%s (%d)\n",
cm_id, rdma_event_msg(event->event), event->event);
switch (event->event) {
case RDMA_CM_EVENT_ESTABLISHED: {
- t->status = SMB_DIRECT_CS_CONNECTED;
+ sc->status = SMBDIRECT_SOCKET_CONNECTED;
wake_up_interruptible(&t->wait_status);
break;
}
case RDMA_CM_EVENT_DEVICE_REMOVAL:
case RDMA_CM_EVENT_DISCONNECTED: {
- ib_drain_qp(t->qp);
+ ib_drain_qp(sc->ib.qp);
- t->status = SMB_DIRECT_CS_DISCONNECTED;
+ sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
wake_up_interruptible(&t->wait_status);
wake_up_interruptible(&t->wait_reassembly_queue);
wake_up(&t->wait_send_credits);
break;
}
case RDMA_CM_EVENT_CONNECT_ERROR: {
- t->status = SMB_DIRECT_CS_DISCONNECTED;
+ sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
wake_up_interruptible(&t->wait_status);
break;
}
@@ -1611,9 +1628,10 @@ static int smb_direct_cm_handler(struct rdma_cm_id *cm_id,
static void smb_direct_qpair_handler(struct ib_event *event, void *context)
{
struct smb_direct_transport *t = context;
+ struct smbdirect_socket *sc = &t->socket;
ksmbd_debug(RDMA, "Received QP event. cm_id=%p, event=%s (%d)\n",
- t->cm_id, ib_event_msg(event->event), event->event);
+ sc->rdma.cm_id, ib_event_msg(event->event), event->event);
switch (event->event) {
case IB_EVENT_CQ_ERR:
@@ -1628,6 +1646,7 @@ static void smb_direct_qpair_handler(struct ib_event *event, void *context)
static int smb_direct_send_negotiate_response(struct smb_direct_transport *t,
int failed)
{
+ struct smbdirect_socket *sc = &t->socket;
struct smb_direct_sendmsg *sendmsg;
struct smbdirect_negotiate_resp *resp;
int ret;
@@ -1658,10 +1677,10 @@ static int smb_direct_send_negotiate_response(struct smb_direct_transport *t,
cpu_to_le32(t->max_fragmented_recv_size);
}
- sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device,
+ sendmsg->sge[0].addr = ib_dma_map_single(sc->ib.dev,
(void *)resp, sizeof(*resp),
DMA_TO_DEVICE);
- ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr);
+ ret = ib_dma_mapping_error(sc->ib.dev, sendmsg->sge[0].addr);
if (ret) {
smb_direct_free_sendmsg(t, sendmsg);
return ret;
@@ -1669,7 +1688,7 @@ static int smb_direct_send_negotiate_response(struct smb_direct_transport *t,
sendmsg->num_sge = 1;
sendmsg->sge[0].length = sizeof(*resp);
- sendmsg->sge[0].lkey = t->pd->local_dma_lkey;
+ sendmsg->sge[0].lkey = sc->ib.pd->local_dma_lkey;
ret = post_sendmsg(t, NULL, sendmsg);
if (ret) {
@@ -1684,6 +1703,7 @@ static int smb_direct_send_negotiate_response(struct smb_direct_transport *t,
static int smb_direct_accept_client(struct smb_direct_transport *t)
{
+ struct smbdirect_socket *sc = &t->socket;
struct rdma_conn_param conn_param;
__be32 ird_ord_hdr[2];
int ret;
@@ -1710,7 +1730,7 @@ static int smb_direct_accept_client(struct smb_direct_transport *t)
conn_param.rnr_retry_count = SMB_DIRECT_CM_RNR_RETRY;
conn_param.flow_control = 0;
- ret = rdma_accept(t->cm_id, &conn_param);
+ ret = rdma_accept(sc->rdma.cm_id, &conn_param);
if (ret) {
pr_err("error at rdma_accept: %d\n", ret);
return ret;
@@ -1750,15 +1770,18 @@ out_err:
static unsigned int smb_direct_get_max_fr_pages(struct smb_direct_transport *t)
{
+ struct smbdirect_socket *sc = &t->socket;
+
return min_t(unsigned int,
- t->cm_id->device->attrs.max_fast_reg_page_list_len,
+ sc->ib.dev->attrs.max_fast_reg_page_list_len,
256);
}
static int smb_direct_init_params(struct smb_direct_transport *t,
struct ib_qp_cap *cap)
{
- struct ib_device *device = t->cm_id->device;
+ struct smbdirect_socket *sc = &t->socket;
+ struct ib_device *device = sc->ib.dev;
int max_send_sges, max_rw_wrs, max_send_wrs;
unsigned int max_sge_per_wr, wrs_per_credit;
@@ -1923,34 +1946,35 @@ err:
static int smb_direct_create_qpair(struct smb_direct_transport *t,
struct ib_qp_cap *cap)
{
+ struct smbdirect_socket *sc = &t->socket;
int ret;
struct ib_qp_init_attr qp_attr;
int pages_per_rw;
- t->pd = ib_alloc_pd(t->cm_id->device, 0);
- if (IS_ERR(t->pd)) {
+ sc->ib.pd = ib_alloc_pd(sc->ib.dev, 0);
+ if (IS_ERR(sc->ib.pd)) {
pr_err("Can't create RDMA PD\n");
- ret = PTR_ERR(t->pd);
- t->pd = NULL;
+ ret = PTR_ERR(sc->ib.pd);
+ sc->ib.pd = NULL;
return ret;
}
- t->send_cq = ib_alloc_cq(t->cm_id->device, t,
+ sc->ib.send_cq = ib_alloc_cq(sc->ib.dev, t,
smb_direct_send_credit_target + cap->max_rdma_ctxs,
0, IB_POLL_WORKQUEUE);
- if (IS_ERR(t->send_cq)) {
+ if (IS_ERR(sc->ib.send_cq)) {
pr_err("Can't create RDMA send CQ\n");
- ret = PTR_ERR(t->send_cq);
- t->send_cq = NULL;
+ ret = PTR_ERR(sc->ib.send_cq);
+ sc->ib.send_cq = NULL;
goto err;
}
- t->recv_cq = ib_alloc_cq(t->cm_id->device, t,
+ sc->ib.recv_cq = ib_alloc_cq(sc->ib.dev, t,
t->recv_credit_max, 0, IB_POLL_WORKQUEUE);
- if (IS_ERR(t->recv_cq)) {
+ if (IS_ERR(sc->ib.recv_cq)) {
pr_err("Can't create RDMA recv CQ\n");
- ret = PTR_ERR(t->recv_cq);
- t->recv_cq = NULL;
+ ret = PTR_ERR(sc->ib.recv_cq);
+ sc->ib.recv_cq = NULL;
goto err;
}
@@ -1960,22 +1984,22 @@ static int smb_direct_create_qpair(struct smb_direct_transport *t,
qp_attr.cap = *cap;
qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
qp_attr.qp_type = IB_QPT_RC;
- qp_attr.send_cq = t->send_cq;
- qp_attr.recv_cq = t->recv_cq;
+ qp_attr.send_cq = sc->ib.send_cq;
+ qp_attr.recv_cq = sc->ib.recv_cq;
qp_attr.port_num = ~0;
- ret = rdma_create_qp(t->cm_id, t->pd, &qp_attr);
+ ret = rdma_create_qp(sc->rdma.cm_id, sc->ib.pd, &qp_attr);
if (ret) {
pr_err("Can't create RDMA QP: %d\n", ret);
goto err;
}
- t->qp = t->cm_id->qp;
- t->cm_id->event_handler = smb_direct_cm_handler;
+ sc->ib.qp = sc->rdma.cm_id->qp;
+ sc->rdma.cm_id->event_handler = smb_direct_cm_handler;
pages_per_rw = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1;
- if (pages_per_rw > t->cm_id->device->attrs.max_sgl_rd) {
- ret = ib_mr_pool_init(t->qp, &t->qp->rdma_mrs,
+ if (pages_per_rw > sc->ib.dev->attrs.max_sgl_rd) {
+ ret = ib_mr_pool_init(sc->ib.qp, &sc->ib.qp->rdma_mrs,
t->max_rw_credits, IB_MR_TYPE_MEM_REG,
t->pages_per_rw_credit, 0);
if (ret) {
@@ -1987,21 +2011,21 @@ static int smb_direct_create_qpair(struct smb_direct_transport *t,
return 0;
err:
- if (t->qp) {
- t->qp = NULL;
- rdma_destroy_qp(t->cm_id);
+ if (sc->ib.qp) {
+ sc->ib.qp = NULL;
+ rdma_destroy_qp(sc->rdma.cm_id);
}
- if (t->recv_cq) {
- ib_destroy_cq(t->recv_cq);
- t->recv_cq = NULL;
+ if (sc->ib.recv_cq) {
+ ib_destroy_cq(sc->ib.recv_cq);
+ sc->ib.recv_cq = NULL;
}
- if (t->send_cq) {
- ib_destroy_cq(t->send_cq);
- t->send_cq = NULL;
+ if (sc->ib.send_cq) {
+ ib_destroy_cq(sc->ib.send_cq);
+ sc->ib.send_cq = NULL;
}
- if (t->pd) {
- ib_dealloc_pd(t->pd);
- t->pd = NULL;
+ if (sc->ib.pd) {
+ ib_dealloc_pd(sc->ib.pd);
+ sc->ib.pd = NULL;
}
return ret;
}
@@ -2009,6 +2033,7 @@ err:
static int smb_direct_prepare(struct ksmbd_transport *t)
{
struct smb_direct_transport *st = smb_trans_direct_transfort(t);
+ struct smbdirect_socket *sc = &st->socket;
struct smb_direct_recvmsg *recvmsg;
struct smbdirect_negotiate_req *req;
int ret;
@@ -2016,9 +2041,9 @@ static int smb_direct_prepare(struct ksmbd_transport *t)
ksmbd_debug(RDMA, "Waiting for SMB_DIRECT negotiate request\n");
ret = wait_event_interruptible_timeout(st->wait_status,
st->negotiation_requested ||
- st->status == SMB_DIRECT_CS_DISCONNECTED,
+ sc->status == SMBDIRECT_SOCKET_DISCONNECTED,
SMB_DIRECT_NEGOTIATE_TIMEOUT * HZ);
- if (ret <= 0 || st->status == SMB_DIRECT_CS_DISCONNECTED)
+ if (ret <= 0 || sc->status == SMBDIRECT_SOCKET_DISCONNECTED)
return ret < 0 ? ret : -ETIMEDOUT;
recvmsg = get_first_reassembly(st);