author     Stefan Metzmacher <metze@samba.org>    2025-08-06 19:35:57 +0200
committer  Steve French <stfrench@microsoft.com>  2025-09-28 18:29:51 -0500
commit     177368b9924314bde7d2ea6dc93de0d9ba728b61 (patch)
tree       d5b99fe97add0eef57685beb1d9b5569746e27e3
parent     smb: server: make use of common smbdirect_socket (diff)
smb: server: make use of common smbdirect_socket_parameters
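
Instead of keeping per-connection size and credit limits as ad-hoc ints in
struct smb_direct_transport, the server now reads and writes them through the
common struct smbdirect_socket_parameters hanging off struct smbdirect_socket.

For orientation, a minimal sketch of the fields this patch touches, inferred
from the hunks below; the real definition lives in the shared smbdirect
headers under fs/smb/common/, and the exact types and any additional members
are assumptions here, not the authoritative layout:

struct smbdirect_socket_parameters {
	u16 recv_credit_max;           /* was t->recv_credit_max */
	u16 send_credit_target;        /* was t->send_credit_target */
	u32 max_send_size;             /* was t->max_send_size */
	u32 max_fragmented_send_size;  /* was t->max_fragmented_send_size */
	u32 max_recv_size;             /* was t->max_recv_size */
	u32 max_fragmented_recv_size;  /* was t->max_fragmented_recv_size */
	u32 max_read_write_size;       /* was t->max_rdma_rw_size */
	/* ... further members may exist in the shared header ... */
};

Typical access pattern after this change, as used throughout the diff:

	struct smbdirect_socket *sc = &t->socket;
	struct smbdirect_socket_parameters *sp = &sc->parameters;

	if (buf_len > sp->max_read_write_size)
		return -EINVAL;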
Cc: Steve French <smfrench@gmail.com>
Cc: Tom Talpey <tom@talpey.com>
Cc: Long Li <longli@microsoft.com>
Cc: Namjae Jeon <linkinjeon@kernel.org>
Cc: Hyunchul Lee <hyc.lee@gmail.com>
Cc: Meetakshi Setiya <meetakshisetiyaoss@gmail.com>
Cc: linux-cifs@vger.kernel.org
Cc: samba-technical@lists.samba.org
Signed-off-by: Stefan Metzmacher <metze@samba.org>
Acked-by: Namjae Jeon <linkinjeon@kernel.org>
Signed-off-by: Steve French <stfrench@microsoft.com>
-rw-r--r--  fs/smb/server/transport_rdma.c | 93
1 file changed, 49 insertions(+), 44 deletions(-)
diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
index e2d8ac087344..499bb2c0bd79 100644
--- a/fs/smb/server/transport_rdma.c
+++ b/fs/smb/server/transport_rdma.c
@@ -98,12 +98,6 @@ struct smb_direct_transport {
bool full_packet_received;
wait_queue_head_t wait_status;
- int max_send_size;
- int max_recv_size;
- int max_fragmented_send_size;
- int max_fragmented_recv_size;
- int max_rdma_rw_size;
-
spinlock_t reassembly_queue_lock;
struct list_head reassembly_queue;
int reassembly_data_length;
@@ -114,13 +108,11 @@ struct smb_direct_transport {
spinlock_t receive_credit_lock;
int recv_credits;
int count_avail_recvmsg;
- int recv_credit_max;
int recv_credit_target;
spinlock_t recvmsg_queue_lock;
struct list_head recvmsg_queue;
- int send_credit_target;
atomic_t send_credits;
spinlock_t lock_new_recv_credits;
int new_recv_credits;
@@ -527,10 +519,12 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
struct smb_direct_recvmsg *recvmsg;
struct smb_direct_transport *t;
struct smbdirect_socket *sc;
+ struct smbdirect_socket_parameters *sp;
recvmsg = container_of(wc->wr_cqe, struct smb_direct_recvmsg, cqe);
t = recvmsg->transport;
sc = &t->socket;
+ sp = &sc->parameters;
if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
put_recvmsg(t, recvmsg);
@@ -585,10 +579,10 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
smb_direct_disconnect_rdma_connection(t);
return;
}
- if (remaining_data_length > t->max_fragmented_recv_size ||
- data_length > t->max_fragmented_recv_size ||
+ if (remaining_data_length > sp->max_fragmented_recv_size ||
+ data_length > sp->max_fragmented_recv_size ||
(u64)remaining_data_length + (u64)data_length >
- (u64)t->max_fragmented_recv_size) {
+ (u64)sp->max_fragmented_recv_size) {
put_recvmsg(t, recvmsg);
smb_direct_disconnect_rdma_connection(t);
return;
@@ -651,16 +645,18 @@ static int smb_direct_post_recv(struct smb_direct_transport *t,
struct smb_direct_recvmsg *recvmsg)
{
struct smbdirect_socket *sc = &t->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
struct ib_recv_wr wr;
int ret;
recvmsg->sge.addr = ib_dma_map_single(sc->ib.dev,
- recvmsg->packet, t->max_recv_size,
+ recvmsg->packet,
+ sp->max_recv_size,
DMA_FROM_DEVICE);
ret = ib_dma_mapping_error(sc->ib.dev, recvmsg->sge.addr);
if (ret)
return ret;
- recvmsg->sge.length = t->max_recv_size;
+ recvmsg->sge.length = sp->max_recv_size;
recvmsg->sge.lkey = sc->ib.pd->local_dma_lkey;
recvmsg->cqe.done = recv_done;
@@ -1025,6 +1021,7 @@ static int smb_direct_create_header(struct smb_direct_transport *t,
struct smb_direct_sendmsg **sendmsg_out)
{
struct smbdirect_socket *sc = &t->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
struct smb_direct_sendmsg *sendmsg;
struct smbdirect_data_transfer *packet;
int header_length;
@@ -1036,7 +1033,7 @@ static int smb_direct_create_header(struct smb_direct_transport *t,
/* Fill in the packet header */
packet = (struct smbdirect_data_transfer *)sendmsg->packet;
- packet->credits_requested = cpu_to_le16(t->send_credit_target);
+ packet->credits_requested = cpu_to_le16(sp->send_credit_target);
packet->credits_granted = cpu_to_le16(manage_credits_prior_sending(t));
packet->flags = 0;
@@ -1237,10 +1234,11 @@ static int smb_direct_writev(struct ksmbd_transport *t,
{
struct smb_direct_transport *st = smb_trans_direct_transfort(t);
struct smbdirect_socket *sc = &st->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
size_t remaining_data_length;
size_t iov_idx;
size_t iov_ofs;
- size_t max_iov_size = st->max_send_size -
+ size_t max_iov_size = sp->max_send_size -
sizeof(struct smbdirect_data_transfer);
int ret;
struct smb_direct_send_ctx send_ctx;
@@ -1421,6 +1419,7 @@ static int smb_direct_rdma_xmit(struct smb_direct_transport *t,
bool is_read)
{
struct smbdirect_socket *sc = &t->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
struct smb_direct_rdma_rw_msg *msg, *next_msg;
int i, ret;
DECLARE_COMPLETION_ONSTACK(completion);
@@ -1433,7 +1432,7 @@ static int smb_direct_rdma_xmit(struct smb_direct_transport *t,
if (sc->status != SMBDIRECT_SOCKET_CONNECTED)
return -ENOTCONN;
- if (buf_len > t->max_rdma_rw_size)
+ if (buf_len > sp->max_read_write_size)
return -EINVAL;
/* calculate needed credits */
@@ -1647,6 +1646,7 @@ static int smb_direct_send_negotiate_response(struct smb_direct_transport *t,
int failed)
{
struct smbdirect_socket *sc = &t->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
struct smb_direct_sendmsg *sendmsg;
struct smbdirect_negotiate_resp *resp;
int ret;
@@ -1668,13 +1668,13 @@ static int smb_direct_send_negotiate_response(struct smb_direct_transport *t,
resp->negotiated_version = SMB_DIRECT_VERSION_LE;
resp->reserved = 0;
resp->credits_requested =
- cpu_to_le16(t->send_credit_target);
+ cpu_to_le16(sp->send_credit_target);
resp->credits_granted = cpu_to_le16(manage_credits_prior_sending(t));
- resp->max_readwrite_size = cpu_to_le32(t->max_rdma_rw_size);
- resp->preferred_send_size = cpu_to_le32(t->max_send_size);
- resp->max_receive_size = cpu_to_le32(t->max_recv_size);
+ resp->max_readwrite_size = cpu_to_le32(sp->max_read_write_size);
+ resp->preferred_send_size = cpu_to_le32(sp->max_send_size);
+ resp->max_receive_size = cpu_to_le32(sp->max_recv_size);
resp->max_fragmented_size =
- cpu_to_le32(t->max_fragmented_recv_size);
+ cpu_to_le32(sp->max_fragmented_recv_size);
}
sendmsg->sge[0].addr = ib_dma_map_single(sc->ib.dev,
@@ -1781,6 +1781,7 @@ static int smb_direct_init_params(struct smb_direct_transport *t,
struct ib_qp_cap *cap)
{
struct smbdirect_socket *sc = &t->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
struct ib_device *device = sc->ib.dev;
int max_send_sges, max_rw_wrs, max_send_wrs;
unsigned int max_sge_per_wr, wrs_per_credit;
@@ -1788,10 +1789,10 @@ static int smb_direct_init_params(struct smb_direct_transport *t,
/* need 3 more sge. because a SMB_DIRECT header, SMB2 header,
* SMB2 response could be mapped.
*/
- t->max_send_size = smb_direct_max_send_size;
- max_send_sges = DIV_ROUND_UP(t->max_send_size, PAGE_SIZE) + 3;
+ sp->max_send_size = smb_direct_max_send_size;
+ max_send_sges = DIV_ROUND_UP(sp->max_send_size, PAGE_SIZE) + 3;
if (max_send_sges > SMB_DIRECT_MAX_SEND_SGES) {
- pr_err("max_send_size %d is too large\n", t->max_send_size);
+ pr_err("max_send_size %d is too large\n", sp->max_send_size);
return -EINVAL;
}
@@ -1802,9 +1803,9 @@ static int smb_direct_init_params(struct smb_direct_transport *t,
* are needed for MR registration, RDMA R/W, local & remote
* MR invalidation.
*/
- t->max_rdma_rw_size = smb_direct_max_read_write_size;
+ sp->max_read_write_size = smb_direct_max_read_write_size;
t->pages_per_rw_credit = smb_direct_get_max_fr_pages(t);
- t->max_rw_credits = DIV_ROUND_UP(t->max_rdma_rw_size,
+ t->max_rw_credits = DIV_ROUND_UP(sp->max_read_write_size,
(t->pages_per_rw_credit - 1) *
PAGE_SIZE);
@@ -1850,20 +1851,20 @@ static int smb_direct_init_params(struct smb_direct_transport *t,
t->recv_credits = 0;
t->count_avail_recvmsg = 0;
- t->recv_credit_max = smb_direct_receive_credit_max;
+ sp->recv_credit_max = smb_direct_receive_credit_max;
t->recv_credit_target = 10;
t->new_recv_credits = 0;
- t->send_credit_target = smb_direct_send_credit_target;
+ sp->send_credit_target = smb_direct_send_credit_target;
atomic_set(&t->send_credits, 0);
atomic_set(&t->rw_credits, t->max_rw_credits);
- t->max_send_size = smb_direct_max_send_size;
- t->max_recv_size = smb_direct_max_receive_size;
- t->max_fragmented_recv_size = smb_direct_max_fragmented_recv_size;
+ sp->max_send_size = smb_direct_max_send_size;
+ sp->max_recv_size = smb_direct_max_receive_size;
+ sp->max_fragmented_recv_size = smb_direct_max_fragmented_recv_size;
cap->max_send_wr = max_send_wrs;
- cap->max_recv_wr = t->recv_credit_max;
+ cap->max_recv_wr = sp->recv_credit_max;
cap->max_send_sge = SMB_DIRECT_MAX_SEND_SGES;
cap->max_recv_sge = SMB_DIRECT_MAX_RECV_SGES;
cap->max_inline_data = 0;
@@ -1893,6 +1894,8 @@ static void smb_direct_destroy_pools(struct smb_direct_transport *t)
static int smb_direct_create_pools(struct smb_direct_transport *t)
{
+ struct smbdirect_socket *sc = &t->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
char name[80];
int i;
struct smb_direct_recvmsg *recvmsg;
@@ -1905,7 +1908,7 @@ static int smb_direct_create_pools(struct smb_direct_transport *t)
if (!t->sendmsg_cache)
return -ENOMEM;
- t->sendmsg_mempool = mempool_create(t->send_credit_target,
+ t->sendmsg_mempool = mempool_create(sp->send_credit_target,
mempool_alloc_slab, mempool_free_slab,
t->sendmsg_cache);
if (!t->sendmsg_mempool)
@@ -1914,20 +1917,20 @@ static int smb_direct_create_pools(struct smb_direct_transport *t)
snprintf(name, sizeof(name), "smb_direct_resp_%p", t);
t->recvmsg_cache = kmem_cache_create(name,
sizeof(struct smb_direct_recvmsg) +
- t->max_recv_size,
+ sp->max_recv_size,
0, SLAB_HWCACHE_ALIGN, NULL);
if (!t->recvmsg_cache)
goto err;
t->recvmsg_mempool =
- mempool_create(t->recv_credit_max, mempool_alloc_slab,
+ mempool_create(sp->recv_credit_max, mempool_alloc_slab,
mempool_free_slab, t->recvmsg_cache);
if (!t->recvmsg_mempool)
goto err;
INIT_LIST_HEAD(&t->recvmsg_queue);
- for (i = 0; i < t->recv_credit_max; i++) {
+ for (i = 0; i < sp->recv_credit_max; i++) {
recvmsg = mempool_alloc(t->recvmsg_mempool, KSMBD_DEFAULT_GFP);
if (!recvmsg)
goto err;
@@ -1935,7 +1938,7 @@ static int smb_direct_create_pools(struct smb_direct_transport *t)
recvmsg->sge.length = 0;
list_add(&recvmsg->list, &t->recvmsg_queue);
}
- t->count_avail_recvmsg = t->recv_credit_max;
+ t->count_avail_recvmsg = sp->recv_credit_max;
return 0;
err:
@@ -1947,6 +1950,7 @@ static int smb_direct_create_qpair(struct smb_direct_transport *t,
struct ib_qp_cap *cap)
{
struct smbdirect_socket *sc = &t->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
int ret;
struct ib_qp_init_attr qp_attr;
int pages_per_rw;
@@ -1970,7 +1974,7 @@ static int smb_direct_create_qpair(struct smb_direct_transport *t,
}
sc->ib.recv_cq = ib_alloc_cq(sc->ib.dev, t,
- t->recv_credit_max, 0, IB_POLL_WORKQUEUE);
+ sp->recv_credit_max, 0, IB_POLL_WORKQUEUE);
if (IS_ERR(sc->ib.recv_cq)) {
pr_err("Can't create RDMA recv CQ\n");
ret = PTR_ERR(sc->ib.recv_cq);
@@ -1997,7 +2001,7 @@ static int smb_direct_create_qpair(struct smb_direct_transport *t,
sc->ib.qp = sc->rdma.cm_id->qp;
sc->rdma.cm_id->event_handler = smb_direct_cm_handler;
- pages_per_rw = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1;
+ pages_per_rw = DIV_ROUND_UP(sp->max_read_write_size, PAGE_SIZE) + 1;
if (pages_per_rw > sc->ib.dev->attrs.max_sgl_rd) {
ret = ib_mr_pool_init(sc->ib.qp, &sc->ib.qp->rdma_mrs,
t->max_rw_credits, IB_MR_TYPE_MEM_REG,
@@ -2034,6 +2038,7 @@ static int smb_direct_prepare(struct ksmbd_transport *t)
{
struct smb_direct_transport *st = smb_trans_direct_transfort(t);
struct smbdirect_socket *sc = &st->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
struct smb_direct_recvmsg *recvmsg;
struct smbdirect_negotiate_req *req;
int ret;
@@ -2055,14 +2060,14 @@ static int smb_direct_prepare(struct ksmbd_transport *t)
goto out;
req = (struct smbdirect_negotiate_req *)recvmsg->packet;
- st->max_recv_size = min_t(int, st->max_recv_size,
+ sp->max_recv_size = min_t(int, sp->max_recv_size,
le32_to_cpu(req->preferred_send_size));
- st->max_send_size = min_t(int, st->max_send_size,
+ sp->max_send_size = min_t(int, sp->max_send_size,
le32_to_cpu(req->max_receive_size));
- st->max_fragmented_send_size =
+ sp->max_fragmented_send_size =
le32_to_cpu(req->max_fragmented_size);
- st->max_fragmented_recv_size =
- (st->recv_credit_max * st->max_recv_size) / 2;
+ sp->max_fragmented_recv_size =
+ (sp->recv_credit_max * sp->max_recv_size) / 2;
ret = smb_direct_send_negotiate_response(st, ret);
out: