~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/fs/cifs/transport.c

Version: ~ [ linux-5.15-rc5 ] ~ [ linux-5.14.11 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.72 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.152 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.210 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.250 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.286 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.288 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.18.140 ] ~ [ linux-3.16.85 ] ~ [ linux-3.14.79 ] ~ [ linux-3.12.74 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /*
  2  *   fs/cifs/transport.c
  3  *
  4  *   Copyright (C) International Business Machines  Corp., 2002,2008
  5  *   Author(s): Steve French (sfrench@us.ibm.com)
  6  *   Jeremy Allison (jra@samba.org) 2006.
  7  *
  8  *   This library is free software; you can redistribute it and/or modify
  9  *   it under the terms of the GNU Lesser General Public License as published
 10  *   by the Free Software Foundation; either version 2.1 of the License, or
 11  *   (at your option) any later version.
 12  *
 13  *   This library is distributed in the hope that it will be useful,
 14  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 15  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 16  *   the GNU Lesser General Public License for more details.
 17  *
 18  *   You should have received a copy of the GNU Lesser General Public License
 19  *   along with this library; if not, write to the Free Software
 20  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 21  */
 22 
 23 #include <linux/fs.h>
 24 #include <linux/list.h>
 25 #include <linux/gfp.h>
 26 #include <linux/wait.h>
 27 #include <linux/net.h>
 28 #include <linux/delay.h>
 29 #include <linux/freezer.h>
 30 #include <linux/tcp.h>
 31 #include <linux/bvec.h>
 32 #include <linux/highmem.h>
 33 #include <linux/uaccess.h>
 34 #include <asm/processor.h>
 35 #include <linux/mempool.h>
 36 #include "cifspdu.h"
 37 #include "cifsglob.h"
 38 #include "cifsproto.h"
 39 #include "cifs_debug.h"
 40 #include "smb2proto.h"
 41 #include "smbdirect.h"
 42 
 43 /* Max number of iovectors we can use off the stack when sending requests. */
 44 #define CIFS_MAX_IOV_SIZE 8
 45 
/*
 * Default mid callback for synchronous requests: wake the task that
 * queued the request (stored in callback_data by AllocMidQEntry).
 */
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}
 51 
 52 struct mid_q_entry *
 53 AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
 54 {
 55         struct mid_q_entry *temp;
 56 
 57         if (server == NULL) {
 58                 cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
 59                 return NULL;
 60         }
 61 
 62         temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
 63         memset(temp, 0, sizeof(struct mid_q_entry));
 64         kref_init(&temp->refcount);
 65         temp->mid = get_mid(smb_buffer);
 66         temp->pid = current->pid;
 67         temp->command = cpu_to_le16(smb_buffer->Command);
 68         cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
 69         /*      do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
 70         /* when mid allocated can be before when sent */
 71         temp->when_alloc = jiffies;
 72         temp->server = server;
 73 
 74         /*
 75          * The default is for the mid to be synchronous, so the
 76          * default callback just wakes up the current task.
 77          */
 78         temp->callback = cifs_wake_up_task;
 79         temp->callback_data = current;
 80 
 81         atomic_inc(&midCount);
 82         temp->mid_state = MID_REQUEST_ALLOCATED;
 83         return temp;
 84 }
 85 
/* kref release callback: return the mid to the mempool it came from. */
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
					       refcount);

	mempool_free(mid, cifs_mid_poolp);
}
 93 
/*
 * Drop a reference on @midEntry; the final put frees it back to the
 * mempool. The put is done under GlobalMid_Lock — presumably so that it
 * cannot race with code elsewhere that looks a mid up and takes a
 * reference under the same lock (confirm against the demultiplex path).
 */
void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}
100 
/*
 * Finish with a mid: mark it free, release its response buffer (large or
 * small pool, as flagged), optionally log a slow-response warning, and
 * drop the reference taken at allocation (which frees the mid on the
 * final put). Does NOT unlink the mid from any list — see
 * cifs_delete_mid() for that.
 */
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	/* lock_cmd: blocking lock commands are expected to be slow, so
	   they are excluded from the slow-response report below */
	__le16 command = midEntry->server->vals->lock_cmd;
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/* commands taking longer than one second are indications that
	   something is wrong, unless it is quite a slow link or server */
	if (time_after(now, midEntry->when_alloc + HZ)) {
		if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) {
			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
			       midEntry->command, midEntry->mid);
			/* A/S/R: jiffies since alloc / send / receive */
			pr_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
			       now - midEntry->when_alloc,
			       now - midEntry->when_sent,
			       now - midEntry->when_received);
		}
	}
#endif
	cifs_mid_q_entry_release(midEntry);
}
131 
/*
 * Unlink @mid from its pending queue under GlobalMid_Lock, then release
 * it (response buffer and final reference) via DeleteMidQEntry().
 */
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del(&mid->qhead);
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}
141 
142 /*
143  * smb_send_kvec - send an array of kvecs to the server
144  * @server:     Server to send the data to
145  * @smb_msg:    Message to send
146  * @sent:       amount of data sent on socket is stored here
147  *
148  * Our basic "send data to server" function. Should be called with srv_mutex
149  * held. The caller is responsible for handling the results.
150  */
151 static int
152 smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
153               size_t *sent)
154 {
155         int rc = 0;
156         int retries = 0;
157         struct socket *ssocket = server->ssocket;
158 
159         *sent = 0;
160 
161         smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
162         smb_msg->msg_namelen = sizeof(struct sockaddr);
163         smb_msg->msg_control = NULL;
164         smb_msg->msg_controllen = 0;
165         if (server->noblocksnd)
166                 smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
167         else
168                 smb_msg->msg_flags = MSG_NOSIGNAL;
169 
170         while (msg_data_left(smb_msg)) {
171                 /*
172                  * If blocking send, we try 3 times, since each can block
173                  * for 5 seconds. For nonblocking  we have to try more
174                  * but wait increasing amounts of time allowing time for
175                  * socket to clear.  The overall time we wait in either
176                  * case to send on the socket is about 15 seconds.
177                  * Similarly we wait for 15 seconds for a response from
178                  * the server in SendReceive[2] for the server to send
179                  * a response back for most types of requests (except
180                  * SMB Write past end of file which can be slow, and
181                  * blocking lock operations). NFS waits slightly longer
182                  * than CIFS, but this can make it take longer for
183                  * nonresponsive servers to be detected and 15 seconds
184                  * is more than enough time for modern networks to
185                  * send a packet.  In most cases if we fail to send
186                  * after the retries we will kill the socket and
187                  * reconnect which may clear the network problem.
188                  */
189                 rc = sock_sendmsg(ssocket, smb_msg);
190                 if (rc == -EAGAIN) {
191                         retries++;
192                         if (retries >= 14 ||
193                             (!server->noblocksnd && (retries > 2))) {
194                                 cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
195                                          ssocket);
196                                 return -EAGAIN;
197                         }
198                         msleep(1 << retries);
199                         continue;
200                 }
201 
202                 if (rc < 0)
203                         return rc;
204 
205                 if (rc == 0) {
206                         /* should never happen, letting socket clear before
207                            retrying is our only obvious option here */
208                         cifs_dbg(VFS, "tcp sent no data\n");
209                         msleep(500);
210                         continue;
211                 }
212 
213                 /* send was at least partially successful */
214                 *sent += rc;
215                 retries = 0; /* in case we get ENOSPC on the next send */
216         }
217         return 0;
218 }
219 
220 unsigned long
221 smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
222 {
223         unsigned int i;
224         struct kvec *iov;
225         int nvec;
226         unsigned long buflen = 0;
227 
228         if (server->vals->header_preamble_size == 0 &&
229             rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
230                 iov = &rqst->rq_iov[1];
231                 nvec = rqst->rq_nvec - 1;
232         } else {
233                 iov = rqst->rq_iov;
234                 nvec = rqst->rq_nvec;
235         }
236 
237         /* total up iov array first */
238         for (i = 0; i < nvec; i++)
239                 buflen += iov[i].iov_len;
240 
241         /*
242          * Add in the page array if there is one. The caller needs to make
243          * sure rq_offset and rq_tailsz are set correctly. If a buffer of
244          * multiple pages ends at page boundary, rq_tailsz needs to be set to
245          * PAGE_SIZE.
246          */
247         if (rqst->rq_npages) {
248                 if (rqst->rq_npages == 1)
249                         buflen += rqst->rq_tailsz;
250                 else {
251                         /*
252                          * If there is more than one page, calculate the
253                          * buffer length based on rq_offset and rq_tailsz
254                          */
255                         buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
256                                         rqst->rq_offset;
257                         buflen += rqst->rq_tailsz;
258                 }
259         }
260 
261         return buflen;
262 }
263 
/*
 * Transmit @num_rqst requests as one wire message. Uses smbdirect (RDMA)
 * when enabled; otherwise corks the TCP socket, sends an RFC1002 length
 * marker (SMB2+), then each request's iovecs and pages, and uncorks.
 * A partial send poisons the session (CifsNeedReconnect) because the
 * server would misparse the next SMB as the remainder of this one.
 * Must be called with srv_mutex held (see smb_send_kvec).
 */
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;	/* cork value; reset to 0 to uncork below */
	__be32 rfc1002_marker;

	/* RDMA transport bypasses the TCP socket path entirely */
	if (cifs_rdma_enabled(server) && server->smbd_conn) {
		rc = smbd_send(server, rqst);
		goto smbd_done;
	}
	if (ssocket == NULL)
		return -ENOTSOCK;

	/* cork the socket so the pieces below go out as full frames */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	/* total payload length, used for the marker and the partial-send check */
	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len  = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, &hiov,
			      1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;
		send_length += 4;	/* marker now counts toward the expected total */
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC,
			      iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
	}
smbd_done:
	/*
	 * NOTE(review): -EINTR is remapped to 0 here along with all
	 * non-negative rc values; only other negative errors are logged
	 * and propagated — confirm callers rely on interrupted sends
	 * being reported as success.
	 */
	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else
		rc = 0;

	return rc;
}
374 
375 static int
376 smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst, int flags)
377 {
378         struct smb_rqst cur_rqst;
379         int rc;
380 
381         if (!(flags & CIFS_TRANSFORM_REQ))
382                 return __smb_send_rqst(server, 1, rqst);
383 
384         if (!server->ops->init_transform_rq ||
385             !server->ops->free_transform_rq) {
386                 cifs_dbg(VFS, "Encryption requested but transform callbacks are missed\n");
387                 return -EIO;
388         }
389 
390         rc = server->ops->init_transform_rq(server, &cur_rqst, rqst);
391         if (rc)
392                 return rc;
393 
394         rc = __smb_send_rqst(server, 1, &cur_rqst);
395         server->ops->free_transform_rq(&cur_rqst);
396         return rc;
397 }
398 
399 int
400 smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
401          unsigned int smb_buf_length)
402 {
403         struct kvec iov[2];
404         struct smb_rqst rqst = { .rq_iov = iov,
405                                  .rq_nvec = 2 };
406 
407         iov[0].iov_base = smb_buffer;
408         iov[0].iov_len = 4;
409         iov[1].iov_base = (char *)smb_buffer + 4;
410         iov[1].iov_len = smb_buf_length;
411 
412         return __smb_send_rqst(server, 1, &rqst);
413 }
414 
/*
 * Reserve a request slot against the counter pointed to by @credits.
 *
 * @timeout selects the policy:
 *  - CIFS_ASYNC_OP: never blocks (oplock breaks must not be held up);
 *    consumes a credit immediately, even driving the count negative.
 *  - CIFS_BLOCKING_OP: may wait for a credit, but does not consume one
 *    (locking commands are allowed to block on the server and are not
 *    counted against the in-flight total).
 *  - otherwise: waits (killably) until a credit is free, then consumes
 *    it and bumps server->in_flight.
 *
 * Returns 0 on success, the error from wait_event_killable() if the
 * wait was interrupted, or -ENOENT if the tcp session is exiting.
 */
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			/* drop the lock while sleeping for a credit */
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			/* re-take the lock and re-check under it */
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}
462 
463 static int
464 wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
465                       const int optype)
466 {
467         int *val;
468 
469         val = server->ops->get_credits_field(server, optype);
470         /* Since an echo is already inflight, no need to wait to send another */
471         if (*val <= 0 && optype == CIFS_ECHO_OP)
472                 return -EAGAIN;
473         return wait_for_free_credits(server, timeout, val);
474 }
475 
/*
 * MTU-credit stub: grants the full requested @size and reports zero
 * credits consumed; always succeeds. Presumably the non-credit (SMB1)
 * implementation of the wait_mtu_credits operation — SMB2 would supply
 * real credit accounting; confirm against the ops tables.
 */
int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, unsigned int *credits)
{
	*num = size;
	*credits = 0;
	return 0;
}
484 
485 static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
486                         struct mid_q_entry **ppmidQ)
487 {
488         if (ses->server->tcpStatus == CifsExiting) {
489                 return -ENOENT;
490         }
491 
492         if (ses->server->tcpStatus == CifsNeedReconnect) {
493                 cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
494                 return -EAGAIN;
495         }
496 
497         if (ses->status == CifsNew) {
498                 if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
499                         (in_buf->Command != SMB_COM_NEGOTIATE))
500                         return -EAGAIN;
501                 /* else ok - we are setting up session */
502         }
503 
504         if (ses->status == CifsExiting) {
505                 /* check if SMB session is bad because we are setting it up */
506                 if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
507                         return -EAGAIN;
508                 /* else ok - we are shutting down session */
509         }
510 
511         *ppmidQ = AllocMidQEntry(in_buf, ses->server);
512         if (*ppmidQ == NULL)
513                 return -ENOMEM;
514         spin_lock(&GlobalMid_Lock);
515         list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
516         spin_unlock(&GlobalMid_Lock);
517         return 0;
518 }
519 
/*
 * Sleep (freezable, killable) until @midQ leaves MID_REQUEST_SUBMITTED,
 * i.e. until a response, retry, or failure has been recorded for it.
 * Returns 0 once the state changed, or -ERESTARTSYS if the wait was
 * interrupted by a fatal signal.
 */
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				    midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}
532 
533 struct mid_q_entry *
534 cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
535 {
536         int rc;
537         struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
538         struct mid_q_entry *mid;
539 
540         if (rqst->rq_iov[0].iov_len != 4 ||
541             rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
542                 return ERR_PTR(-EIO);
543 
544         /* enable signing if server requires it */
545         if (server->sign)
546                 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
547 
548         mid = AllocMidQEntry(hdr, server);
549         if (mid == NULL)
550                 return ERR_PTR(-ENOMEM);
551 
552         rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
553         if (rc) {
554                 DeleteMidQEntry(mid);
555                 return ERR_PTR(rc);
556         }
557 
558         return mid;
559 }
560 
/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 *
 * @receive/@callback/@handle are stored in the mid and invoked by the
 * response path; @cbdata is the callback's private data. Unless
 * CIFS_HAS_CREDITS is set in @flags, one credit is acquired here and
 * returned on any failure. Returns 0 once the request is on the wire.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags)
{
	int rc, timeout, optype;
	struct mid_q_entry *mid;
	unsigned int credits = 0;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	/* caller may have pre-acquired credits (CIFS_HAS_CREDITS) */
	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, timeout, optype);
		if (rc)
			return rc;
		credits = 1;
	}

	mutex_lock(&server->srv_mutex);
	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		/* undo the sequence-number bump done while signing */
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	/* send failed: give back the credit taken above */
	add_credits_and_wake_if(server, credits, optype);
	return rc;
}
625 
626 /*
627  *
628  * Send an SMB Request.  No response info (other than return code)
629  * needs to be parsed.
630  *
631  * flags indicate the type of request buffer and how long to wait
632  * and whether to log NT STATUS code (error) before mapping it to POSIX error
633  *
634  */
635 int
636 SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
637                  char *in_buf, int flags)
638 {
639         int rc;
640         struct kvec iov[1];
641         struct kvec rsp_iov;
642         int resp_buf_type;
643 
644         iov[0].iov_base = in_buf;
645         iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
646         flags |= CIFS_NO_RESP;
647         rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
648         cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
649 
650         return rc;
651 }
652 
/*
 * Translate the final state of @mid into an errno once the caller has
 * finished waiting. On MID_RESPONSE_RECEIVED the mid is returned to the
 * caller intact (rc 0); every other state frees the mid here via
 * DeleteMidQEntry. Only the invalid-state path unlinks the mid from its
 * list — the named failure states are presumably already unlinked by
 * the thread that set them (confirm against the demultiplex path).
 */
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		/* unexpected state: force-unlink before freeing */
		list_del_init(&mid->qhead);
		cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}
686 
687 static inline int
688 send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
689             struct mid_q_entry *mid)
690 {
691         return server->ops->send_cancel ?
692                                 server->ops->send_cancel(server, rqst, mid) : 0;
693 }
694 
695 int
696 cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
697                    bool log_error)
698 {
699         unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
700 
701         dump_smb(mid->resp_buf, min_t(u32, 92, len));
702 
703         /* convert the length into a more usable form */
704         if (server->sign) {
705                 struct kvec iov[2];
706                 int rc = 0;
707                 struct smb_rqst rqst = { .rq_iov = iov,
708                                          .rq_nvec = 2 };
709 
710                 iov[0].iov_base = mid->resp_buf;
711                 iov[0].iov_len = 4;
712                 iov[1].iov_base = (char *)mid->resp_buf + 4;
713                 iov[1].iov_len = len - 4;
714                 /* FIXME: add code to kill session */
715                 rc = cifs_verify_signature(&rqst, server,
716                                            mid->sequence_number);
717                 if (rc)
718                         cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
719                                  rc);
720         }
721 
722         /* BB special case reconnect tid and uid here? */
723         return map_smb_to_linux_error(mid->resp_buf, log_error);
724 }
725 
726 struct mid_q_entry *
727 cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
728 {
729         int rc;
730         struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
731         struct mid_q_entry *mid;
732 
733         if (rqst->rq_iov[0].iov_len != 4 ||
734             rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
735                 return ERR_PTR(-EIO);
736 
737         rc = allocate_mid(ses, hdr, &mid);
738         if (rc)
739                 return ERR_PTR(rc);
740         rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
741         if (rc) {
742                 cifs_delete_mid(mid);
743                 return ERR_PTR(rc);
744         }
745         return mid;
746 }
747 
/*
 * Send @rqst synchronously and wait for its response.
 *
 * On success the response buffer is handed to the caller via @resp_iov
 * (with *resp_buf_type telling which pool to release it to); the mid's
 * resp_buf pointer is cleared so cifs_delete_mid() does not free it.
 * With CIFS_NO_RESP in @flags the buffer is freed here instead.
 * Acquires one credit before sending and returns it on every exit path.
 */
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	int rc = 0;
	int timeout, optype;
	struct mid_q_entry *midQ;
	unsigned int credits = 1;
	char *buf;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	*resp_buf_type = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	rc = wait_for_free_request(ses->server, timeout, optype);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	midQ = ses->server->ops->setup_request(ses, rqst);
	if (IS_ERR(midQ)) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, optype);
		return PTR_ERR(midQ);
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send_rqst(ses->server, rqst, flags);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	/* failed send: undo the sequence-number bump done while signing */
	if (rc < 0)
		ses->server->sequence_number -= 2;
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

#ifdef CONFIG_CIFS_SMB311
	/* SMB3.1.1 preauth integrity: fold the sent request into the hash */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
		smb311_update_preauth_hash(ses, rqst->rq_iov,
					   rqst->rq_nvec);
#endif

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		cifs_dbg(FYI, "Cancelling wait for mid %llu\n", midQ->mid);
		send_cancel(ses->server, rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/*
			 * Still in flight: hand ownership of the mid to the
			 * demultiplex thread, which will free it via the
			 * callback when the (late) response arrives.
			 */
			midQ->mid_flags |= MID_WAIT_CANCELLED;
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, optype);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		/* mid already freed by cifs_sync_mid_result on this path */
		add_credits(ses->server, 1, optype);
		return rc;
	}

	if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(FYI, "Bad MID state?\n");
		goto out;
	}

	buf = (char *)midQ->resp_buf;
	resp_iov->iov_base = buf;
	resp_iov->iov_len = midQ->resp_buf_size +
		ses->server->vals->header_preamble_size;
	if (midQ->large_buf)
		*resp_buf_type = CIFS_LARGE_BUFFER;
	else
		*resp_buf_type = CIFS_SMALL_BUFFER;

#ifdef CONFIG_CIFS_SMB311
	/* SMB3.1.1 preauth integrity: fold the response into the hash */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov->iov_base,
			.iov_len = resp_iov->iov_len
		};
		smb311_update_preauth_hash(ses, &iov, 1);
	}
#endif

	credits = ses->server->ops->get_credits(midQ);

	rc = ses->server->ops->check_receive(midQ, ses->server,
					     flags & CIFS_LOG_ERROR);

	/* mark it so buf will not be freed by cifs_delete_mid */
	if ((flags & CIFS_NO_RESP) == 0)
		midQ->resp_buf = NULL;
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, credits, optype);

	return rc;
}
879 
880 int
881 SendReceive2(const unsigned int xid, struct cifs_ses *ses,
882              struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
883              const int flags, struct kvec *resp_iov)
884 {
885         struct smb_rqst rqst;
886         struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
887         int rc;
888 
889         if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
890                 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
891                                         GFP_KERNEL);
892                 if (!new_iov) {
893                         /* otherwise cifs_send_recv below sets resp_buf_type */
894                         *resp_buf_type = CIFS_NO_BUFFER;
895                         return -ENOMEM;
896                 }
897         } else
898                 new_iov = s_iov;
899 
900         /* 1st iov is a RFC1001 length followed by the rest of the packet */
901         memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
902 
903         new_iov[0].iov_base = new_iov[1].iov_base;
904         new_iov[0].iov_len = 4;
905         new_iov[1].iov_base += 4;
906         new_iov[1].iov_len -= 4;
907 
908         memset(&rqst, 0, sizeof(struct smb_rqst));
909         rqst.rq_iov = new_iov;
910         rqst.rq_nvec = n_vec + 1;
911 
912         rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
913         if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
914                 kfree(new_iov);
915         return rc;
916 }
917 
918 int
919 SendReceive(const unsigned int xid, struct cifs_ses *ses,
920             struct smb_hdr *in_buf, struct smb_hdr *out_buf,
921             int *pbytes_returned, const int timeout)
922 {
923         int rc = 0;
924         struct mid_q_entry *midQ;
925         unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
926         struct kvec iov = { .iov_base = in_buf, .iov_len = len };
927         struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
928 
929         if (ses == NULL) {
930                 cifs_dbg(VFS, "Null smb session\n");
931                 return -EIO;
932         }
933         if (ses->server == NULL) {
934                 cifs_dbg(VFS, "Null tcp session\n");
935                 return -EIO;
936         }
937 
938         if (ses->server->tcpStatus == CifsExiting)
939                 return -ENOENT;
940 
941         /* Ensure that we do not send more than 50 overlapping requests
942            to the same server. We may make this configurable later or
943            use ses->maxReq */
944 
945         if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
946                 cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
947                          len);
948                 return -EIO;
949         }
950 
951         rc = wait_for_free_request(ses->server, timeout, 0);
952         if (rc)
953                 return rc;
954 
955         /* make sure that we sign in the same order that we send on this socket
956            and avoid races inside tcp sendmsg code that could cause corruption
957            of smb data */
958 
959         mutex_lock(&ses->server->srv_mutex);
960 
961         rc = allocate_mid(ses, in_buf, &midQ);
962         if (rc) {
963                 mutex_unlock(&ses->server->srv_mutex);
964                 /* Update # of requests on wire to server */
965                 add_credits(ses->server, 1, 0);
966                 return rc;
967         }
968 
969         rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
970         if (rc) {
971                 mutex_unlock(&ses->server->srv_mutex);
972                 goto out;
973         }
974 
975         midQ->mid_state = MID_REQUEST_SUBMITTED;
976 
977         cifs_in_send_inc(ses->server);
978         rc = smb_send(ses->server, in_buf, len);
979         cifs_in_send_dec(ses->server);
980         cifs_save_when_sent(midQ);
981 
982         if (rc < 0)
983                 ses->server->sequence_number -= 2;
984 
985         mutex_unlock(&ses->server->srv_mutex);
986 
987         if (rc < 0)
988                 goto out;
989 
990         if (timeout == CIFS_ASYNC_OP)
991                 goto out;
992 
993         rc = wait_for_response(ses->server, midQ);
994         if (rc != 0) {
995                 send_cancel(ses->server, &rqst, midQ);
996                 spin_lock(&GlobalMid_Lock);
997                 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
998                         /* no longer considered to be "in-flight" */
999                         midQ->callback = DeleteMidQEntry;
1000                         spin_unlock(&GlobalMid_Lock);
1001                         add_credits(ses->server, 1, 0);
1002                         return rc;
1003                 }
1004                 spin_unlock(&GlobalMid_Lock);
1005         }
1006 
1007         rc = cifs_sync_mid_result(midQ, ses->server);
1008         if (rc != 0) {
1009                 add_credits(ses->server, 1, 0);
1010                 return rc;
1011         }
1012 
1013         if (!midQ->resp_buf || !out_buf ||
1014             midQ->mid_state != MID_RESPONSE_RECEIVED) {
1015                 rc = -EIO;
1016                 cifs_dbg(VFS, "Bad MID state?\n");
1017                 goto out;
1018         }
1019 
1020         *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1021         memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1022         rc = cifs_check_receive(midQ, ses->server, 0);
1023 out:
1024         cifs_delete_mid(midQ);
1025         add_credits(ses->server, 1, 0);
1026 
1027         return rc;
1028 }
1029 
1030 /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1031    blocking lock to return. */
1032 
1033 static int
1034 send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
1035                         struct smb_hdr *in_buf,
1036                         struct smb_hdr *out_buf)
1037 {
1038         int bytes_returned;
1039         struct cifs_ses *ses = tcon->ses;
1040         LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1041 
1042         /* We just modify the current in_buf to change
1043            the type of lock from LOCKING_ANDX_SHARED_LOCK
1044            or LOCKING_ANDX_EXCLUSIVE_LOCK to
1045            LOCKING_ANDX_CANCEL_LOCK. */
1046 
1047         pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1048         pSMB->Timeout = 0;
1049         pSMB->hdr.Mid = get_next_mid(ses->server);
1050 
1051         return SendReceive(xid, ses, in_buf, out_buf,
1052                         &bytes_returned, 0);
1053 }
1054 
/*
 * Send a blocking lock request (LOCKING_ANDX, or a trans2 POSIX lock)
 * and wait - interruptibly - for the response.
 *
 * Unlike SendReceive(), the wait for the reply may be interrupted by a
 * signal.  In that case we attempt to cancel the lock on the server
 * (NT_CANCEL for trans2 POSIX locks, LOCKING_ANDX_CANCEL_LOCK
 * otherwise) and keep waiting for the original reply; if the lock had
 * actually been granted before the cancel took effect, the system call
 * is restarted (see the rstart handling at the end).
 *
 * Returns 0 on success, -ERESTARTSYS when the interrupted call should
 * be retried, or a negative errno.  On success *out_buf holds the raw
 * response frame and *pbytes_returned its RFC1002 length.
 */
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;	/* set once we know the original lock was granted */
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	/* CIFS_BLOCKING_OP: may block indefinitely, no credit is consumed */
	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	/* a failed send invalidates the signing sequence; rewind it */
	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((ses->server->tcpStatus == CifsGood) ||
		 (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(ses->server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		/* cancel sent; now wait (uninterruptibly) for the reply
		   to the original lock request */
		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	/* EACCES after an interruption means the lock request itself was
	   denied (it raced with our cancel); retry the whole syscall */
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}
1202 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp