TOMOYO Linux Cross Reference
Linux/fs/cifs/transport.c


  1 /*
  2  *   fs/cifs/transport.c
  3  *
  4  *   Copyright (C) International Business Machines  Corp., 2002,2008
  5  *   Author(s): Steve French (sfrench@us.ibm.com)
  6  *   Jeremy Allison (jra@samba.org) 2006.
  7  *
  8  *   This library is free software; you can redistribute it and/or modify
  9  *   it under the terms of the GNU Lesser General Public License as published
 10  *   by the Free Software Foundation; either version 2.1 of the License, or
 11  *   (at your option) any later version.
 12  *
 13  *   This library is distributed in the hope that it will be useful,
 14  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 15  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 16  *   the GNU Lesser General Public License for more details.
 17  *
 18  *   You should have received a copy of the GNU Lesser General Public License
 19  *   along with this library; if not, write to the Free Software
 20  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 21  */
 22 
 23 #include <linux/fs.h>
 24 #include <linux/list.h>
 25 #include <linux/gfp.h>
 26 #include <linux/wait.h>
 27 #include <linux/net.h>
 28 #include <linux/delay.h>
 29 #include <linux/freezer.h>
 30 #include <linux/tcp.h>
 31 #include <linux/bvec.h>
 32 #include <linux/highmem.h>
 33 #include <linux/uaccess.h>
 34 #include <asm/processor.h>
 35 #include <linux/mempool.h>
 36 #include "cifspdu.h"
 37 #include "cifsglob.h"
 38 #include "cifsproto.h"
 39 #include "cifs_debug.h"
 40 #include "smb2proto.h"
 41 #include "smbdirect.h"
 42 
 43 /* Max number of iovectors we can use off the stack when sending requests. */
 44 #define CIFS_MAX_IOV_SIZE 8
 45 
 46 void
 47 cifs_wake_up_task(struct mid_q_entry *mid)
 48 {
 49         wake_up_process(mid->callback_data);
 50 }
 51 
 52 struct mid_q_entry *
 53 AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
 54 {
 55         struct mid_q_entry *temp;
 56 
 57         if (server == NULL) {
 58                 cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
 59                 return NULL;
 60         }
 61 
 62         temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
 63         memset(temp, 0, sizeof(struct mid_q_entry));
 64         kref_init(&temp->refcount);
 65         temp->mid = get_mid(smb_buffer);
 66         temp->pid = current->pid;
 67         temp->command = cpu_to_le16(smb_buffer->Command);
 68         cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
 69         /*      do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
 70         /* when mid allocated can be before when sent */
 71         temp->when_alloc = jiffies;
 72         temp->server = server;
 73 
 74         /*
 75          * The default is for the mid to be synchronous, so the
 76          * default callback just wakes up the current task.
 77          */
 78         temp->callback = cifs_wake_up_task;
 79         temp->callback_data = current;
 80 
 81         atomic_inc(&midCount);
 82         temp->mid_state = MID_REQUEST_ALLOCATED;
 83         return temp;
 84 }
 85 
 86 static void _cifs_mid_q_entry_release(struct kref *refcount)
 87 {
 88         struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
 89                                                refcount);
 90 
 91         mempool_free(mid, cifs_mid_poolp);
 92 }
 93 
 94 void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
 95 {
 96         spin_lock(&GlobalMid_Lock);
 97         kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
 98         spin_unlock(&GlobalMid_Lock);
 99 }
100 
101 void
102 DeleteMidQEntry(struct mid_q_entry *midEntry)
103 {
104 #ifdef CONFIG_CIFS_STATS2
105         __le16 command = midEntry->server->vals->lock_cmd;
106         unsigned long now;
107 #endif
108         midEntry->mid_state = MID_FREE;
109         atomic_dec(&midCount);
110         if (midEntry->large_buf)
111                 cifs_buf_release(midEntry->resp_buf);
112         else
113                 cifs_small_buf_release(midEntry->resp_buf);
114 #ifdef CONFIG_CIFS_STATS2
115         now = jiffies;
116         /* commands taking longer than one second are indications that
117            something is wrong, unless it is quite a slow link or server */
118         if (time_after(now, midEntry->when_alloc + HZ)) {
119                 if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) {
120                         pr_debug(" CIFS slow rsp: cmd %d mid %llu",
121                                midEntry->command, midEntry->mid);
122                         pr_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
123                                now - midEntry->when_alloc,
124                                now - midEntry->when_sent,
125                                now - midEntry->when_received);
126                 }
127         }
128 #endif
129         cifs_mid_q_entry_release(midEntry);
130 }
131 
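/*
 * Illustrative userspace sketch of the wraparound-safe timestamp comparison
 * used in the CONFIG_CIFS_STATS2 block above. It is only an approximation of
 * the real time_after() from <linux/jiffies.h>, with a hypothetical HZ value,
 * but it shows why the "slow response" check keeps working even if the
 * jiffies counter wraps between when_alloc and now.
 */
#include <stdio.h>

#define sketch_time_after(a, b) ((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long hz = 250;				/* assumed HZ */
	unsigned long when_alloc = (unsigned long)-100;	/* just before wrap */
	unsigned long now = 200;			/* shortly after wrap */

	if (sketch_time_after(now, when_alloc + hz))
		printf("slow response: more than HZ jiffies elapsed\n");
	return 0;
}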
132 void
133 cifs_delete_mid(struct mid_q_entry *mid)
134 {
135         spin_lock(&GlobalMid_Lock);
136         list_del(&mid->qhead);
137         spin_unlock(&GlobalMid_Lock);
138 
139         DeleteMidQEntry(mid);
140 }
141 
142 /*
143  * smb_send_kvec - send an array of kvecs to the server
144  * @server:     Server to send the data to
145  * @smb_msg:    Message to send
146  * @sent:       amount of data sent on socket is stored here
147  *
148  * Our basic "send data to server" function. Should be called with srv_mutex
149  * held. The caller is responsible for handling the results.
150  */
151 static int
152 smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
153               size_t *sent)
154 {
155         int rc = 0;
156         int retries = 0;
157         struct socket *ssocket = server->ssocket;
158 
159         *sent = 0;
160 
161         smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
162         smb_msg->msg_namelen = sizeof(struct sockaddr);
163         smb_msg->msg_control = NULL;
164         smb_msg->msg_controllen = 0;
165         if (server->noblocksnd)
166                 smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
167         else
168                 smb_msg->msg_flags = MSG_NOSIGNAL;
169 
170         while (msg_data_left(smb_msg)) {
171                 /*
172                  * If blocking send, we try 3 times, since each can block
173                  * for 5 seconds. For nonblocking  we have to try more
174                  * but wait increasing amounts of time allowing time for
175                  * socket to clear.  The overall time we wait in either
176                  * case to send on the socket is about 15 seconds.
177                  * Similarly we wait for 15 seconds for a response from
178                  * the server in SendReceive[2] for the server to send
179                  * a response back for most types of requests (except
180                  * SMB Write past end of file which can be slow, and
181                  * blocking lock operations). NFS waits slightly longer
182                  * than CIFS, but this can make it take longer for
183                  * nonresponsive servers to be detected and 15 seconds
184                  * is more than enough time for modern networks to
185                  * send a packet.  In most cases if we fail to send
186                  * after the retries we will kill the socket and
187                  * reconnect which may clear the network problem.
188                  */
189                 rc = sock_sendmsg(ssocket, smb_msg);
190                 if (rc == -EAGAIN) {
191                         retries++;
192                         if (retries >= 14 ||
193                             (!server->noblocksnd && (retries > 2))) {
194                                 cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
195                                          ssocket);
196                                 return -EAGAIN;
197                         }
198                         msleep(1 << retries);
199                         continue;
200                 }
201 
202                 if (rc < 0)
203                         return rc;
204 
205                 if (rc == 0) {
206                         /* should never happen, letting socket clear before
207                            retrying is our only obvious option here */
208                         cifs_dbg(VFS, "tcp sent no data\n");
209                         msleep(500);
210                         continue;
211                 }
212 
213                 /* send was at least partially successful */
214                 *sent += rc;
215                 retries = 0; /* in case we get ENOSPC on the next send */
216         }
217         return 0;
218 }
219 
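/*
 * Illustrative userspace sketch (not kernel code) of the worst-case backoff
 * budget described in the comment inside smb_send_kvec() above: on the
 * nonblocking path each -EAGAIN sleeps 1 << retries milliseconds and the
 * sender gives up once retries reaches 14, so the total wait comes to about
 * 16 seconds, close to the ~15 seconds the comment quotes.
 */
#include <stdio.h>

int main(void)
{
	unsigned long total_ms = 0;
	int retries;

	/* sleeps happen for retries = 1..13, i.e. msleep(2), msleep(4), ... */
	for (retries = 1; retries < 14; retries++)
		total_ms += 1UL << retries;

	printf("worst-case nonblocking backoff: %lu ms\n", total_ms); /* 16382 */
	return 0;
}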
220 static unsigned long
221 rqst_len(struct smb_rqst *rqst)
222 {
223         unsigned int i;
224         struct kvec *iov = rqst->rq_iov;
225         unsigned long buflen = 0;
226 
227         /* total up iov array first */
228         for (i = 0; i < rqst->rq_nvec; i++)
229                 buflen += iov[i].iov_len;
230 
231         /* add in the page array if there is one */
232         if (rqst->rq_npages) {
233                 buflen += rqst->rq_pagesz * (rqst->rq_npages - 1);
234                 buflen += rqst->rq_tailsz;
235         }
236 
237         return buflen;
238 }
239 
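/*
 * Worked example for rqst_len() above, using hypothetical numbers: a request
 * with two kvecs of 4 and 68 bytes plus a page array of rq_npages = 3,
 * rq_pagesz = 4096 and rq_tailsz = 100 totals
 * 4 + 68 + 4096 * (3 - 1) + 100 = 8364 bytes; every page is counted at full
 * size except the last, which contributes only its tail length.
 */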
240 static int
241 __smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
242 {
243         int rc;
244         struct kvec *iov = rqst->rq_iov;
245         int n_vec = rqst->rq_nvec;
246         unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
247         unsigned long send_length;
248         unsigned int i;
249         size_t total_len = 0, sent, size;
250         struct socket *ssocket = server->ssocket;
251         struct msghdr smb_msg;
252         int val = 1;
253         if (cifs_rdma_enabled(server) && server->smbd_conn) {
254                 rc = smbd_send(server->smbd_conn, rqst);
255                 goto smbd_done;
256         }
257         if (ssocket == NULL)
258                 return -ENOTSOCK;
259 
260         /* sanity check send length */
261         send_length = rqst_len(rqst);
262         if (send_length != smb_buf_length + 4) {
263                 WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n",
264                         send_length, smb_buf_length);
265                 return -EIO;
266         }
267 
268         if (n_vec < 2)
269                 return -EIO;
270 
271         cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
272         dump_smb(iov[0].iov_base, iov[0].iov_len);
273         dump_smb(iov[1].iov_base, iov[1].iov_len);
274 
275         /* cork the socket */
276         kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
277                                 (char *)&val, sizeof(val));
278 
279         size = 0;
280         for (i = 0; i < n_vec; i++)
281                 size += iov[i].iov_len;
282 
283         iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, iov, n_vec, size);
284 
285         rc = smb_send_kvec(server, &smb_msg, &sent);
286         if (rc < 0)
287                 goto uncork;
288 
289         total_len += sent;
290 
291         /* now walk the page array and send each page in it */
292         for (i = 0; i < rqst->rq_npages; i++) {
293                 size_t len = i == rqst->rq_npages - 1
294                                 ? rqst->rq_tailsz
295                                 : rqst->rq_pagesz;
296                 struct bio_vec bvec = {
297                         .bv_page = rqst->rq_pages[i],
298                         .bv_len = len
299                 };
300                 iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
301                               &bvec, 1, len);
302                 rc = smb_send_kvec(server, &smb_msg, &sent);
303                 if (rc < 0)
304                         break;
305 
306                 total_len += sent;
307         }
308 
309 uncork:
310         /* uncork it */
311         val = 0;
312         kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
313                                 (char *)&val, sizeof(val));
314 
315         if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
316                 cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
317                          smb_buf_length + 4, total_len);
318                 /*
319                  * If we have only sent part of an SMB then the next SMB could
320                  * be taken as the remainder of this one. We need to kill the
321                  * socket so the server throws away the partial SMB
322                  */
323                 server->tcpStatus = CifsNeedReconnect;
324         }
325 smbd_done:
326         if (rc < 0 && rc != -EINTR)
327                 cifs_dbg(VFS, "Error %d sending data on socket to server\n",
328                          rc);
329         else
330                 rc = 0;
331 
332         return rc;
333 }
334 
335 static int
336 smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst, int flags)
337 {
338         struct smb_rqst cur_rqst;
339         int rc;
340 
341         if (!(flags & CIFS_TRANSFORM_REQ))
342                 return __smb_send_rqst(server, rqst);
343 
344         if (!server->ops->init_transform_rq ||
345             !server->ops->free_transform_rq) {
346                 cifs_dbg(VFS, "Encryption requested but transform callbacks are missed\n");
347                 return -EIO;
348         }
349 
350         rc = server->ops->init_transform_rq(server, &cur_rqst, rqst);
351         if (rc)
352                 return rc;
353 
354         rc = __smb_send_rqst(server, &cur_rqst);
355         server->ops->free_transform_rq(&cur_rqst);
356         return rc;
357 }
358 
359 int
360 smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
361          unsigned int smb_buf_length)
362 {
363         struct kvec iov[2];
364         struct smb_rqst rqst = { .rq_iov = iov,
365                                  .rq_nvec = 2 };
366 
367         iov[0].iov_base = smb_buffer;
368         iov[0].iov_len = 4;
369         iov[1].iov_base = (char *)smb_buffer + 4;
370         iov[1].iov_len = smb_buf_length;
371 
372         return __smb_send_rqst(server, &rqst);
373 }
374 
375 static int
376 wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
377                       int *credits)
378 {
379         int rc;
380 
381         spin_lock(&server->req_lock);
382         if (timeout == CIFS_ASYNC_OP) {
383                 /* oplock breaks must not be held up */
384                 server->in_flight++;
385                 *credits -= 1;
386                 spin_unlock(&server->req_lock);
387                 return 0;
388         }
389 
390         while (1) {
391                 if (*credits <= 0) {
392                         spin_unlock(&server->req_lock);
393                         cifs_num_waiters_inc(server);
394                         rc = wait_event_killable(server->request_q,
395                                                  has_credits(server, credits));
396                         cifs_num_waiters_dec(server);
397                         if (rc)
398                                 return rc;
399                         spin_lock(&server->req_lock);
400                 } else {
401                         if (server->tcpStatus == CifsExiting) {
402                                 spin_unlock(&server->req_lock);
403                                 return -ENOENT;
404                         }
405 
406                         /*
407                          * Can not count locking commands against total
408                          * as they are allowed to block on server.
409                          */
410 
411                         /* update # of requests on the wire to server */
412                         if (timeout != CIFS_BLOCKING_OP) {
413                                 *credits -= 1;
414                                 server->in_flight++;
415                         }
416                         spin_unlock(&server->req_lock);
417                         break;
418                 }
419         }
420         return 0;
421 }
422 
423 static int
424 wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
425                       const int optype)
426 {
427         int *val;
428 
429         val = server->ops->get_credits_field(server, optype);
430         /* Since an echo is already inflight, no need to wait to send another */
431         if (*val <= 0 && optype == CIFS_ECHO_OP)
432                 return -EAGAIN;
433         return wait_for_free_credits(server, timeout, val);
434 }
435 
436 int
437 cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
438                       unsigned int *num, unsigned int *credits)
439 {
440         *num = size;
441         *credits = 0;
442         return 0;
443 }
444 
445 static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
446                         struct mid_q_entry **ppmidQ)
447 {
448         if (ses->server->tcpStatus == CifsExiting) {
449                 return -ENOENT;
450         }
451 
452         if (ses->server->tcpStatus == CifsNeedReconnect) {
453                 cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
454                 return -EAGAIN;
455         }
456 
457         if (ses->status == CifsNew) {
458                 if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
459                         (in_buf->Command != SMB_COM_NEGOTIATE))
460                         return -EAGAIN;
461                 /* else ok - we are setting up session */
462         }
463 
464         if (ses->status == CifsExiting) {
465                 /* check if SMB session is bad because we are setting it up */
466                 if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
467                         return -EAGAIN;
468                 /* else ok - we are shutting down session */
469         }
470 
471         *ppmidQ = AllocMidQEntry(in_buf, ses->server);
472         if (*ppmidQ == NULL)
473                 return -ENOMEM;
474         spin_lock(&GlobalMid_Lock);
475         list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
476         spin_unlock(&GlobalMid_Lock);
477         return 0;
478 }
479 
480 static int
481 wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
482 {
483         int error;
484 
485         error = wait_event_freezekillable_unsafe(server->response_q,
486                                     midQ->mid_state != MID_REQUEST_SUBMITTED);
487         if (error < 0)
488                 return -ERESTARTSYS;
489 
490         return 0;
491 }
492 
493 struct mid_q_entry *
494 cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
495 {
496         int rc;
497         struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
498         struct mid_q_entry *mid;
499 
500         if (rqst->rq_iov[0].iov_len != 4 ||
501             rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
502                 return ERR_PTR(-EIO);
503 
504         /* enable signing if server requires it */
505         if (server->sign)
506                 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
507 
508         mid = AllocMidQEntry(hdr, server);
509         if (mid == NULL)
510                 return ERR_PTR(-ENOMEM);
511 
512         rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
513         if (rc) {
514                 DeleteMidQEntry(mid);
515                 return ERR_PTR(rc);
516         }
517 
518         return mid;
519 }
520 
521 /*
522  * Send a SMB request and set the callback function in the mid to handle
523  * the result. Caller is responsible for dealing with timeouts.
524  */
525 int
526 cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
527                 mid_receive_t *receive, mid_callback_t *callback,
528                 mid_handle_t *handle, void *cbdata, const int flags)
529 {
530         int rc, timeout, optype;
531         struct mid_q_entry *mid;
532         unsigned int credits = 0;
533 
534         timeout = flags & CIFS_TIMEOUT_MASK;
535         optype = flags & CIFS_OP_MASK;
536 
537         if ((flags & CIFS_HAS_CREDITS) == 0) {
538                 rc = wait_for_free_request(server, timeout, optype);
539                 if (rc)
540                         return rc;
541                 credits = 1;
542         }
543 
544         mutex_lock(&server->srv_mutex);
545         mid = server->ops->setup_async_request(server, rqst);
546         if (IS_ERR(mid)) {
547                 mutex_unlock(&server->srv_mutex);
548                 add_credits_and_wake_if(server, credits, optype);
549                 return PTR_ERR(mid);
550         }
551 
552         mid->receive = receive;
553         mid->callback = callback;
554         mid->callback_data = cbdata;
555         mid->handle = handle;
556         mid->mid_state = MID_REQUEST_SUBMITTED;
557 
558         /* put it on the pending_mid_q */
559         spin_lock(&GlobalMid_Lock);
560         list_add_tail(&mid->qhead, &server->pending_mid_q);
561         spin_unlock(&GlobalMid_Lock);
562 
563         /*
564          * Need to store the time in mid before calling I/O. For call_async,
565          * I/O response may come back and free the mid entry on another thread.
566          */
567         cifs_save_when_sent(mid);
568         cifs_in_send_inc(server);
569         rc = smb_send_rqst(server, rqst, flags);
570         cifs_in_send_dec(server);
571 
572         if (rc < 0) {
573                 server->sequence_number -= 2;
574                 cifs_delete_mid(mid);
575         }
576 
577         mutex_unlock(&server->srv_mutex);
578 
579         if (rc == 0)
580                 return 0;
581 
582         add_credits_and_wake_if(server, credits, optype);
583         return rc;
584 }
585 
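/*
 * Illustrative userspace sketch of the completion pattern cifs_call_async()
 * relies on (hypothetical sketch_* names, not kernel code): the caller stores
 * a callback plus opaque callback_data in the mid before it goes on the wire,
 * and whichever context later completes the request invokes that callback;
 * the default set up in AllocMidQEntry() simply wakes the sending task.
 */
#include <stdio.h>

struct sketch_mid {
	void (*callback)(struct sketch_mid *mid);
	void *callback_data;
};

static void write_done(struct sketch_mid *mid)
{
	printf("async completion for: %s\n", (const char *)mid->callback_data);
}

int main(void)
{
	struct sketch_mid mid = {
		.callback	= write_done,
		.callback_data	= "write request",
	};

	/* ...request is sent; later the receive path completes the mid: */
	mid.callback(&mid);
	return 0;
}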
586 /*
587  *
588  * Send an SMB Request.  No response info (other than return code)
589  * needs to be parsed.
590  *
591  * flags indicate the type of request buffer and how long to wait
592  * and whether to log NT STATUS code (error) before mapping it to POSIX error
593  *
594  */
595 int
596 SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
597                  char *in_buf, int flags)
598 {
599         int rc;
600         struct kvec iov[1];
601         struct kvec rsp_iov;
602         int resp_buf_type;
603 
604         iov[0].iov_base = in_buf;
605         iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
606         flags |= CIFS_NO_RESP;
607         rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
608         cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
609 
610         return rc;
611 }
612 
613 static int
614 cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
615 {
616         int rc = 0;
617 
618         cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
619                  __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
620 
621         spin_lock(&GlobalMid_Lock);
622         switch (mid->mid_state) {
623         case MID_RESPONSE_RECEIVED:
624                 spin_unlock(&GlobalMid_Lock);
625                 return rc;
626         case MID_RETRY_NEEDED:
627                 rc = -EAGAIN;
628                 break;
629         case MID_RESPONSE_MALFORMED:
630                 rc = -EIO;
631                 break;
632         case MID_SHUTDOWN:
633                 rc = -EHOSTDOWN;
634                 break;
635         default:
636                 list_del_init(&mid->qhead);
637                 cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
638                          __func__, mid->mid, mid->mid_state);
639                 rc = -EIO;
640         }
641         spin_unlock(&GlobalMid_Lock);
642 
643         DeleteMidQEntry(mid);
644         return rc;
645 }
646 
647 static inline int
648 send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
649             struct mid_q_entry *mid)
650 {
651         return server->ops->send_cancel ?
652                                 server->ops->send_cancel(server, rqst, mid) : 0;
653 }
654 
655 int
656 cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
657                    bool log_error)
658 {
659         unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
660 
661         dump_smb(mid->resp_buf, min_t(u32, 92, len));
662 
663         /* convert the length into a more usable form */
664         if (server->sign) {
665                 struct kvec iov[2];
666                 int rc = 0;
667                 struct smb_rqst rqst = { .rq_iov = iov,
668                                          .rq_nvec = 2 };
669 
670                 iov[0].iov_base = mid->resp_buf;
671                 iov[0].iov_len = 4;
672                 iov[1].iov_base = (char *)mid->resp_buf + 4;
673                 iov[1].iov_len = len - 4;
674                 /* FIXME: add code to kill session */
675                 rc = cifs_verify_signature(&rqst, server,
676                                            mid->sequence_number);
677                 if (rc)
678                         cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
679                                  rc);
680         }
681 
682         /* BB special case reconnect tid and uid here? */
683         return map_smb_to_linux_error(mid->resp_buf, log_error);
684 }
685 
686 struct mid_q_entry *
687 cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
688 {
689         int rc;
690         struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
691         struct mid_q_entry *mid;
692 
693         if (rqst->rq_iov[0].iov_len != 4 ||
694             rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
695                 return ERR_PTR(-EIO);
696 
697         rc = allocate_mid(ses, hdr, &mid);
698         if (rc)
699                 return ERR_PTR(rc);
700         rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
701         if (rc) {
702                 cifs_delete_mid(mid);
703                 return ERR_PTR(rc);
704         }
705         return mid;
706 }
707 
708 int
709 cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
710                struct smb_rqst *rqst, int *resp_buf_type, const int flags,
711                struct kvec *resp_iov)
712 {
713         int rc = 0;
714         int timeout, optype;
715         struct mid_q_entry *midQ;
716         unsigned int credits = 1;
717         char *buf;
718 
719         timeout = flags & CIFS_TIMEOUT_MASK;
720         optype = flags & CIFS_OP_MASK;
721 
722         *resp_buf_type = CIFS_NO_BUFFER;  /* no response buf yet */
723 
724         if ((ses == NULL) || (ses->server == NULL)) {
725                 cifs_dbg(VFS, "Null session\n");
726                 return -EIO;
727         }
728 
729         if (ses->server->tcpStatus == CifsExiting)
730                 return -ENOENT;
731 
732         /*
733          * Ensure that we do not send more than 50 overlapping requests
734          * to the same server. We may make this configurable later or
735          * use ses->maxReq.
736          */
737 
738         rc = wait_for_free_request(ses->server, timeout, optype);
739         if (rc)
740                 return rc;
741 
742         /*
743          * Make sure that we sign in the same order that we send on this socket
744          * and avoid races inside tcp sendmsg code that could cause corruption
745          * of smb data.
746          */
747 
748         mutex_lock(&ses->server->srv_mutex);
749 
750         midQ = ses->server->ops->setup_request(ses, rqst);
751         if (IS_ERR(midQ)) {
752                 mutex_unlock(&ses->server->srv_mutex);
753                 /* Update # of requests on wire to server */
754                 add_credits(ses->server, 1, optype);
755                 return PTR_ERR(midQ);
756         }
757 
758         midQ->mid_state = MID_REQUEST_SUBMITTED;
759         cifs_in_send_inc(ses->server);
760         rc = smb_send_rqst(ses->server, rqst, flags);
761         cifs_in_send_dec(ses->server);
762         cifs_save_when_sent(midQ);
763 
764         if (rc < 0)
765                 ses->server->sequence_number -= 2;
766         mutex_unlock(&ses->server->srv_mutex);
767 
768         if (rc < 0)
769                 goto out;
770 
771 #ifdef CONFIG_CIFS_SMB311
772         if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
773                 smb311_update_preauth_hash(ses, rqst->rq_iov+1,
774                                            rqst->rq_nvec-1);
775 #endif
776 
777         if (timeout == CIFS_ASYNC_OP)
778                 goto out;
779 
780         rc = wait_for_response(ses->server, midQ);
781         if (rc != 0) {
782                 cifs_dbg(FYI, "Cancelling wait for mid %llu\n", midQ->mid);
783                 send_cancel(ses->server, rqst, midQ);
784                 spin_lock(&GlobalMid_Lock);
785                 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
786                         midQ->mid_flags |= MID_WAIT_CANCELLED;
787                         midQ->callback = DeleteMidQEntry;
788                         spin_unlock(&GlobalMid_Lock);
789                         add_credits(ses->server, 1, optype);
790                         return rc;
791                 }
792                 spin_unlock(&GlobalMid_Lock);
793         }
794 
795         rc = cifs_sync_mid_result(midQ, ses->server);
796         if (rc != 0) {
797                 add_credits(ses->server, 1, optype);
798                 return rc;
799         }
800 
801         if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
802                 rc = -EIO;
803                 cifs_dbg(FYI, "Bad MID state?\n");
804                 goto out;
805         }
806 
807         buf = (char *)midQ->resp_buf;
808         resp_iov->iov_base = buf;
809         resp_iov->iov_len = midQ->resp_buf_size +
810                 ses->server->vals->header_preamble_size;
811         if (midQ->large_buf)
812                 *resp_buf_type = CIFS_LARGE_BUFFER;
813         else
814                 *resp_buf_type = CIFS_SMALL_BUFFER;
815 
816 #ifdef CONFIG_CIFS_SMB311
817         if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
818                 struct kvec iov = {
819                         .iov_base = buf + 4,
820                         .iov_len = get_rfc1002_length(buf)
821                 };
822                 smb311_update_preauth_hash(ses, &iov, 1);
823         }
824 #endif
825 
826         credits = ses->server->ops->get_credits(midQ);
827 
828         rc = ses->server->ops->check_receive(midQ, ses->server,
829                                              flags & CIFS_LOG_ERROR);
830 
831         /* mark it so buf will not be freed by cifs_delete_mid */
832         if ((flags & CIFS_NO_RESP) == 0)
833                 midQ->resp_buf = NULL;
834 out:
835         cifs_delete_mid(midQ);
836         add_credits(ses->server, credits, optype);
837 
838         return rc;
839 }
840 
841 int
842 SendReceive2(const unsigned int xid, struct cifs_ses *ses,
843              struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
844              const int flags, struct kvec *resp_iov)
845 {
846         struct smb_rqst rqst;
847         struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
848         int rc;
849 
850         if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
851                 new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1),
852                                   GFP_KERNEL);
853                 if (!new_iov) {
854                         /* otherwise cifs_send_recv below sets resp_buf_type */
855                         *resp_buf_type = CIFS_NO_BUFFER;
856                         return -ENOMEM;
857                 }
858         } else
859                 new_iov = s_iov;
860 
861         /* 1st iov is a RFC1001 length followed by the rest of the packet */
862         memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
863 
864         new_iov[0].iov_base = new_iov[1].iov_base;
865         new_iov[0].iov_len = 4;
866         new_iov[1].iov_base += 4;
867         new_iov[1].iov_len -= 4;
868 
869         memset(&rqst, 0, sizeof(struct smb_rqst));
870         rqst.rq_iov = new_iov;
871         rqst.rq_nvec = n_vec + 1;
872 
873         rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
874         if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
875                 kfree(new_iov);
876         return rc;
877 }
878 
879 /* Like SendReceive2 but iov[0] does not contain an rfc1002 header */
880 int
881 smb2_send_recv(const unsigned int xid, struct cifs_ses *ses,
882                struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
883                const int flags, struct kvec *resp_iov)
884 {
885         struct smb_rqst rqst;
886         struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
887         int rc;
888         int i;
889         __u32 count;
890         __be32 rfc1002_marker;
891 
892         if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
893                 new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1),
894                                   GFP_KERNEL);
895                 if (!new_iov)
896                         return -ENOMEM;
897         } else
898                 new_iov = s_iov;
899 
900         /* 1st iov is an RFC1002 Session Message length */
901         memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
902 
903         count = 0;
904         for (i = 1; i < n_vec + 1; i++)
905                 count += new_iov[i].iov_len;
906 
907         rfc1002_marker = cpu_to_be32(count);
908 
909         new_iov[0].iov_base = &rfc1002_marker;
910         new_iov[0].iov_len = 4;
911 
912         memset(&rqst, 0, sizeof(struct smb_rqst));
913         rqst.rq_iov = new_iov;
914         rqst.rq_nvec = n_vec + 1;
915 
916         rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
917         if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
918                 kfree(new_iov);
919         return rc;
920 }
921 
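/*
 * Illustrative userspace sketch (hypothetical buffer sizes, not kernel code)
 * of the framing smb2_send_recv() builds above: iov[0] is filled with a
 * 4-byte big-endian count of everything that follows, which is the same
 * value the receive side later pulls back out with get_rfc1002_length().
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char payload[100] = { 0 };	/* stand-in for the SMB2 packet */
	unsigned char frame[4 + sizeof(payload)];
	uint32_t marker = htonl(sizeof(payload));	/* cpu_to_be32(count) */

	memcpy(frame, &marker, 4);			/* iov[0]: length field */
	memcpy(frame + 4, payload, sizeof(payload));	/* iov[1..n]: the body */

	printf("on-the-wire frame: %zu bytes (4 + %zu)\n",
	       sizeof(frame), sizeof(payload));
	return 0;
}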
922 int
923 SendReceive(const unsigned int xid, struct cifs_ses *ses,
924             struct smb_hdr *in_buf, struct smb_hdr *out_buf,
925             int *pbytes_returned, const int timeout)
926 {
927         int rc = 0;
928         struct mid_q_entry *midQ;
929         unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
930         struct kvec iov = { .iov_base = in_buf, .iov_len = len };
931         struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
932 
933         if (ses == NULL) {
934                 cifs_dbg(VFS, "Null smb session\n");
935                 return -EIO;
936         }
937         if (ses->server == NULL) {
938                 cifs_dbg(VFS, "Null tcp session\n");
939                 return -EIO;
940         }
941 
942         if (ses->server->tcpStatus == CifsExiting)
943                 return -ENOENT;
944 
945         /* Ensure that we do not send more than 50 overlapping requests
946            to the same server. We may make this configurable later or
947            use ses->maxReq */
948 
949         if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
950                 cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
951                          len);
952                 return -EIO;
953         }
954 
955         rc = wait_for_free_request(ses->server, timeout, 0);
956         if (rc)
957                 return rc;
958 
959         /* make sure that we sign in the same order that we send on this socket
960            and avoid races inside tcp sendmsg code that could cause corruption
961            of smb data */
962 
963         mutex_lock(&ses->server->srv_mutex);
964 
965         rc = allocate_mid(ses, in_buf, &midQ);
966         if (rc) {
967                 mutex_unlock(&ses->server->srv_mutex);
968                 /* Update # of requests on wire to server */
969                 add_credits(ses->server, 1, 0);
970                 return rc;
971         }
972 
973         rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
974         if (rc) {
975                 mutex_unlock(&ses->server->srv_mutex);
976                 goto out;
977         }
978 
979         midQ->mid_state = MID_REQUEST_SUBMITTED;
980 
981         cifs_in_send_inc(ses->server);
982         rc = smb_send(ses->server, in_buf, len);
983         cifs_in_send_dec(ses->server);
984         cifs_save_when_sent(midQ);
985 
986         if (rc < 0)
987                 ses->server->sequence_number -= 2;
988 
989         mutex_unlock(&ses->server->srv_mutex);
990 
991         if (rc < 0)
992                 goto out;
993 
994         if (timeout == CIFS_ASYNC_OP)
995                 goto out;
996 
997         rc = wait_for_response(ses->server, midQ);
998         if (rc != 0) {
999                 send_cancel(ses->server, &rqst, midQ);
1000                 spin_lock(&GlobalMid_Lock);
1001                 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1002                         /* no longer considered to be "in-flight" */
1003                         midQ->callback = DeleteMidQEntry;
1004                         spin_unlock(&GlobalMid_Lock);
1005                         add_credits(ses->server, 1, 0);
1006                         return rc;
1007                 }
1008                 spin_unlock(&GlobalMid_Lock);
1009         }
1010 
1011         rc = cifs_sync_mid_result(midQ, ses->server);
1012         if (rc != 0) {
1013                 add_credits(ses->server, 1, 0);
1014                 return rc;
1015         }
1016 
1017         if (!midQ->resp_buf || !out_buf ||
1018             midQ->mid_state != MID_RESPONSE_RECEIVED) {
1019                 rc = -EIO;
1020                 cifs_dbg(VFS, "Bad MID state?\n");
1021                 goto out;
1022         }
1023 
1024         *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1025         memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1026         rc = cifs_check_receive(midQ, ses->server, 0);
1027 out:
1028         cifs_delete_mid(midQ);
1029         add_credits(ses->server, 1, 0);
1030 
1031         return rc;
1032 }
1033 
1034 /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1035    blocking lock to return. */
1036 
1037 static int
1038 send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
1039                         struct smb_hdr *in_buf,
1040                         struct smb_hdr *out_buf)
1041 {
1042         int bytes_returned;
1043         struct cifs_ses *ses = tcon->ses;
1044         LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1045 
1046         /* We just modify the current in_buf to change
1047            the type of lock from LOCKING_ANDX_SHARED_LOCK
1048            or LOCKING_ANDX_EXCLUSIVE_LOCK to
1049            LOCKING_ANDX_CANCEL_LOCK. */
1050 
1051         pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1052         pSMB->Timeout = 0;
1053         pSMB->hdr.Mid = get_next_mid(ses->server);
1054 
1055         return SendReceive(xid, ses, in_buf, out_buf,
1056                         &bytes_returned, 0);
1057 }
1058 
1059 int
1060 SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
1061             struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1062             int *pbytes_returned)
1063 {
1064         int rc = 0;
1065         int rstart = 0;
1066         struct mid_q_entry *midQ;
1067         struct cifs_ses *ses;
1068         unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1069         struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1070         struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1071 
1072         if (tcon == NULL || tcon->ses == NULL) {
1073                 cifs_dbg(VFS, "Null smb session\n");
1074                 return -EIO;
1075         }
1076         ses = tcon->ses;
1077 
1078         if (ses->server == NULL) {
1079                 cifs_dbg(VFS, "Null tcp session\n");
1080                 return -EIO;
1081         }
1082 
1083         if (ses->server->tcpStatus == CifsExiting)
1084                 return -ENOENT;
1085 
1086         /* Ensure that we do not send more than 50 overlapping requests
1087            to the same server. We may make this configurable later or
1088            use ses->maxReq */
1089 
1090         if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1091                 cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
1092                          len);
1093                 return -EIO;
1094         }
1095 
1096         rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
1097         if (rc)
1098                 return rc;
1099 
1100         /* make sure that we sign in the same order that we send on this socket
1101            and avoid races inside tcp sendmsg code that could cause corruption
1102            of smb data */
1103 
1104         mutex_lock(&ses->server->srv_mutex);
1105 
1106         rc = allocate_mid(ses, in_buf, &midQ);
1107         if (rc) {
1108                 mutex_unlock(&ses->server->srv_mutex);
1109                 return rc;
1110         }
1111 
1112         rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
1113         if (rc) {
1114                 cifs_delete_mid(midQ);
1115                 mutex_unlock(&ses->server->srv_mutex);
1116                 return rc;
1117         }
1118 
1119         midQ->mid_state = MID_REQUEST_SUBMITTED;
1120         cifs_in_send_inc(ses->server);
1121         rc = smb_send(ses->server, in_buf, len);
1122         cifs_in_send_dec(ses->server);
1123         cifs_save_when_sent(midQ);
1124 
1125         if (rc < 0)
1126                 ses->server->sequence_number -= 2;
1127 
1128         mutex_unlock(&ses->server->srv_mutex);
1129 
1130         if (rc < 0) {
1131                 cifs_delete_mid(midQ);
1132                 return rc;
1133         }
1134 
1135         /* Wait for a reply - allow signals to interrupt. */
1136         rc = wait_event_interruptible(ses->server->response_q,
1137                 (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
1138                 ((ses->server->tcpStatus != CifsGood) &&
1139                  (ses->server->tcpStatus != CifsNew)));
1140 
1141         /* Were we interrupted by a signal ? */
1142         if ((rc == -ERESTARTSYS) &&
1143                 (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
1144                 ((ses->server->tcpStatus == CifsGood) ||
1145                  (ses->server->tcpStatus == CifsNew))) {
1146 
1147                 if (in_buf->Command == SMB_COM_TRANSACTION2) {
1148                         /* POSIX lock. We send a NT_CANCEL SMB to cause the
1149                            blocking lock to return. */
1150                         rc = send_cancel(ses->server, &rqst, midQ);
1151                         if (rc) {
1152                                 cifs_delete_mid(midQ);
1153                                 return rc;
1154                         }
1155                 } else {
1156                         /* Windows lock. We send a LOCKINGX_CANCEL_LOCK
1157                            to cause the blocking lock to return. */
1158 
1159                         rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
1160 
1161                         /* If we get -ENOLCK back the lock may have
1162                            already been removed. Don't exit in this case. */
1163                         if (rc && rc != -ENOLCK) {
1164                                 cifs_delete_mid(midQ);
1165                                 return rc;
1166                         }
1167                 }
1168 
1169                 rc = wait_for_response(ses->server, midQ);
1170                 if (rc) {
1171                         send_cancel(ses->server, &rqst, midQ);
1172                         spin_lock(&GlobalMid_Lock);
1173                         if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1174                                 /* no longer considered to be "in-flight" */
1175                                 midQ->callback = DeleteMidQEntry;
1176                                 spin_unlock(&GlobalMid_Lock);
1177                                 return rc;
1178                         }
1179                         spin_unlock(&GlobalMid_Lock);
1180                 }
1181 
1182                 /* We got the response - restart system call. */
1183                 rstart = 1;
1184         }
1185 
1186         rc = cifs_sync_mid_result(midQ, ses->server);
1187         if (rc != 0)
1188                 return rc;
1189 
1190         /* rcvd frame is ok */
1191         if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
1192                 rc = -EIO;
1193                 cifs_dbg(VFS, "Bad MID state?\n");
1194                 goto out;
1195         }
1196 
1197         *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1198         memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1199         rc = cifs_check_receive(midQ, ses->server, 0);
1200 out:
1201         cifs_delete_mid(midQ);
1202         if (rstart && rc == -EACCES)
1203                 return -ERESTARTSYS;
1204         return rc;
1205 }
1206 
