/*
 * fs/cifs/misc.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
#include <linux/vmalloc.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smberr.h"
#include "nterr.h"
#include "cifs_unicode.h"
#include "smb2pdu.h"

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;

/* The xid serves as a useful identifier for each incoming vfs request,
   in a similar way to the mid which is useful to track each sent smb,
   and CurrentXid can also provide a running counter (although it
   will eventually wrap past zero) of the total vfs operations handled
   since the cifs fs was mounted */

unsigned int
_get_xid(void)
{
	unsigned int xid;

	spin_lock(&GlobalMid_Lock);
	GlobalTotalActiveXid++;

	/* keep high water mark for number of simultaneous ops in filesystem */
	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
		GlobalMaxActiveXid = GlobalTotalActiveXid;
	if (GlobalTotalActiveXid > 65000)
		cifs_dbg(FYI, "warning: more than 65000 requests active\n");
	xid = GlobalCurrentXid++;
	spin_unlock(&GlobalMid_Lock);
	return xid;
}

void
_free_xid(unsigned int xid)
{
	spin_lock(&GlobalMid_Lock);
	/* if (GlobalTotalActiveXid == 0)
		BUG(); */
	GlobalTotalActiveXid--;
	spin_unlock(&GlobalMid_Lock);
}

struct cifs_ses *
sesInfoAlloc(void)
{
	struct cifs_ses *ret_buf;

	ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&sesInfoAllocCount);
		ret_buf->status = CifsNew;
		++ret_buf->ses_count;
		INIT_LIST_HEAD(&ret_buf->smb_ses_list);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
		mutex_init(&ret_buf->session_mutex);
		spin_lock_init(&ret_buf->iface_lock);
	}
	return ret_buf;
}

void
sesInfoFree(struct cifs_ses *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
		return;
	}

	atomic_dec(&sesInfoAllocCount);
	kfree(buf_to_free->serverOS);
	kfree(buf_to_free->serverDomain);
	kfree(buf_to_free->serverNOS);
	kzfree(buf_to_free->password);
	kfree(buf_to_free->user_name);
	kfree(buf_to_free->domainName);
	kzfree(buf_to_free->auth_key.response);
	kfree(buf_to_free->iface_list);
	kzfree(buf_to_free);
}

struct cifs_tcon *
tconInfoAlloc(void)
{
	struct cifs_tcon *ret_buf;

	ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
	if (!ret_buf)
		return NULL;
	ret_buf->crfid.fid = kzalloc(sizeof(*ret_buf->crfid.fid), GFP_KERNEL);
	if (!ret_buf->crfid.fid) {
		kfree(ret_buf);
		return NULL;
	}

	atomic_inc(&tconInfoAllocCount);
	ret_buf->tidStatus = CifsNew;
	++ret_buf->tc_count;
	INIT_LIST_HEAD(&ret_buf->openFileList);
	INIT_LIST_HEAD(&ret_buf->tcon_list);
	spin_lock_init(&ret_buf->open_file_lock);
	mutex_init(&ret_buf->crfid.fid_mutex);
	spin_lock_init(&ret_buf->stat_lock);
	atomic_set(&ret_buf->num_local_opens, 0);
	atomic_set(&ret_buf->num_remote_opens, 0);

	return ret_buf;
}

void
tconInfoFree(struct cifs_tcon *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
		return;
	}
	atomic_dec(&tconInfoAllocCount);
	kfree(buf_to_free->nativeFileSystem);
	kzfree(buf_to_free->password);
	kfree(buf_to_free->crfid.fid);
#ifdef CONFIG_CIFS_DFS_UPCALL
	kfree(buf_to_free->dfs_path);
#endif
	kfree(buf_to_free);
}

struct smb_hdr *
cifs_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;
	/*
	 * SMB2 header is bigger than CIFS one - no problems to clean some
	 * more bytes for CIFS.
	 */
	size_t buf_size = sizeof(struct smb2_sync_hdr);

	/*
	 * We could use negotiated size instead of max_msgsize -
	 * but it may be more efficient to always alloc same size
	 * albeit slightly larger than necessary and maxbuffersize
	 * defaults to this and can not be bigger.
	 */
	ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

	/* clear the first few header bytes */
	/* for most paths, more is cleared in header_assemble */
	memset(ret_buf, 0, buf_size + 3);
	atomic_inc(&bufAllocCount);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&totBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}

void
cifs_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		/* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/
		return;
	}
	mempool_free(buf_to_free, cifs_req_poolp);

	atomic_dec(&bufAllocCount);
	return;
}

struct smb_hdr *
cifs_small_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;

	/* We could use negotiated size instead of max_msgsize -
	   but it may be more efficient to always alloc same size
	   albeit slightly larger than necessary and maxbuffersize
	   defaults to this and can not be bigger */
	ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
	/* No need to clear memory here, cleared in header assemble */
	/* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
	atomic_inc(&smBufAllocCount);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&totSmBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}

void
cifs_small_buf_release(void *buf_to_free)
{

	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");
		return;
	}
	mempool_free(buf_to_free, cifs_sm_req_poolp);

	atomic_dec(&smBufAllocCount);
	return;
}

void
free_rsp_buf(int resp_buftype, void *rsp)
{
	if (resp_buftype == CIFS_SMALL_BUFFER)
		cifs_small_buf_release(rsp);
	else if (resp_buftype == CIFS_LARGE_BUFFER)
		cifs_buf_release(rsp);
}

/* NB: MID can not be set if treeCon not passed in, in that
   case it is the responsibility of the caller to set the mid */
void
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
		const struct cifs_tcon *treeCon, int word_count
		/* length of fixed section (word count) in two byte units */)
{
	char *temp = (char *) buffer;

	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */

	buffer->smb_buf_length = cpu_to_be32(
	    (2 * word_count) + sizeof(struct smb_hdr) -
	    4 /* RFC 1001 length field does not count */ +
	    2 /* for bcc field itself */) ;

	buffer->Protocol[0] = 0xFF;
	buffer->Protocol[1] = 'S';
	buffer->Protocol[2] = 'M';
	buffer->Protocol[3] = 'B';
	buffer->Command = smb_command;
	buffer->Flags = 0x00;	/* case sensitive */
	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
	buffer->Pid = cpu_to_le16((__u16)current->tgid);
	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
	if (treeCon) {
		buffer->Tid = treeCon->tid;
		if (treeCon->ses) {
			if (treeCon->ses->capabilities & CAP_UNICODE)
				buffer->Flags2 |= SMBFLG2_UNICODE;
			if (treeCon->ses->capabilities & CAP_STATUS32)
				buffer->Flags2 |= SMBFLG2_ERR_STATUS;

			/* Uid is not converted */
			buffer->Uid = treeCon->ses->Suid;
			buffer->Mid = get_next_mid(treeCon->ses->server);
		}
		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
			buffer->Flags2 |= SMBFLG2_DFS;
		if (treeCon->nocase)
			buffer->Flags |= SMBFLG_CASELESS;
		if ((treeCon->ses) && (treeCon->ses->server))
			if (treeCon->ses->server->sign)
				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
	}

	/* endian conversion of flags is now done just before sending */
	buffer->WordCount = (char) word_count;
	return;
}

static int
check_smb_hdr(struct smb_hdr *smb)
{
	/* does it have the right SMB "signature" ? */
	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
		cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
			 *(unsigned int *)smb->Protocol);
		return 1;
	}

	/* if it's a response then accept */
	if (smb->Flags & SMBFLG_RESPONSE)
		return 0;

	/* only one valid case where server sends us request */
	if (smb->Command == SMB_COM_LOCKING_ANDX)
		return 0;

	cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
		 get_mid(smb));
	return 1;
}

int
checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
{
	struct smb_hdr *smb = (struct smb_hdr *)buf;
	__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
	__u32 clc_len;  /* calculated length */
	cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",
		 total_read, rfclen);

	/* is this frame too small to even get to a BCC? */
	if (total_read < 2 + sizeof(struct smb_hdr)) {
		if ((total_read >= sizeof(struct smb_hdr) - 1)
			    && (smb->Status.CifsError != 0)) {
			/* it's an error return */
			smb->WordCount = 0;
			/* some error cases do not return wct and bcc */
			return 0;
		} else if ((total_read == sizeof(struct smb_hdr) + 1) &&
				(smb->WordCount == 0)) {
			char *tmp = (char *)smb;
			/* Need to work around a bug in two servers here */
			/* First, check if the part of bcc they sent was zero */
			if (tmp[sizeof(struct smb_hdr)] == 0) {
				/* some servers return only half of bcc
				 * on simple responses (wct, bcc both zero)
				 * in particular have seen this on
				 * ulogoffX and FindClose. This leaves
				 * one byte of bcc potentially uninitialized
				 */
				/* zero rest of bcc */
				tmp[sizeof(struct smb_hdr)+1] = 0;
				return 0;
			}
			cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");
		} else {
			cifs_dbg(VFS, "Length less than smb header size\n");
		}
		return -EIO;
	}

	/* otherwise, there is enough to get to the BCC */
	if (check_smb_hdr(smb))
		return -EIO;
	clc_len = smbCalcSize(smb, server);

	if (4 + rfclen != total_read) {
		cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
			 rfclen);
		return -EIO;
	}

	if (4 + rfclen != clc_len) {
		__u16 mid = get_mid(smb);
		/* check if bcc wrapped around for large read responses */
		if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
			/* check if lengths match mod 64K */
			if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
				return 0; /* bcc wrapped */
		}
		cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
			 clc_len, 4 + rfclen, mid);

		if (4 + rfclen < clc_len) {
			cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		} else if (rfclen > clc_len + 512) {
			/*
			 * Some servers (Windows XP in particular) send more
			 * data than the lengths in the SMB packet would
			 * indicate on certain calls (byte range locks and
			 * trans2 find first calls in particular). While the
			 * client can handle such a frame by ignoring the
			 * trailing data, we choose to limit the amount of
			 * extra data to 512 bytes.
			 */
			cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		}
	}
	return 0;
}

bool
is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
{
	struct smb_hdr *buf = (struct smb_hdr *)buffer;
	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
	struct list_head *tmp, *tmp1, *tmp2;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct cifsFileInfo *netfile;

	cifs_dbg(FYI, "Checking for oplock break or dnotify response\n");
	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
	    (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
		struct smb_com_transaction_change_notify_rsp *pSMBr =
			(struct smb_com_transaction_change_notify_rsp *)buf;
		struct file_notify_information *pnotify;
		__u32 data_offset = 0;
		size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);

		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
			data_offset = le32_to_cpu(pSMBr->DataOffset);

			if (data_offset >
			    len - sizeof(struct file_notify_information)) {
				cifs_dbg(FYI, "invalid data_offset %u\n",
					 data_offset);
				return true;
			}
			pnotify = (struct file_notify_information *)
				((char *)&pSMBr->hdr.Protocol + data_offset);
			cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
				 pnotify->FileName, pnotify->Action);
			/* cifs_dump_mem("Rcvd notify Data: ",buf,
				sizeof(struct smb_hdr)+60); */
			return true;
		}
		if (pSMBr->hdr.Status.CifsError) {
			cifs_dbg(FYI, "notify err 0x%x\n",
				 pSMBr->hdr.Status.CifsError);
			return true;
		}
		return false;
	}
	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
		return false;
	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
		/* no sense logging error on invalid handle on oplock
		   break - harmless race between close request and oplock
		   break response is expected from time to time when writing
		   out large dirty files cached on the client */
		if ((NT_STATUS_INVALID_HANDLE) ==
		    le32_to_cpu(pSMB->hdr.Status.CifsError)) {
			cifs_dbg(FYI, "invalid handle on oplock break\n");
			return true;
		} else if (ERRbadfid ==
		    le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
			return true;
		} else {
			return false; /* on valid oplock brk we get "request" */
		}
	}
	if (pSMB->hdr.WordCount != 8)
		return false;

	cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
		 pSMB->LockType, pSMB->OplockLevel);
	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
		return false;

	/* look up tcon based on tid & uid */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each(tmp, &srv->smb_ses_list) {
		ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
		list_for_each(tmp1, &ses->tcon_list) {
			tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
			if (tcon->tid != buf->Tid)
				continue;

			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
			spin_lock(&tcon->open_file_lock);
			list_for_each(tmp2, &tcon->openFileList) {
				netfile = list_entry(tmp2, struct cifsFileInfo,
						     tlist);
				if (pSMB->Fid != netfile->fid.netfid)
					continue;

				cifs_dbg(FYI, "file id match, oplock break\n");
				pCifsInode = CIFS_I(d_inode(netfile->dentry));

				set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
					&pCifsInode->flags);

				/*
				 * Set flag if the server downgrades the oplock
				 * to L2 else clear.
				 */
				if (pSMB->OplockLevel)
					set_bit(
					   CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
					   &pCifsInode->flags);
				else
					clear_bit(
					   CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
					   &pCifsInode->flags);

				cifs_queue_oplock_break(netfile);
				netfile->oplock_break_cancelled = false;

				spin_unlock(&tcon->open_file_lock);
				spin_unlock(&cifs_tcp_ses_lock);
				return true;
			}
			spin_unlock(&tcon->open_file_lock);
			spin_unlock(&cifs_tcp_ses_lock);
			cifs_dbg(FYI, "No matching file for oplock break\n");
			return true;
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
	return true;
}

void
dump_smb(void *buf, int smb_buf_length)
{
	if (traceSMB == 0)
		return;

	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf,
		       smb_buf_length, true);
}

void
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		struct cifs_tcon *tcon = NULL;

		if (cifs_sb->master_tlink)
			tcon = cifs_sb_master_tcon(cifs_sb);

		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
		cifs_sb->mnt_cifs_serverino_autodisabled = true;
		cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s.\n",
			 tcon ? tcon->treeName : "new server");
		cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS).\n");
		cifs_dbg(VFS, "Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n");

	}
}

void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
{
	oplock &= 0xF;

	if (oplock == OPLOCK_EXCLUSIVE) {
		cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
			 &cinode->vfs_inode);
	} else if (oplock == OPLOCK_READ) {
		cinode->oplock = CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
			 &cinode->vfs_inode);
	} else
		cinode->oplock = 0;
}

/*
 * We wait for oplock breaks to be processed before we attempt to perform
 * writes.
 */
int cifs_get_writer(struct cifsInodeInfo *cinode)
{
	int rc;

start:
	rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
			 TASK_KILLABLE);
	if (rc)
		return rc;

	spin_lock(&cinode->writers_lock);
	if (!cinode->writers)
		set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
	cinode->writers++;
	/* Check to see if we have started servicing an oplock break */
	if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
		cinode->writers--;
		if (cinode->writers == 0) {
			clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
			wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
		}
		spin_unlock(&cinode->writers_lock);
		goto start;
	}
	spin_unlock(&cinode->writers_lock);
	return 0;
}

void cifs_put_writer(struct cifsInodeInfo *cinode)
{
	spin_lock(&cinode->writers_lock);
	cinode->writers--;
	if (cinode->writers == 0) {
		clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
		wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
	}
	spin_unlock(&cinode->writers_lock);
}

/**
 * cifs_queue_oplock_break - queue the oplock break handler for cfile
 *
 * This function is called from the demultiplex thread when it
 * receives an oplock break for @cfile.
 *
 * Assumes the tcon->open_file_lock is held.
 * Assumes cfile->file_info_lock is NOT held.
 */
void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
{
	/*
	 * Bump the handle refcount now while we hold the
	 * open_file_lock to enforce the validity of it for the oplock
	 * break handler. The matching put is done at the end of the
	 * handler.
	 */
	cifsFileInfo_get(cfile);

	queue_work(cifsoplockd_wq, &cfile->oplock_break);
}

void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
{
	clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
	wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
}

bool
backup_cred(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
		if (uid_eq(cifs_sb->mnt_backupuid, current_fsuid()))
			return true;
	}
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
		if (in_group_p(cifs_sb->mnt_backupgid))
			return true;
	}

	return false;
}

void
cifs_del_pending_open(struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
	list_del(&open->olist);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}

void
cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
			     struct cifs_pending_open *open)
{
	memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
	open->oplock = CIFS_OPLOCK_NO_CHANGE;
	open->tlink = tlink;
	fid->pending_open = open;
	list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
}

void
cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
		      struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(tlink)->open_file_lock);
	cifs_add_pending_open_locked(fid, tlink, open);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}

/* parses DFS referral V3 structure
 * caller is responsible for freeing target_nodes
 * returns:
 * - on success - 0
 * - on failure - errno
 */
int
parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
		    unsigned int *num_of_nodes,
		    struct dfs_info3_param **target_nodes,
		    const struct nls_table *nls_codepage, int remap,
		    const char *searchName, bool is_unicode)
{
	int i, rc = 0;
	char *data_end;
	struct dfs_referral_level_3 *ref;

	*num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);

	if (*num_of_nodes < 1) {
		cifs_dbg(VFS, "num_referrals: must be at least > 0, but we get num_referrals = %d\n",
			 *num_of_nodes);
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
	if (ref->VersionNumber != cpu_to_le16(3)) {
		cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
			 le16_to_cpu(ref->VersionNumber));
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	/* get the upper boundary of the resp buffer */
	data_end = (char *)rsp + rsp_size;

	cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n",
		 *num_of_nodes, le32_to_cpu(rsp->DFSFlags));

	*target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param),
				GFP_KERNEL);
	if (*target_nodes == NULL) {
		rc = -ENOMEM;
		goto parse_DFS_referrals_exit;
	}

	/* collect necessary data from referrals */
	for (i = 0; i < *num_of_nodes; i++) {
		char *temp;
		int max_len;
		struct dfs_info3_param *node = (*target_nodes)+i;

		node->flags = le32_to_cpu(rsp->DFSFlags);
		if (is_unicode) {
			__le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
					      GFP_KERNEL);
			if (tmp == NULL) {
				rc = -ENOMEM;
				goto parse_DFS_referrals_exit;
			}
			cifsConvertToUTF16((__le16 *) tmp, searchName,
					   PATH_MAX, nls_codepage, remap);
			node->path_consumed = cifs_utf16_bytes(tmp,
					le16_to_cpu(rsp->PathConsumed),
					nls_codepage);
			kfree(tmp);
		} else
			node->path_consumed = le16_to_cpu(rsp->PathConsumed);

		node->server_type = le16_to_cpu(ref->ServerType);
		node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);

		/* copy DfsPath */
		temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
		max_len = data_end - temp;
		node->path_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->path_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		/* copy link target UNC */
		temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
		max_len = data_end - temp;
		node->node_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->node_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		node->ttl = le32_to_cpu(ref->TimeToLive);

		ref++;
	}

parse_DFS_referrals_exit:
	if (rc) {
		free_dfs_info_array(*target_nodes, *num_of_nodes);
		*target_nodes = NULL;
		*num_of_nodes = 0;
	}
	return rc;
}

struct cifs_aio_ctx *
cifs_aio_ctx_alloc(void)
{
	struct cifs_aio_ctx *ctx;

	/*
	 * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io
	 * to false so that we know when we have to unreference pages within
	 * cifs_aio_ctx_release()
	 */
	ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_LIST_HEAD(&ctx->list);
	mutex_init(&ctx->aio_mutex);
	init_completion(&ctx->done);
	kref_init(&ctx->refcount);
	return ctx;
}

void
cifs_aio_ctx_release(struct kref *refcount)
{
	struct cifs_aio_ctx *ctx = container_of(refcount,
					struct cifs_aio_ctx, refcount);

	cifsFileInfo_put(ctx->cfile);

	/*
	 * ctx->bv is only set if setup_aio_ctx_iter() was called successfully
	 * which means that iov_iter_get_pages() was a success and thus that
	 * we have taken reference on pages.
	 */
	if (ctx->bv) {
		unsigned i;

		for (i = 0; i < ctx->npages; i++) {
			if (ctx->should_dirty)
				set_page_dirty(ctx->bv[i].bv_page);
			put_page(ctx->bv[i].bv_page);
		}
		kvfree(ctx->bv);
	}

	kfree(ctx);
}

#define CIFS_AIO_KMALLOC_LIMIT (1024 * 1024)

int
setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
{
	ssize_t rc;
	unsigned int cur_npages;
	unsigned int npages = 0;
	unsigned int i;
	size_t len;
	size_t count = iov_iter_count(iter);
	unsigned int saved_len;
	size_t start;
	unsigned int max_pages = iov_iter_npages(iter, INT_MAX);
	struct page **pages = NULL;
	struct bio_vec *bv = NULL;

	if (iov_iter_is_kvec(iter)) {
		memcpy(&ctx->iter, iter, sizeof(struct iov_iter));
		ctx->len = count;
		iov_iter_advance(iter, count);
		return 0;
	}

	if (max_pages * sizeof(struct bio_vec) <= CIFS_AIO_KMALLOC_LIMIT)
		bv = kmalloc_array(max_pages, sizeof(struct bio_vec),
				   GFP_KERNEL);

	if (!bv) {
		bv = vmalloc(array_size(max_pages, sizeof(struct bio_vec)));
		if (!bv)
			return -ENOMEM;
	}

	if (max_pages * sizeof(struct page *) <= CIFS_AIO_KMALLOC_LIMIT)
		pages = kmalloc_array(max_pages, sizeof(struct page *),
				      GFP_KERNEL);

	if (!pages) {
		pages = vmalloc(array_size(max_pages, sizeof(struct page *)));
		if (!pages) {
			kvfree(bv);
			return -ENOMEM;
		}
	}

	saved_len = count;

	while (count && npages < max_pages) {
		rc = iov_iter_get_pages(iter, pages, count, max_pages, &start);
		if (rc < 0) {
			cifs_dbg(VFS, "couldn't get user pages (rc=%zd)\n", rc);
			break;
		}

		if (rc > count) {
			cifs_dbg(VFS, "get pages rc=%zd more than %zu\n", rc,
				 count);
			break;
		}

		iov_iter_advance(iter, rc);
		count -= rc;
		rc += start;
		cur_npages = DIV_ROUND_UP(rc, PAGE_SIZE);

		if (npages + cur_npages > max_pages) {
			cifs_dbg(VFS, "out of vec array capacity (%u vs %u)\n",
				 npages + cur_npages, max_pages);
			break;
		}

		for (i = 0; i < cur_npages; i++) {
			len = rc > PAGE_SIZE ? PAGE_SIZE : rc;
			bv[npages + i].bv_page = pages[i];
			bv[npages + i].bv_offset = start;
			bv[npages + i].bv_len = len - start;
			rc -= len;
			start = 0;
		}

		npages += cur_npages;
	}

	kvfree(pages);
	ctx->bv = bv;
	ctx->len = saved_len - count;
	ctx->npages = npages;
	iov_iter_bvec(&ctx->iter, rw, ctx->bv, npages, ctx->len);
	return 0;
}

/**
 * cifs_alloc_hash - allocate hash and hash context together
 *
 * The caller has to make sure @sdesc is initialized to either NULL or
 * a valid context. Both can be freed via cifs_free_hash().
 */
int
cifs_alloc_hash(const char *name,
		struct crypto_shash **shash, struct sdesc **sdesc)
{
	int rc = 0;
	size_t size;

	if (*sdesc != NULL)
		return 0;

	*shash = crypto_alloc_shash(name, 0, 0);
	if (IS_ERR(*shash)) {
		cifs_dbg(VFS, "could not allocate crypto %s\n", name);
		rc = PTR_ERR(*shash);
		*shash = NULL;
		*sdesc = NULL;
		return rc;
	}

	size = sizeof(struct shash_desc) + crypto_shash_descsize(*shash);
	*sdesc = kmalloc(size, GFP_KERNEL);
	if (*sdesc == NULL) {
		cifs_dbg(VFS, "no memory left to allocate crypto %s\n", name);
		crypto_free_shash(*shash);
		*shash = NULL;
		return -ENOMEM;
	}

	(*sdesc)->shash.tfm = *shash;
	return 0;
}

/**
 * cifs_free_hash - free hash and hash context together
 *
 * Freeing a NULL hash or context is safe.
 */
void
cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc)
{
	kfree(*sdesc);
	*sdesc = NULL;
	if (*shash)
		crypto_free_shash(*shash);
	*shash = NULL;
}

/**
 * rqst_page_get_length - obtain the length and offset for a page in smb_rqst
 * Input: rqst - a smb_rqst, page - a page index for rqst
 * Output: *len - the length for this page, *offset - the offset for this page
 */
void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
			  unsigned int *len, unsigned int *offset)
{
	*len = rqst->rq_pagesz;
	*offset = (page == 0) ? rqst->rq_offset : 0;

	if (rqst->rq_npages == 1 || page == rqst->rq_npages-1)
		*len = rqst->rq_tailsz;
	else if (page == 0)
		*len = rqst->rq_pagesz - rqst->rq_offset;
}

void extract_unc_hostname(const char *unc, const char **h, size_t *len)
{
	const char *end;

	/* skip initial slashes */
	while (*unc && (*unc == '\\' || *unc == '/'))
		unc++;

	end = unc;

	while (*end && !(*end == '\\' || *end == '/'))
		end++;

	*h = unc;
	*len = end - unc;
}

/**
 * copy_path_name - copy src path to dst, possibly truncating
 *
 * returns number of bytes written (including trailing nul)
 */
int copy_path_name(char *dst, const char *src)
{
	int name_len;

	/*
	 * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it
	 * will truncate and strlen(dst) will be PATH_MAX-1
	 */
	name_len = strscpy(dst, src, PATH_MAX);
	if (WARN_ON_ONCE(name_len < 0))
		name_len = PATH_MAX-1;

	/* we count the trailing nul */
	name_len++;
	return name_len;
}