~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/fs/cifs/file.c

Version: ~ [ linux-5.5-rc7 ] ~ [ linux-5.4.13 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.97 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.166 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.210 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.210 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.19.8 ] ~ [ linux-3.18.140 ] ~ [ linux-3.17.8 ] ~ [ linux-3.16.81 ] ~ [ linux-3.15.10 ] ~ [ linux-3.14.79 ] ~ [ linux-3.13.11 ] ~ [ linux-3.12.74 ] ~ [ linux-3.11.10 ] ~ [ linux-3.10.108 ] ~ [ linux-3.9.11 ] ~ [ linux-3.8.13 ] ~ [ linux-3.7.10 ] ~ [ linux-3.6.11 ] ~ [ linux-3.5.7 ] ~ [ linux-3.4.113 ] ~ [ linux-3.3.8 ] ~ [ linux-3.2.102 ] ~ [ linux-3.1.10 ] ~ [ linux-3.0.101 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /*
  2  *   fs/cifs/file.c
  3  *
  4  *   vfs operations that deal with files
  5  *
  6  *   Copyright (C) International Business Machines  Corp., 2002,2010
  7  *   Author(s): Steve French (sfrench@us.ibm.com)
  8  *              Jeremy Allison (jra@samba.org)
  9  *
 10  *   This library is free software; you can redistribute it and/or modify
 11  *   it under the terms of the GNU Lesser General Public License as published
 12  *   by the Free Software Foundation; either version 2.1 of the License, or
 13  *   (at your option) any later version.
 14  *
 15  *   This library is distributed in the hope that it will be useful,
 16  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 17  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 18  *   the GNU Lesser General Public License for more details.
 19  *
 20  *   You should have received a copy of the GNU Lesser General Public License
 21  *   along with this library; if not, write to the Free Software
 22  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 23  */
 24 #include <linux/fs.h>
 25 #include <linux/backing-dev.h>
 26 #include <linux/stat.h>
 27 #include <linux/fcntl.h>
 28 #include <linux/pagemap.h>
 29 #include <linux/pagevec.h>
 30 #include <linux/writeback.h>
 31 #include <linux/task_io_accounting_ops.h>
 32 #include <linux/delay.h>
 33 #include <linux/mount.h>
 34 #include <linux/slab.h>
 35 #include <linux/swap.h>
 36 #include <asm/div64.h>
 37 #include "cifsfs.h"
 38 #include "cifspdu.h"
 39 #include "cifsglob.h"
 40 #include "cifsproto.h"
 41 #include "cifs_unicode.h"
 42 #include "cifs_debug.h"
 43 #include "cifs_fs_sb.h"
 44 #include "fscache.h"
 45 #include "smbdirect.h"
 46 
 47 static inline int cifs_convert_flags(unsigned int flags)
 48 {
 49         if ((flags & O_ACCMODE) == O_RDONLY)
 50                 return GENERIC_READ;
 51         else if ((flags & O_ACCMODE) == O_WRONLY)
 52                 return GENERIC_WRITE;
 53         else if ((flags & O_ACCMODE) == O_RDWR) {
 54                 /* GENERIC_ALL is too much permission to request
 55                    can cause unnecessary access denied on create */
 56                 /* return GENERIC_ALL; */
 57                 return (GENERIC_READ | GENERIC_WRITE);
 58         }
 59 
 60         return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
 61                 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
 62                 FILE_READ_DATA);
 63 }
 64 
 65 static u32 cifs_posix_convert_flags(unsigned int flags)
 66 {
 67         u32 posix_flags = 0;
 68 
 69         if ((flags & O_ACCMODE) == O_RDONLY)
 70                 posix_flags = SMB_O_RDONLY;
 71         else if ((flags & O_ACCMODE) == O_WRONLY)
 72                 posix_flags = SMB_O_WRONLY;
 73         else if ((flags & O_ACCMODE) == O_RDWR)
 74                 posix_flags = SMB_O_RDWR;
 75 
 76         if (flags & O_CREAT) {
 77                 posix_flags |= SMB_O_CREAT;
 78                 if (flags & O_EXCL)
 79                         posix_flags |= SMB_O_EXCL;
 80         } else if (flags & O_EXCL)
 81                 cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
 82                          current->comm, current->tgid);
 83 
 84         if (flags & O_TRUNC)
 85                 posix_flags |= SMB_O_TRUNC;
 86         /* be safe and imply O_SYNC for O_DSYNC */
 87         if (flags & O_DSYNC)
 88                 posix_flags |= SMB_O_SYNC;
 89         if (flags & O_DIRECTORY)
 90                 posix_flags |= SMB_O_DIRECTORY;
 91         if (flags & O_NOFOLLOW)
 92                 posix_flags |= SMB_O_NOFOLLOW;
 93         if (flags & O_DIRECT)
 94                 posix_flags |= SMB_O_DIRECT;
 95 
 96         return posix_flags;
 97 }
 98 
 99 static inline int cifs_get_disposition(unsigned int flags)
100 {
101         if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
102                 return FILE_CREATE;
103         else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
104                 return FILE_OVERWRITE_IF;
105         else if ((flags & O_CREAT) == O_CREAT)
106                 return FILE_OPEN_IF;
107         else if ((flags & O_TRUNC) == O_TRUNC)
108                 return FILE_OVERWRITE;
109         else
110                 return FILE_OPEN;
111 }
112 
/*
 * Open a file using the SMB POSIX extensions (CIFSPOSIXCreate).
 *
 * @full_path:	path of the file relative to the share root
 * @pinode:	in/out; NULL if the caller does not need inode info,
 *		otherwise *pinode == NULL requests a fresh inode and a
 *		non-NULL *pinode is refreshed from the returned attrs
 * @sb:		superblock of this cifs mount
 * @mode:	create mode; masked with the current umask below
 * @f_flags:	open(2) flags, converted to SMB_O_* bits
 * @poplock:	out: oplock state granted by the server
 * @pnetfid:	out: server file handle (netfid) of the opened file
 * @xid:	transaction id used for this request
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_posix_open(char *full_path, struct inode **pinode,
			struct super_block *sb, int mode, unsigned int f_flags,
			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	/* response buffer for the file attributes the server returns */
	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	/* Type of -1 means the server returned no usable file info */
	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* refresh the existing inode from the returned attributes */
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
173 
/*
 * Open a file the "NT" way (server->ops->open) when the POSIX-extension
 * open is unavailable or failed.  Converts the VFS open flags to a
 * desired-access mask and create disposition, performs the open, then
 * refreshes the inode from the server.  Returns 0 or a negative errno.
 */
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *      POSIX Flag            CIFS Disposition
 *      ----------            ----------------
 *      O_CREAT               FILE_OPEN_IF
 *      O_CREAT | O_EXCL      FILE_CREATE
 *      O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *      O_TRUNC               FILE_OVERWRITE
 *      none of the above     FILE_OPEN
 *
 *      Note that there is not a direct match between disposition
 *      FILE_SUPERSEDE (ie create whether or not file exists although
 *      O_CREAT | O_TRUNC is similar but truncates the existing
 *      file rather than creating a new file as FILE_SUPERSEDE does
 *      (which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *       O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* buffer for the FILE_ALL_INFO the open may return */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	/* refresh inode metadata from the freshly-opened file */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

out:
	kfree(buf);
	return rc;
}
259 
260 static bool
261 cifs_has_mand_locks(struct cifsInodeInfo *cinode)
262 {
263         struct cifs_fid_locks *cur;
264         bool has_locks = false;
265 
266         down_read(&cinode->lock_sem);
267         list_for_each_entry(cur, &cinode->llist, llist) {
268                 if (!list_empty(&cur->locks)) {
269                         has_locks = true;
270                         break;
271                 }
272         }
273         up_read(&cinode->lock_sem);
274         return has_locks;
275 }
276 
/*
 * Allocate and initialize the cifsFileInfo (per-open-file private data)
 * for a freshly opened file, link it into the tcon and inode open-file
 * lists, and record the granted oplock.  Returns the new cifsFileInfo
 * (also stored in file->private_data) or NULL on allocation failure.
 */
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	/* per-fid byte-range lock list, attached to the inode's llist */
	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	/* initial reference; dropped by cifsFileInfo_put() */
	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	/* pin the superblock while this file is open */
	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	spin_lock(&tcon->open_file_lock);
	/*
	 * A lease/oplock break may have been recorded against the pending
	 * open while the open was in flight; prefer that newer level.
	 */
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	/* set_fid may raise purge_cache; checked after the lock is dropped */
	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	atomic_inc(&tcon->num_local_opens);

	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}
352 
/*
 * Take an additional reference on the file private data.  The count is
 * protected by file_info_lock; the matching release is
 * cifsFileInfo_put().  Returns its argument for caller convenience.
 */
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}
361 
362 /*
363  * Release a reference on the file private data. This may involve closing
364  * the filehandle out on the server. Must be called without holding
365  * tcon->open_file_lock and cifs_file->file_info_lock.
366  */
367 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
368 {
369         struct inode *inode = d_inode(cifs_file->dentry);
370         struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
371         struct TCP_Server_Info *server = tcon->ses->server;
372         struct cifsInodeInfo *cifsi = CIFS_I(inode);
373         struct super_block *sb = inode->i_sb;
374         struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
375         struct cifsLockInfo *li, *tmp;
376         struct cifs_fid fid;
377         struct cifs_pending_open open;
378         bool oplock_break_cancelled;
379 
380         spin_lock(&tcon->open_file_lock);
381 
382         spin_lock(&cifs_file->file_info_lock);
383         if (--cifs_file->count > 0) {
384                 spin_unlock(&cifs_file->file_info_lock);
385                 spin_unlock(&tcon->open_file_lock);
386                 return;
387         }
388         spin_unlock(&cifs_file->file_info_lock);
389 
390         if (server->ops->get_lease_key)
391                 server->ops->get_lease_key(inode, &fid);
392 
393         /* store open in pending opens to make sure we don't miss lease break */
394         cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
395 
396         /* remove it from the lists */
397         list_del(&cifs_file->flist);
398         list_del(&cifs_file->tlist);
399         atomic_dec(&tcon->num_local_opens);
400 
401         if (list_empty(&cifsi->openFileList)) {
402                 cifs_dbg(FYI, "closing last open instance for inode %p\n",
403                          d_inode(cifs_file->dentry));
404                 /*
405                  * In strict cache mode we need invalidate mapping on the last
406                  * close  because it may cause a error when we open this file
407                  * again and get at least level II oplock.
408                  */
409                 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
410                         set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
411                 cifs_set_oplock_level(cifsi, 0);
412         }
413 
414         spin_unlock(&tcon->open_file_lock);
415 
416         oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
417 
418         if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
419                 struct TCP_Server_Info *server = tcon->ses->server;
420                 unsigned int xid;
421 
422                 xid = get_xid();
423                 if (server->ops->close)
424                         server->ops->close(xid, tcon, &cifs_file->fid);
425                 _free_xid(xid);
426         }
427 
428         if (oplock_break_cancelled)
429                 cifs_done_oplock_break(cifsi);
430 
431         cifs_del_pending_open(&open);
432 
433         /*
434          * Delete any outstanding lock records. We'll lose them when the file
435          * is closed anyway.
436          */
437         down_write(&cifsi->lock_sem);
438         list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
439                 list_del(&li->llist);
440                 cifs_del_lock_waiters(li);
441                 kfree(li);
442         }
443         list_del(&cifs_file->llist->llist);
444         kfree(cifs_file->llist);
445         up_write(&cifsi->lock_sem);
446 
447         cifs_put_tlink(cifs_file->tlink);
448         dput(cifs_file->dentry);
449         cifs_sb_deactive(sb);
450         kfree(cifs_file);
451 }
452 
/*
 * VFS ->open() for regular files on a cifs mount.  Tries the SMB POSIX
 * extension open when the tcon supports it, falling back to the NT-style
 * open (cifs_nt_open) on network/DFS errors, then allocates the
 * per-open cifsFileInfo.  Returns 0 or a negative errno.
 */
int cifs_open(struct inode *inode, struct file *file)

{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file_dentry(file));
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	/* in strict-cache mode, O_DIRECT opens use the direct file ops */
	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	/* try the POSIX-extension open if the server advertises support */
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			/* server rejected the call; stop trying posix opens */
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->serverName,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* record the open so a lease break during setup is not missed */
	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		/* undo the server-side open and the pending-open record */
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
579 
580 static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
581 
/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 *
 * Called with cfile's handle already reopened (see cifs_reopen_file).
 * Pushes cached POSIX locks when the server supports the unix fcntl
 * capability and posix brlocks are not disabled by mount option;
 * otherwise pushes mandatory locks.  Returns 0 or a negative errno.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* nested annotation: lock_sem may already be held one level up */
	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}
611 
/*
 * Reopen a file whose server handle became invalid (e.g. after a
 * reconnect or durable-handle timeout).  Serialized per file by
 * fh_mutex.  When @can_flush is true, dirty pages are written back and
 * the inode is refreshed from the server after the reopen; when false,
 * the caller is already on a writeback path and flushing could
 * deadlock.  Returns 0 or a negative errno.
 */
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		/* someone else already reopened it while we waited */
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	/* prefer the POSIX-extension reopen when the server supports it */
	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}
770 
771 int cifs_close(struct inode *inode, struct file *file)
772 {
773         if (file->private_data != NULL) {
774                 cifsFileInfo_put(file->private_data);
775                 file->private_data = NULL;
776         }
777 
778         /* return code from the ->release op is always ignored */
779         return 0;
780 }
781 
782 void
783 cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
784 {
785         struct cifsFileInfo *open_file;
786         struct list_head *tmp;
787         struct list_head *tmp1;
788         struct list_head tmp_list;
789 
790         if (!tcon->use_persistent || !tcon->need_reopen_files)
791                 return;
792 
793         tcon->need_reopen_files = false;
794 
795         cifs_dbg(FYI, "Reopen persistent handles");
796         INIT_LIST_HEAD(&tmp_list);
797 
798         /* list all files open on tree connection, reopen resilient handles  */
799         spin_lock(&tcon->open_file_lock);
800         list_for_each(tmp, &tcon->openFileList) {
801                 open_file = list_entry(tmp, struct cifsFileInfo, tlist);
802                 if (!open_file->invalidHandle)
803                         continue;
804                 cifsFileInfo_get(open_file);
805                 list_add_tail(&open_file->rlist, &tmp_list);
806         }
807         spin_unlock(&tcon->open_file_lock);
808 
809         list_for_each_safe(tmp, tmp1, &tmp_list) {
810                 open_file = list_entry(tmp, struct cifsFileInfo, rlist);
811                 if (cifs_reopen_file(open_file, false /* do not flush */))
812                         tcon->need_reopen_files = true;
813                 list_del_init(&open_file->rlist);
814                 cifsFileInfo_put(open_file);
815         }
816 }
817 
/*
 * ->release handler for directories: close the server-side search handle
 * (if the readdir was not run to completion), free the cached search
 * buffer, and tear down the private cifsFileInfo.  Always returns 0 to
 * the VFS except when close_dir itself fails before that point.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
        int rc = 0;
        unsigned int xid;
        struct cifsFileInfo *cfile = file->private_data;
        struct cifs_tcon *tcon;
        struct TCP_Server_Info *server;
        char *buf;

        cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

        /* nothing was attached at opendir time - nothing to tear down */
        if (cfile == NULL)
                return rc;

        xid = get_xid();
        tcon = tlink_tcon(cfile->tlink);
        server = tcon->ses->server;

        cifs_dbg(FYI, "Freeing private data in close dir\n");
        spin_lock(&cfile->file_info_lock);
        if (server->ops->dir_needs_close(cfile)) {
                /* mark invalid before dropping the lock, then close on the wire */
                cfile->invalidHandle = true;
                spin_unlock(&cfile->file_info_lock);
                if (server->ops->close_dir)
                        rc = server->ops->close_dir(xid, tcon, &cfile->fid);
                else
                        rc = -ENOSYS;
                cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
                /* not much we can do if it fails anyway, ignore rc */
                rc = 0;
        } else
                spin_unlock(&cfile->file_info_lock);

        /* release the SMB response buffer cached by the search */
        buf = cfile->srch_inf.ntwrk_buf_start;
        if (buf) {
                cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
                cfile->srch_inf.ntwrk_buf_start = NULL;
                if (cfile->srch_inf.smallBuf)
                        cifs_small_buf_release(buf);
                else
                        cifs_buf_release(buf);
        }

        cifs_put_tlink(cfile->tlink);
        kfree(file->private_data);
        file->private_data = NULL;
        /* BB can we lock the filestruct while this is going on? */
        free_xid(xid);
        return rc;
}
868 
869 static struct cifsLockInfo *
870 cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
871 {
872         struct cifsLockInfo *lock =
873                 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
874         if (!lock)
875                 return lock;
876         lock->offset = offset;
877         lock->length = length;
878         lock->type = type;
879         lock->pid = current->tgid;
880         lock->flags = flags;
881         INIT_LIST_HEAD(&lock->blist);
882         init_waitqueue_head(&lock->block_q);
883         return lock;
884 }
885 
886 void
887 cifs_del_lock_waiters(struct cifsLockInfo *lock)
888 {
889         struct cifsLockInfo *li, *tmp;
890         list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
891                 list_del_init(&li->blist);
892                 wake_up(&li->block_q);
893         }
894 }
895 
896 #define CIFS_LOCK_OP    0
897 #define CIFS_READ_OP    1
898 #define CIFS_WRITE_OP   2
899 
/*
 * Scan one fd's lock list for a lock conflicting with the given range.
 * @rw_check : 0 - no op (CIFS_LOCK_OP), 1 - read, 2 - write
 * On conflict, *conf_lock (if non-NULL) is set to the offending lock and
 * true is returned.
 */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
                            __u64 length, __u8 type, __u16 flags,
                            struct cifsFileInfo *cfile,
                            struct cifsLockInfo **conf_lock, int rw_check)
{
        struct cifsLockInfo *li;
        struct cifsFileInfo *cur_cfile = fdlocks->cfile;
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

        list_for_each_entry(li, &fdlocks->locks, llist) {
                /* skip locks that do not overlap the requested range */
                if (offset + length <= li->offset ||
                    offset >= li->offset + li->length)
                        continue;
                if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
                    server->ops->compare_fids(cfile, cur_cfile)) {
                        /* shared lock prevents write op through the same fid */
                        if (!(li->type & server->vals->shared_lock_type) ||
                            rw_check != CIFS_WRITE_OP)
                                continue;
                }
                /* two shared locks coexist if same owner+fid or same type */
                if ((type & server->vals->shared_lock_type) &&
                    ((server->ops->compare_fids(cfile, cur_cfile) &&
                     current->tgid == li->pid) || type == li->type))
                        continue;
                /* OFD locks on the same fid never conflict with each other */
                if (rw_check == CIFS_LOCK_OP &&
                    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
                    server->ops->compare_fids(cfile, cur_cfile))
                        continue;
                if (conf_lock)
                        *conf_lock = li;
                return true;
        }
        return false;
}
936 
937 bool
938 cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
939                         __u8 type, __u16 flags,
940                         struct cifsLockInfo **conf_lock, int rw_check)
941 {
942         bool rc = false;
943         struct cifs_fid_locks *cur;
944         struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
945 
946         list_for_each_entry(cur, &cinode->llist, llist) {
947                 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
948                                                  flags, cfile, conf_lock,
949                                                  rw_check);
950                 if (rc)
951                         break;
952         }
953 
954         return rc;
955 }
956 
957 /*
958  * Check if there is another lock that prevents us to set the lock (mandatory
959  * style). If such a lock exists, update the flock structure with its
960  * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
961  * or leave it the same if we can't. Returns 0 if we don't need to request to
962  * the server or 1 otherwise.
963  */
964 static int
965 cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
966                __u8 type, struct file_lock *flock)
967 {
968         int rc = 0;
969         struct cifsLockInfo *conf_lock;
970         struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
971         struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
972         bool exist;
973 
974         down_read(&cinode->lock_sem);
975 
976         exist = cifs_find_lock_conflict(cfile, offset, length, type,
977                                         flock->fl_flags, &conf_lock,
978                                         CIFS_LOCK_OP);
979         if (exist) {
980                 flock->fl_start = conf_lock->offset;
981                 flock->fl_end = conf_lock->offset + conf_lock->length - 1;
982                 flock->fl_pid = conf_lock->pid;
983                 if (conf_lock->type & server->vals->shared_lock_type)
984                         flock->fl_type = F_RDLCK;
985                 else
986                         flock->fl_type = F_WRLCK;
987         } else if (!cinode->can_cache_brlcks)
988                 rc = 1;
989         else
990                 flock->fl_type = F_UNLCK;
991 
992         up_read(&cinode->lock_sem);
993         return rc;
994 }
995 
/*
 * Append a lock to this fd's cached lock list; lock_sem serializes all
 * modifications of the inode's lock lists.
 */
static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
        down_write(&cinode->lock_sem);
        list_add_tail(&lock->llist, &cfile->llist->locks);
        up_write(&cinode->lock_sem);
}
1004 
1005 /*
1006  * Set the byte-range lock (mandatory style). Returns:
1007  * 1) 0, if we set the lock and don't need to request to the server;
1008  * 2) 1, if no locks prevent us but we need to request to the server;
1009  * 3) -EACCES, if there is a lock that prevents us and wait is false.
1010  */
1011 static int
1012 cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
1013                  bool wait)
1014 {
1015         struct cifsLockInfo *conf_lock;
1016         struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1017         bool exist;
1018         int rc = 0;
1019 
1020 try_again:
1021         exist = false;
1022         down_write(&cinode->lock_sem);
1023 
1024         exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
1025                                         lock->type, lock->flags, &conf_lock,
1026                                         CIFS_LOCK_OP);
1027         if (!exist && cinode->can_cache_brlcks) {
1028                 list_add_tail(&lock->llist, &cfile->llist->locks);
1029                 up_write(&cinode->lock_sem);
1030                 return rc;
1031         }
1032 
1033         if (!exist)
1034                 rc = 1;
1035         else if (!wait)
1036                 rc = -EACCES;
1037         else {
1038                 list_add_tail(&lock->blist, &conf_lock->blist);
1039                 up_write(&cinode->lock_sem);
1040                 rc = wait_event_interruptible(lock->block_q,
1041                                         (lock->blist.prev == &lock->blist) &&
1042                                         (lock->blist.next == &lock->blist));
1043                 if (!rc)
1044                         goto try_again;
1045                 down_write(&cinode->lock_sem);
1046                 list_del_init(&lock->blist);
1047         }
1048 
1049         up_write(&cinode->lock_sem);
1050         return rc;
1051 }
1052 
1053 /*
1054  * Check if there is another lock that prevents us to set the lock (posix
1055  * style). If such a lock exists, update the flock structure with its
1056  * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1057  * or leave it the same if we can't. Returns 0 if we don't need to request to
1058  * the server or 1 otherwise.
1059  */
1060 static int
1061 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1062 {
1063         int rc = 0;
1064         struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1065         unsigned char saved_type = flock->fl_type;
1066 
1067         if ((flock->fl_flags & FL_POSIX) == 0)
1068                 return 1;
1069 
1070         down_read(&cinode->lock_sem);
1071         posix_test_lock(file, flock);
1072 
1073         if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
1074                 flock->fl_type = saved_type;
1075                 rc = 1;
1076         }
1077 
1078         up_read(&cinode->lock_sem);
1079         return rc;
1080 }
1081 
1082 /*
1083  * Set the byte-range lock (posix style). Returns:
1084  * 1) 0, if we set the lock and don't need to request to the server;
1085  * 2) 1, if we need to request to the server;
1086  * 3) <0, if the error occurs while setting the lock.
1087  */
1088 static int
1089 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1090 {
1091         struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1092         int rc = 1;
1093 
1094         if ((flock->fl_flags & FL_POSIX) == 0)
1095                 return rc;
1096 
1097 try_again:
1098         down_write(&cinode->lock_sem);
1099         if (!cinode->can_cache_brlcks) {
1100                 up_write(&cinode->lock_sem);
1101                 return rc;
1102         }
1103 
1104         rc = posix_lock_file(file, flock, NULL);
1105         up_write(&cinode->lock_sem);
1106         if (rc == FILE_LOCK_DEFERRED) {
1107                 rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
1108                 if (!rc)
1109                         goto try_again;
1110                 posix_unblock_lock(flock);
1111         }
1112         return rc;
1113 }
1114 
/*
 * Push all cached mandatory byte-range locks of this open file to the
 * server.  Locks are batched into one LOCKING_ANDX_RANGE buffer per
 * request (up to max_num entries), in two passes: exclusive locks first,
 * then shared.  Returns 0 on success or the last non-zero cifs_lockv()
 * status if any batch fails.
 */
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
        unsigned int xid;
        int rc = 0, stored_rc;
        struct cifsLockInfo *li, *tmp;
        struct cifs_tcon *tcon;
        unsigned int num, max_num, max_buf;
        LOCKING_ANDX_RANGE *buf, *cur;
        /* one pass per lock type: exclusive, then shared */
        static const int types[] = {
                LOCKING_ANDX_LARGE_FILES,
                LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
        };
        int i;

        xid = get_xid();
        tcon = tlink_tcon(cfile->tlink);

        /*
         * Accessing maxBuf is racy with cifs_reconnect - need to store value
         * and check it before using.
         */
        max_buf = tcon->ses->server->maxBuf;
        if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
                free_xid(xid);
                return -EINVAL;
        }

        BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
                     PAGE_SIZE);
        max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
                        PAGE_SIZE);
        /* how many lock ranges fit in a single request buffer */
        max_num = (max_buf - sizeof(struct smb_hdr)) /
                                                sizeof(LOCKING_ANDX_RANGE);
        buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
        if (!buf) {
                free_xid(xid);
                return -ENOMEM;
        }

        for (i = 0; i < 2; i++) {
                cur = buf;
                num = 0;
                list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
                        if (li->type != types[i])
                                continue;
                        /* encode the 64-bit range as two little-endian halves */
                        cur->Pid = cpu_to_le16(li->pid);
                        cur->LengthLow = cpu_to_le32((u32)li->length);
                        cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
                        cur->OffsetLow = cpu_to_le32((u32)li->offset);
                        cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
                        if (++num == max_num) {
                                /* buffer full - flush this batch to the server */
                                stored_rc = cifs_lockv(xid, tcon,
                                                       cfile->fid.netfid,
                                                       (__u8)li->type, 0, num,
                                                       buf);
                                if (stored_rc)
                                        rc = stored_rc;
                                cur = buf;
                                num = 0;
                        } else
                                cur++;
                }

                /* flush the final, partially-filled batch of this pass */
                if (num) {
                        stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
                                               (__u8)types[i], 0, num, buf);
                        if (stored_rc)
                                rc = stored_rc;
                }
        }

        kfree(buf);
        free_xid(xid);
        return rc;
}
1191 
/*
 * Derive a 32-bit lock-owner id from the file_lock owner pointer, mixed
 * with cifs_lock_secret - presumably so the raw kernel pointer value is
 * not sent to the server (NOTE(review): confirm against commit history).
 */
static __u32
hash_lockowner(fl_owner_t owner)
{
        return cifs_lock_secret ^ hash32_ptr((const void *)owner);
}
1197 
/* Snapshot of one cached POSIX lock queued for pushing to the server. */
struct lock_to_push {
        struct list_head llist; /* entry in the local locks_to_send list */
        __u64 offset;           /* start of the locked range */
        __u64 length;           /* length of the locked range */
        __u32 pid;              /* hashed lock owner (see hash_lockowner) */
        __u16 netfid;           /* file handle the lock applies to */
        __u8 type;              /* CIFS_RDLCK or CIFS_WRLCK */
};
1206 
/*
 * Push all cached POSIX byte-range locks of this inode to the server.
 * Works in three phases: count the flc_posix locks under flc_lock,
 * pre-allocate that many lock_to_push entries (so no allocation happens
 * under the spinlock), then snapshot the locks and send each one with
 * CIFSSMBPosixLock().  Returns 0 or the last non-zero send/alloc status.
 */
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
        struct inode *inode = d_inode(cfile->dentry);
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        struct file_lock *flock;
        struct file_lock_context *flctx = inode->i_flctx;
        unsigned int count = 0, i;
        int rc = 0, xid, type;
        struct list_head locks_to_send, *el;
        struct lock_to_push *lck, *tmp;
        __u64 length;

        xid = get_xid();

        /* no lock context means no POSIX locks to push */
        if (!flctx)
                goto out;

        spin_lock(&flctx->flc_lock);
        list_for_each(el, &flctx->flc_posix) {
                count++;
        }
        spin_unlock(&flctx->flc_lock);

        INIT_LIST_HEAD(&locks_to_send);

        /*
         * Allocating count locks is enough because no FL_POSIX locks can be
         * added to the list while we are holding cinode->lock_sem that
         * protects locking operations of this inode.
         */
        for (i = 0; i < count; i++) {
                lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
                if (!lck) {
                        rc = -ENOMEM;
                        goto err_out;
                }
                list_add_tail(&lck->llist, &locks_to_send);
        }

        /* walk the pre-allocated entries in step with the flc_posix list */
        el = locks_to_send.next;
        spin_lock(&flctx->flc_lock);
        list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
                if (el == &locks_to_send) {
                        /*
                         * The list ended. We don't have enough allocated
                         * structures - something is really wrong.
                         */
                        cifs_dbg(VFS, "Can't push all brlocks!\n");
                        break;
                }
                length = 1 + flock->fl_end - flock->fl_start;
                if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
                        type = CIFS_RDLCK;
                else
                        type = CIFS_WRLCK;
                lck = list_entry(el, struct lock_to_push, llist);
                lck->pid = hash_lockowner(flock->fl_owner);
                lck->netfid = cfile->fid.netfid;
                lck->length = length;
                lck->type = type;
                lck->offset = flock->fl_start;
        }
        spin_unlock(&flctx->flc_lock);

        /* send each snapshotted lock; remember the last failure, keep going */
        list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
                int stored_rc;

                stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
                                             lck->offset, lck->length, NULL,
                                             lck->type, 0);
                if (stored_rc)
                        rc = stored_rc;
                list_del(&lck->llist);
                kfree(lck);
        }

out:
        free_xid(xid);
        return rc;
err_out:
        /* allocation failed part-way - free what we already allocated */
        list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
                list_del(&lck->llist);
                kfree(lck);
        }
        goto out;
}
1294 
/*
 * Flush this file's cached byte-range locks to the server, choosing the
 * POSIX or mandatory path based on the session capabilities and mount
 * options, then clear can_cache_brlcks so future locks go to the server.
 */
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
        struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        int rc = 0;

        /* we are going to update can_cache_brlcks here - need a write access */
        down_write(&cinode->lock_sem);
        if (!cinode->can_cache_brlcks) {
                /* already pushed (or never cached) - nothing to do */
                up_write(&cinode->lock_sem);
                return rc;
        }

        if (cap_unix(tcon->ses) &&
            (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
            ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
                rc = cifs_push_posix_locks(cfile);
        else
                rc = tcon->ses->server->ops->push_mand_locks(cfile);

        cinode->can_cache_brlcks = false;
        up_write(&cinode->lock_sem);
        return rc;
}
1321 
1322 static void
1323 cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
1324                 bool *wait_flag, struct TCP_Server_Info *server)
1325 {
1326         if (flock->fl_flags & FL_POSIX)
1327                 cifs_dbg(FYI, "Posix\n");
1328         if (flock->fl_flags & FL_FLOCK)
1329                 cifs_dbg(FYI, "Flock\n");
1330         if (flock->fl_flags & FL_SLEEP) {
1331                 cifs_dbg(FYI, "Blocking lock\n");
1332                 *wait_flag = true;
1333         }
1334         if (flock->fl_flags & FL_ACCESS)
1335                 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
1336         if (flock->fl_flags & FL_LEASE)
1337                 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
1338         if (flock->fl_flags &
1339             (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1340                FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
1341                 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
1342 
1343         *type = server->vals->large_lock_type;
1344         if (flock->fl_type == F_WRLCK) {
1345                 cifs_dbg(FYI, "F_WRLCK\n");
1346                 *type |= server->vals->exclusive_lock_type;
1347                 *lock = 1;
1348         } else if (flock->fl_type == F_UNLCK) {
1349                 cifs_dbg(FYI, "F_UNLCK\n");
1350                 *type |= server->vals->unlock_lock_type;
1351                 *unlock = 1;
1352                 /* Check if unlock includes more than one lock range */
1353         } else if (flock->fl_type == F_RDLCK) {
1354                 cifs_dbg(FYI, "F_RDLCK\n");
1355                 *type |= server->vals->shared_lock_type;
1356                 *lock = 1;
1357         } else if (flock->fl_type == F_EXLCK) {
1358                 cifs_dbg(FYI, "F_EXLCK\n");
1359                 *type |= server->vals->exclusive_lock_type;
1360                 *lock = 1;
1361         } else if (flock->fl_type == F_SHLCK) {
1362                 cifs_dbg(FYI, "F_SHLCK\n");
1363                 *type |= server->vals->shared_lock_type;
1364                 *lock = 1;
1365         } else
1366                 cifs_dbg(FYI, "Unknown type of lock\n");
1367 }
1368 
/*
 * Handle an F_GETLK-style query.  For POSIX-capable mounts, first test
 * locally and then ask the server via CIFSSMBPosixLock.  For mandatory
 * locking, probe the server: try to take the lock (then immediately
 * release it) - success means no conflict (F_UNLCK); failure on a shared
 * request means an exclusive lock is held (F_WRLCK); otherwise retry as
 * shared to distinguish a shared holder (F_RDLCK) from exclusive.
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
           bool wait_flag, bool posix_lck, unsigned int xid)
{
        int rc = 0;
        __u64 length = 1 + flock->fl_end - flock->fl_start;
        struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        struct TCP_Server_Info *server = tcon->ses->server;
        __u16 netfid = cfile->fid.netfid;

        if (posix_lck) {
                int posix_lock_type;

                /* rc == 0: answered locally, no server round trip needed */
                rc = cifs_posix_lock_test(file, flock);
                if (!rc)
                        return rc;

                if (type & server->vals->shared_lock_type)
                        posix_lock_type = CIFS_RDLCK;
                else
                        posix_lock_type = CIFS_WRLCK;
                rc = CIFSSMBPosixLock(xid, tcon, netfid,
                                      hash_lockowner(flock->fl_owner),
                                      flock->fl_start, length, flock,
                                      posix_lock_type, wait_flag);
                return rc;
        }

        rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
        if (!rc)
                return rc;

        /* BB we could chain these into one lock request BB */
        rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
                                    1, 0, false);
        if (rc == 0) {
                /* lock acquired, so nothing conflicts - undo the probe lock */
                rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
                                            type, 0, 1, false);
                flock->fl_type = F_UNLCK;
                if (rc != 0)
                        cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
                                 rc);
                return 0;
        }

        /* a shared probe failed - an exclusive lock must be in the way */
        if (type & server->vals->shared_lock_type) {
                flock->fl_type = F_WRLCK;
                return 0;
        }

        type &= ~server->vals->exclusive_lock_type;

        /* exclusive probe failed - retry shared to classify the holder */
        rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
                                    type | server->vals->shared_lock_type,
                                    1, 0, false);
        if (rc == 0) {
                rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
                        type | server->vals->shared_lock_type, 0, 1, false);
                flock->fl_type = F_RDLCK;
                if (rc != 0)
                        cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
                                 rc);
        } else
                flock->fl_type = F_WRLCK;

        return 0;
}
1437 
1438 void
1439 cifs_move_llist(struct list_head *source, struct list_head *dest)
1440 {
1441         struct list_head *li, *tmp;
1442         list_for_each_safe(li, tmp, source)
1443                 list_move(li, dest);
1444 }
1445 
1446 void
1447 cifs_free_llist(struct list_head *llist)
1448 {
1449         struct cifsLockInfo *li, *tmp;
1450         list_for_each_entry_safe(li, tmp, llist, llist) {
1451                 cifs_del_lock_waiters(li);
1452                 list_del(&li->llist);
1453                 kfree(li);
1454         }
1455 }
1456 
/*
 * Unlock every cached byte-range lock of this open file that falls wholly
 * inside @flock's range and belongs to the current thread group.  When
 * brlocks are cached the locks are simply dropped locally; otherwise the
 * unlocks are batched into LOCKING_ANDX_RANGE requests, with the affected
 * locks parked on tmp_llist so they can be re-added if the server rejects
 * a batch.  Returns 0 or the last failing cifs_lockv() status.
 */
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
                  unsigned int xid)
{
        int rc = 0, stored_rc;
        /* one pass per lock type: exclusive, then shared */
        static const int types[] = {
                LOCKING_ANDX_LARGE_FILES,
                LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
        };
        unsigned int i;
        unsigned int max_num, num, max_buf;
        LOCKING_ANDX_RANGE *buf, *cur;
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
        struct cifsLockInfo *li, *tmp;
        __u64 length = 1 + flock->fl_end - flock->fl_start;
        struct list_head tmp_llist;

        INIT_LIST_HEAD(&tmp_llist);

        /*
         * Accessing maxBuf is racy with cifs_reconnect - need to store value
         * and check it before using.
         */
        max_buf = tcon->ses->server->maxBuf;
        if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
                return -EINVAL;

        BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
                     PAGE_SIZE);
        max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
                        PAGE_SIZE);
        /* how many lock ranges fit in one request buffer */
        max_num = (max_buf - sizeof(struct smb_hdr)) /
                                                sizeof(LOCKING_ANDX_RANGE);
        buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        down_write(&cinode->lock_sem);
        for (i = 0; i < 2; i++) {
                cur = buf;
                num = 0;
                list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
                        /* only locks fully contained in the unlock range */
                        if (flock->fl_start > li->offset ||
                            (flock->fl_start + length) <
                            (li->offset + li->length))
                                continue;
                        if (current->tgid != li->pid)
                                continue;
                        if (types[i] != li->type)
                                continue;
                        if (cinode->can_cache_brlcks) {
                                /*
                                 * We can cache brlock requests - simply remove
                                 * a lock from the file's list.
                                 */
                                list_del(&li->llist);
                                cifs_del_lock_waiters(li);
                                kfree(li);
                                continue;
                        }
                        cur->Pid = cpu_to_le16(li->pid);
                        cur->LengthLow = cpu_to_le32((u32)li->length);
                        cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
                        cur->OffsetLow = cpu_to_le32((u32)li->offset);
                        cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
                        /*
                         * We need to save a lock here to let us add it again to
                         * the file's list if the unlock range request fails on
                         * the server.
                         */
                        list_move(&li->llist, &tmp_llist);
                        if (++num == max_num) {
                                stored_rc = cifs_lockv(xid, tcon,
                                                       cfile->fid.netfid,
                                                       li->type, num, 0, buf);
                                if (stored_rc) {
                                        /*
                                         * We failed on the unlock range
                                         * request - add all locks from the tmp
                                         * list to the head of the file's list.
                                         */
                                        cifs_move_llist(&tmp_llist,
                                                        &cfile->llist->locks);
                                        rc = stored_rc;
                                } else
                                        /*
                                         * The unlock range request succeed -
                                         * free the tmp list.
                                         */
                                        cifs_free_llist(&tmp_llist);
                                cur = buf;
                                num = 0;
                        } else
                                cur++;
                }
                /* flush the final, partially-filled batch of this pass */
                if (num) {
                        stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
                                               types[i], num, 0, buf);
                        if (stored_rc) {
                                cifs_move_llist(&tmp_llist,
                                                &cfile->llist->locks);
                                rc = stored_rc;
                        } else
                                cifs_free_llist(&tmp_llist);
                }
        }

        up_write(&cinode->lock_sem);
        kfree(buf);
        return rc;
}
1569 
1570 static int
1571 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
1572            bool wait_flag, bool posix_lck, int lock, int unlock,
1573            unsigned int xid)
1574 {
1575         int rc = 0;
1576         __u64 length = 1 + flock->fl_end - flock->fl_start;
1577         struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1578         struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1579         struct TCP_Server_Info *server = tcon->ses->server;
1580         struct inode *inode = d_inode(cfile->dentry);
1581 
1582         if (posix_lck) {
1583                 int posix_lock_type;
1584 
1585                 rc = cifs_posix_lock_set(file, flock);
1586                 if (!rc || rc < 0)
1587                         return rc;
1588 
1589                 if (type & server->vals->shared_lock_type)
1590                         posix_lock_type = CIFS_RDLCK;
1591                 else
1592                         posix_lock_type = CIFS_WRLCK;
1593 
1594                 if (unlock == 1)
1595                         posix_lock_type = CIFS_UNLCK;
1596 
1597                 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
1598                                       hash_lockowner(flock->fl_owner),
1599                                       flock->fl_start, length,
1600                                       NULL, posix_lock_type, wait_flag);
1601                 goto out;
1602         }
1603 
1604         if (lock) {
1605                 struct cifsLockInfo *lock;
1606 
1607                 lock = cifs_lock_init(flock->fl_start, length, type,
1608                                       flock->fl_flags);
1609                 if (!lock)
1610                         return -ENOMEM;
1611 
1612                 rc = cifs_lock_add_if(cfile, lock, wait_flag);
1613                 if (rc < 0) {
1614                         kfree(lock);
1615                         return rc;
1616                 }
1617                 if (!rc)
1618                         goto out;
1619 
1620                 /*
1621                  * Windows 7 server can delay breaking lease from read to None
1622                  * if we set a byte-range lock on a file - break it explicitly
1623                  * before sending the lock to the server to be sure the next
1624                  * read won't conflict with non-overlapted locks due to
1625                  * pagereading.
1626                  */
1627                 if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
1628                                         CIFS_CACHE_READ(CIFS_I(inode))) {
1629                         cifs_zap_mapping(inode);
1630                         cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
1631                                  inode);
1632                         CIFS_I(inode)->oplock = 0;
1633                 }
1634 
1635                 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1636                                             type, 1, 0, wait_flag);
1637                 if (rc) {
1638                         kfree(lock);
1639                         return rc;
1640                 }
1641 
1642                 cifs_lock_add(cfile, lock);
1643         } else if (unlock)
1644                 rc = server->ops->mand_unlock_range(cfile, flock, xid);
1645 
1646 out:
1647         if (flock->fl_flags & FL_POSIX && !rc)
1648                 rc = locks_lock_file_wait(file, flock);
1649         return rc;
1650 }
1651 
1652 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1653 {
1654         int rc, xid;
1655         int lock = 0, unlock = 0;
1656         bool wait_flag = false;
1657         bool posix_lck = false;
1658         struct cifs_sb_info *cifs_sb;
1659         struct cifs_tcon *tcon;
1660         struct cifsInodeInfo *cinode;
1661         struct cifsFileInfo *cfile;
1662         __u16 netfid;
1663         __u32 type;
1664 
1665         rc = -EACCES;
1666         xid = get_xid();
1667 
1668         cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
1669                  cmd, flock->fl_flags, flock->fl_type,
1670                  flock->fl_start, flock->fl_end);
1671 
1672         cfile = (struct cifsFileInfo *)file->private_data;
1673         tcon = tlink_tcon(cfile->tlink);
1674 
1675         cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1676                         tcon->ses->server);
1677         cifs_sb = CIFS_FILE_SB(file);
1678         netfid = cfile->fid.netfid;
1679         cinode = CIFS_I(file_inode(file));
1680 
1681         if (cap_unix(tcon->ses) &&
1682             (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1683             ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1684                 posix_lck = true;
1685         /*
1686          * BB add code here to normalize offset and length to account for
1687          * negative length which we can not accept over the wire.
1688          */
1689         if (IS_GETLK(cmd)) {
1690                 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
1691                 free_xid(xid);
1692                 return rc;
1693         }
1694 
1695         if (!lock && !unlock) {
1696                 /*
1697                  * if no lock or unlock then nothing to do since we do not
1698                  * know what it is
1699                  */
1700                 free_xid(xid);
1701                 return -EOPNOTSUPP;
1702         }
1703 
1704         rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1705                         xid);
1706         free_xid(xid);
1707         return rc;
1708 }
1709 
1710 /*
1711  * update the file size (if needed) after a write. Should be called with
1712  * the inode->i_lock held
1713  */
1714 void
1715 cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1716                       unsigned int bytes_written)
1717 {
1718         loff_t end_of_write = offset + bytes_written;
1719 
1720         if (end_of_write > cifsi->server_eof)
1721                 cifsi->server_eof = end_of_write;
1722 }
1723 
/*
 * cifs_write - synchronously write a buffer to the server via @open_file.
 * @open_file:  open handle to write through
 * @pid:        pid stamped on the request (byte-range lock owner semantics)
 * @write_data: source buffer
 * @write_size: number of bytes to write
 * @offset:     file position; advanced by the bytes actually written
 *
 * Splits the buffer into chunks bounded by the server's retry size,
 * retrying each chunk on -EAGAIN (reconnect) and reopening an
 * invalidated handle as needed.  Updates the cached server EOF and
 * i_size as data goes out.
 *
 * Returns the total number of bytes written; a negative errno only if
 * nothing at all could be written.
 */
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
           size_t write_size, loff_t *offset)
{
        int rc = 0;
        unsigned int bytes_written = 0;
        unsigned int total_written;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
        struct TCP_Server_Info *server;
        unsigned int xid;
        struct dentry *dentry = open_file->dentry;
        struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
        struct cifs_io_parms io_parms;

        cifs_sb = CIFS_SB(dentry->d_sb);

        cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
                 write_size, *offset, dentry);

        tcon = tlink_tcon(open_file->tlink);
        server = tcon->ses->server;

        if (!server->ops->sync_write)
                return -ENOSYS;

        xid = get_xid();

        for (total_written = 0; write_size > total_written;
             total_written += bytes_written) {
                /* inner loop retries the current chunk after reconnects */
                rc = -EAGAIN;
                while (rc == -EAGAIN) {
                        struct kvec iov[2];
                        unsigned int len;

                        if (open_file->invalidHandle) {
                                /* we could deadlock if we called
                                   filemap_fdatawait from here so tell
                                   reopen_file not to flush data to
                                   server now */
                                rc = cifs_reopen_file(open_file, false);
                                if (rc != 0)
                                        break;
                        }

                        /* chunk size: remaining bytes, capped by the server */
                        len = min(server->ops->wp_retry_size(d_inode(dentry)),
                                  (unsigned int)write_size - total_written);
                        /* iov[0] is reserved for smb header */
                        iov[1].iov_base = (char *)write_data + total_written;
                        iov[1].iov_len = len;
                        io_parms.pid = pid;
                        io_parms.tcon = tcon;
                        io_parms.offset = *offset;
                        io_parms.length = len;
                        rc = server->ops->sync_write(xid, &open_file->fid,
                                        &io_parms, &bytes_written, iov, 1);
                }
                if (rc || (bytes_written == 0)) {
                        /* partial success wins over a late error */
                        if (total_written)
                                break;
                        else {
                                free_xid(xid);
                                return rc;
                        }
                } else {
                        /* i_lock protects server_eof updates */
                        spin_lock(&d_inode(dentry)->i_lock);
                        cifs_update_eof(cifsi, *offset, bytes_written);
                        spin_unlock(&d_inode(dentry)->i_lock);
                        *offset += bytes_written;
                }
        }

        cifs_stats_bytes_written(tcon, total_written);

        if (total_written > 0) {
                spin_lock(&d_inode(dentry)->i_lock);
                if (*offset > d_inode(dentry)->i_size)
                        i_size_write(d_inode(dentry), *offset);
                spin_unlock(&d_inode(dentry)->i_lock);
        }
        mark_inode_dirty_sync(d_inode(dentry));
        free_xid(xid);
        return total_written;
}
1808 
1809 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1810                                         bool fsuid_only)
1811 {
1812         struct cifsFileInfo *open_file = NULL;
1813         struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1814         struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
1815 
1816         /* only filter by fsuid on multiuser mounts */
1817         if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1818                 fsuid_only = false;
1819 
1820         spin_lock(&tcon->open_file_lock);
1821         /* we could simply get the first_list_entry since write-only entries
1822            are always at the end of the list but since the first entry might
1823            have a close pending, we go through the whole list */
1824         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1825                 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
1826                         continue;
1827                 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
1828                         if (!open_file->invalidHandle) {
1829                                 /* found a good file */
1830                                 /* lock it so it will not be closed on us */
1831                                 cifsFileInfo_get(open_file);
1832                                 spin_unlock(&tcon->open_file_lock);
1833                                 return open_file;
1834                         } /* else might as well continue, and look for
1835                              another, or simply have the caller reopen it
1836                              again rather than trying to fix this handle */
1837                 } else /* write only file */
1838                         break; /* write only files are last so must be done */
1839         }
1840         spin_unlock(&tcon->open_file_lock);
1841         return NULL;
1842 }
1843 
/*
 * Return a referenced open handle on @cifs_inode that permits writing,
 * or NULL if none can be found or revived.  Prefers a valid handle
 * belonging to the current thread group; falls back to any owner, and
 * finally attempts (up to MAX_REOPEN_ATT times) to reopen an
 * invalidated handle.  The caller must drop the reference with
 * cifsFileInfo_put().
 */
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
                                        bool fsuid_only)
{
        struct cifsFileInfo *open_file, *inv_file = NULL;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
        bool any_available = false;
        int rc;
        unsigned int refind = 0;

        /* Having a null inode here (because mapping->host was set to zero by
        the VFS or MM) should not happen but we had reports of on oops (due to
        it being zero) during stress testcases so we need to check for it */

        if (cifs_inode == NULL) {
                cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
                dump_stack();
                return NULL;
        }

        cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
        tcon = cifs_sb_master_tcon(cifs_sb);

        /* only filter by fsuid on multiuser mounts */
        if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
                fsuid_only = false;

        spin_lock(&tcon->open_file_lock);
refind_writable:
        /* give up after too many failed reopen attempts */
        if (refind > MAX_REOPEN_ATT) {
                spin_unlock(&tcon->open_file_lock);
                return NULL;
        }
        list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
                /* first pass: only handles opened by this thread group */
                if (!any_available && open_file->pid != current->tgid)
                        continue;
                if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
                        continue;
                if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
                        if (!open_file->invalidHandle) {
                                /* found a good writable file */
                                cifsFileInfo_get(open_file);
                                spin_unlock(&tcon->open_file_lock);
                                return open_file;
                        } else {
                                /* remember the first invalid handle as a
                                   reopen candidate of last resort */
                                if (!inv_file)
                                        inv_file = open_file;
                        }
                }
        }
        /* couldn't find useable FH with same pid, try any available */
        if (!any_available) {
                any_available = true;
                goto refind_writable;
        }

        if (inv_file) {
                any_available = false;
                cifsFileInfo_get(inv_file);
        }

        spin_unlock(&tcon->open_file_lock);

        if (inv_file) {
                /* try to revive the invalidated handle (lock dropped:
                   reopen may go to the server) */
                rc = cifs_reopen_file(inv_file, false);
                if (!rc)
                        return inv_file;
                else {
                        /* reopen failed: demote this handle to the tail
                           and rescan the list */
                        spin_lock(&tcon->open_file_lock);
                        list_move_tail(&inv_file->flist,
                                        &cifs_inode->openFileList);
                        spin_unlock(&tcon->open_file_lock);
                        cifsFileInfo_put(inv_file);
                        ++refind;
                        inv_file = NULL;
                        /* refind_writable expects the lock held */
                        spin_lock(&tcon->open_file_lock);
                        goto refind_writable;
                }
        }

        return NULL;
}
1926 
1927 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1928 {
1929         struct address_space *mapping = page->mapping;
1930         loff_t offset = (loff_t)page->index << PAGE_SHIFT;
1931         char *write_data;
1932         int rc = -EFAULT;
1933         int bytes_written = 0;
1934         struct inode *inode;
1935         struct cifsFileInfo *open_file;
1936 
1937         if (!mapping || !mapping->host)
1938                 return -EFAULT;
1939 
1940         inode = page->mapping->host;
1941 
1942         offset += (loff_t)from;
1943         write_data = kmap(page);
1944         write_data += from;
1945 
1946         if ((to > PAGE_SIZE) || (from > to)) {
1947                 kunmap(page);
1948                 return -EIO;
1949         }
1950 
1951         /* racing with truncate? */
1952         if (offset > mapping->host->i_size) {
1953                 kunmap(page);
1954                 return 0; /* don't care */
1955         }
1956 
1957         /* check to make sure that we are not extending the file */
1958         if (mapping->host->i_size - offset < (loff_t)to)
1959                 to = (unsigned)(mapping->host->i_size - offset);
1960 
1961         open_file = find_writable_file(CIFS_I(mapping->host), false);
1962         if (open_file) {
1963                 bytes_written = cifs_write(open_file, open_file->pid,
1964                                            write_data, to - from, &offset);
1965                 cifsFileInfo_put(open_file);
1966                 /* Does mm or vfs already set times? */
1967                 inode->i_atime = inode->i_mtime = current_time(inode);
1968                 if ((bytes_written > 0) && (offset))
1969                         rc = 0;
1970                 else if (bytes_written < 0)
1971                         rc = bytes_written;
1972         } else {
1973                 cifs_dbg(FYI, "No writeable filehandles for inode\n");
1974                 rc = -EIO;
1975         }
1976 
1977         kunmap(page);
1978         return rc;
1979 }
1980 
1981 static struct cifs_writedata *
1982 wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
1983                           pgoff_t end, pgoff_t *index,
1984                           unsigned int *found_pages)
1985 {
1986         struct cifs_writedata *wdata;
1987 
1988         wdata = cifs_writedata_alloc((unsigned int)tofind,
1989                                      cifs_writev_complete);
1990         if (!wdata)
1991                 return NULL;
1992 
1993         *found_pages = find_get_pages_range_tag(mapping, index, end,
1994                                 PAGECACHE_TAG_DIRTY, tofind, wdata->pages);
1995         return wdata;
1996 }
1997 
/*
 * wdata_prepare_pages - lock and tag-for-writeback a consecutive run of
 * the @found_pages pages collected in @wdata.
 *
 * Stops at the first page that cannot be locked, is no longer attached
 * to @mapping, is past @end or the EOF, is non-consecutive, or is
 * already clean.  Pages kept in the run are locked and marked
 * writeback; the leftovers have their references dropped.
 *
 * Returns the number of prepared pages (possibly 0).  *@next is the
 * expected index of the page after the run; *@done is set when
 * writeback should stop; *@index is rewound when nothing was prepared.
 */
static unsigned int
wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
                    struct address_space *mapping,
                    struct writeback_control *wbc,
                    pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
{
        unsigned int nr_pages = 0, i;
        struct page *page;

        for (i = 0; i < found_pages; i++) {
                page = wdata->pages[i];
                /*
                 * At this point we hold neither the i_pages lock nor the
                 * page lock: the page may be truncated or invalidated
                 * (changing page->mapping to NULL), or even swizzled
                 * back from swapper_space to tmpfs file mapping
                 */

                /* block for the first page, never mid-run */
                if (nr_pages == 0)
                        lock_page(page);
                else if (!trylock_page(page))
                        break;

                if (unlikely(page->mapping != mapping)) {
                        unlock_page(page);
                        break;
                }

                if (!wbc->range_cyclic && page->index > end) {
                        *done = true;
                        unlock_page(page);
                        break;
                }

                if (*next && (page->index != *next)) {
                        /* Not next consecutive page */
                        unlock_page(page);
                        break;
                }

                if (wbc->sync_mode != WB_SYNC_NONE)
                        wait_on_page_writeback(page);

                if (PageWriteback(page) ||
                                !clear_page_dirty_for_io(page)) {
                        unlock_page(page);
                        break;
                }

                /*
                 * This actually clears the dirty bit in the radix tree.
                 * See cifs_writepage() for more commentary.
                 */
                set_page_writeback(page);
                if (page_offset(page) >= i_size_read(mapping->host)) {
                        *done = true;
                        unlock_page(page);
                        end_page_writeback(page);
                        break;
                }

                wdata->pages[i] = page;
                *next = page->index + 1;
                ++nr_pages;
        }

        /* reset index to refind any pages skipped */
        if (nr_pages == 0)
                *index = wdata->pages[0]->index + 1;

        /* put any pages we aren't going to use */
        for (i = nr_pages; i < found_pages; i++) {
                put_page(wdata->pages[i]);
                wdata->pages[i] = NULL;
        }

        return nr_pages;
}
2076 
/*
 * wdata_send_pages - fill in the remaining @wdata fields and submit the
 * async write of @nr_pages prepared pages.
 *
 * Looks up a writable handle on the inode (replacing any stale cached
 * one on @wdata) and hands @wdata to the server's async_writev op.
 * All page locks are released before returning, whether or not the
 * submission succeeded; the writeback bits are cleared later by the
 * completion/cleanup paths.
 *
 * Returns 0 on successful submission, -EBADF when no writable handle
 * exists, or the async_writev error.
 */
static int
wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
                 struct address_space *mapping, struct writeback_control *wbc)
{
        int rc = 0;
        struct TCP_Server_Info *server;
        unsigned int i;

        wdata->sync_mode = wbc->sync_mode;
        wdata->nr_pages = nr_pages;
        wdata->offset = page_offset(wdata->pages[0]);
        wdata->pagesz = PAGE_SIZE;
        /* last page may extend past EOF - only write up to i_size */
        wdata->tailsz = min(i_size_read(mapping->host) -
                        page_offset(wdata->pages[nr_pages - 1]),
                        (loff_t)PAGE_SIZE);
        wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;

        if (wdata->cfile != NULL)
                cifsFileInfo_put(wdata->cfile);
        wdata->cfile = find_writable_file(CIFS_I(mapping->host), false);
        if (!wdata->cfile) {
                cifs_dbg(VFS, "No writable handles for inode\n");
                rc = -EBADF;
        } else {
                wdata->pid = wdata->cfile->pid;
                server = tlink_tcon(wdata->cfile->tlink)->ses->server;
                rc = server->ops->async_writev(wdata, cifs_writedata_release);
        }

        for (i = 0; i < nr_pages; ++i)
                unlock_page(wdata->pages[i]);

        return rc;
}
2111 
/*
 * cifs_writepages - ->writepages() handler: write dirty pages of
 * @mapping back to the server in wsize-sized batches.
 *
 * Each iteration reserves write credits, collects up to a wsize worth
 * of consecutive dirty pages, submits them asynchronously, and cleans
 * up on failure.  Honors WB_SYNC_ALL retries, interruption by signals,
 * and range_cyclic wrap-around semantics.
 */
static int cifs_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
{
        struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
        struct TCP_Server_Info *server;
        bool done = false, scanned = false, range_whole = false;
        pgoff_t end, index;
        struct cifs_writedata *wdata;
        int rc = 0;
        int saved_rc = 0;
        unsigned int xid;

        /*
         * If wsize is smaller than the page cache size, default to writing
         * one page at a time via cifs_writepage
         */
        if (cifs_sb->wsize < PAGE_SIZE)
                return generic_writepages(mapping, wbc);

        xid = get_xid();
        if (wbc->range_cyclic) {
                index = mapping->writeback_index; /* Start from prev offset */
                end = -1;
        } else {
                index = wbc->range_start >> PAGE_SHIFT;
                end = wbc->range_end >> PAGE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = true;
                scanned = true;
        }
        server = cifs_sb_master_tcon(cifs_sb)->ses->server;
retry:
        while (!done && index <= end) {
                unsigned int i, nr_pages, found_pages, wsize, credits;
                pgoff_t next = 0, tofind, saved_index = index;

                /* reserve credits before grabbing pages */
                rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
                                                   &wsize, &credits);
                if (rc != 0) {
                        done = true;
                        break;
                }

                /* at most one wsize worth of pages, bounded by the range */
                tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;

                wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
                                                  &found_pages);
                if (!wdata) {
                        rc = -ENOMEM;
                        done = true;
                        add_credits_and_wake_if(server, credits, 0);
                        break;
                }

                if (found_pages == 0) {
                        kref_put(&wdata->refcount, cifs_writedata_release);
                        add_credits_and_wake_if(server, credits, 0);
                        break;
                }

                nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
                                               end, &index, &next, &done);

                /* nothing to write? */
                if (nr_pages == 0) {
                        kref_put(&wdata->refcount, cifs_writedata_release);
                        add_credits_and_wake_if(server, credits, 0);
                        continue;
                }

                wdata->credits = credits;

                rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);

                /* send failure -- clean up the mess */
                if (rc != 0) {
                        add_credits_and_wake_if(server, wdata->credits, 0);
                        for (i = 0; i < nr_pages; ++i) {
                                /* retryable errors get redirtied, others
                                   are reported to the mapping */
                                if (is_retryable_error(rc))
                                        redirty_page_for_writepage(wbc,
                                                           wdata->pages[i]);
                                else
                                        SetPageError(wdata->pages[i]);
                                end_page_writeback(wdata->pages[i]);
                                put_page(wdata->pages[i]);
                        }
                        if (!is_retryable_error(rc))
                                mapping_set_error(mapping, rc);
                }
                kref_put(&wdata->refcount, cifs_writedata_release);

                /* data-integrity sync must retry -EAGAIN batches */
                if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
                        index = saved_index;
                        continue;
                }

                /* Return immediately if we received a signal during writing */
                if (is_interrupt_error(rc)) {
                        done = true;
                        break;
                }

                /* remember the first failure but keep writing */
                if (rc != 0 && saved_rc == 0)
                        saved_rc = rc;

                wbc->nr_to_write -= nr_pages;
                if (wbc->nr_to_write <= 0)
                        done = true;

                index = next;
        }

        if (!scanned && !done) {
                /*
                 * We hit the last page and there is more work to be done: wrap
                 * back to the start of the file
                 */
                scanned = true;
                index = 0;
                goto retry;
        }

        if (saved_rc != 0)
                rc = saved_rc;

        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = index;

        free_xid(xid);
        return rc;
}
2243 
/*
 * cifs_writepage_locked - write one locked page back to the server.
 *
 * Takes its own reference on @page for the duration; the caller keeps
 * the page lock.  Retryable errors redirty the page (and, for
 * WB_SYNC_ALL with -EAGAIN, retry inline); other errors mark the page
 * and the mapping with the error.
 */
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
        int rc;
        unsigned int xid;

        xid = get_xid();
/* BB add check for wbc flags */
        get_page(page);
        if (!PageUptodate(page))
                cifs_dbg(FYI, "ppw - page not up to date\n");

        /*
         * Set the "writeback" flag, and clear "dirty" in the radix tree.
         *
         * A writepage() implementation always needs to do either this,
         * or re-dirty the page with "redirty_page_for_writepage()" in
         * the case of a failure.
         *
         * Just unlocking the page will cause the radix tree tag-bits
         * to fail to update with the state of the page correctly.
         */
        set_page_writeback(page);
retry_write:
        rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
        if (is_retryable_error(rc)) {
                if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
                        goto retry_write;
                redirty_page_for_writepage(wbc, page);
        } else if (rc != 0) {
                SetPageError(page);
                mapping_set_error(page->mapping, rc);
        } else {
                SetPageUptodate(page);
        }
        end_page_writeback(page);
        put_page(page);
        free_xid(xid);
        return rc;
}
2284 
/* ->writepage() handler: write the page, then release its lock. */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
        int retval;

        retval = cifs_writepage_locked(page, wbc);
        unlock_page(page);

        return retval;
}
2291 
/*
 * cifs_write_end - ->write_end() handler: commit @copied bytes written
 * into @page at @pos.
 *
 * An up-to-date page is simply dirtied for later writeback; a partial
 * write into a non-up-to-date page is pushed to the server immediately
 * via cifs_write() using this file's own handle.  Extends i_size when
 * the write went past it.  Returns the number of bytes committed or a
 * negative errno, and drops the page lock/reference taken by
 * write_begin.
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        int rc;
        struct inode *inode = mapping->host;
        struct cifsFileInfo *cfile = file->private_data;
        struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
        __u32 pid;

        /* pick the pid to stamp on the request (lock-owner semantics) */
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
                pid = cfile->pid;
        else
                pid = current->tgid;

        cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
                 page, pos, copied);

        if (PageChecked(page)) {
                /* write_begin marked it Checked: uptodate only if the
                   whole requested range was copied */
                if (copied == len)
                        SetPageUptodate(page);
                ClearPageChecked(page);
        } else if (!PageUptodate(page) && copied == PAGE_SIZE)
                SetPageUptodate(page);

        if (!PageUptodate(page)) {
                char *page_data;
                unsigned offset = pos & (PAGE_SIZE - 1);
                unsigned int xid;

                xid = get_xid();
                /* this is probably better than directly calling
                   partialpage_write since in this function the file handle is
                   known which we might as well leverage */
                /* BB check if anything else missing out of ppw
                   such as updating last write time */
                page_data = kmap(page);
                rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
                /* if (rc < 0) should we set writebehind rc? */
                kunmap(page);

                free_xid(xid);
        } else {
                /* page fully cached: just dirty it for writeback */
                rc = copied;
                pos += copied;
                set_page_dirty(page);
        }

        if (rc > 0) {
                spin_lock(&inode->i_lock);
                if (pos > inode->i_size)
                        i_size_write(inode, pos);
                spin_unlock(&inode->i_lock);
        }

        unlock_page(page);
        put_page(page);

        return rc;
}
2352 
/*
 * Strict-cache fsync: write back and wait on the dirty range, invalidate
 * the pagecache if we no longer hold a read cache grant, then ask the
 * server to flush the open handle (unless the mount disabled server sync).
 */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	/* Flush dirty pages for the range before talking to the server. */
	rc = file_write_and_wait_range(file, start, end);
	if (rc)
		return rc;
	inode_lock(inode);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	/*
	 * Without a read-cache grant the local cache may be stale; zap it.
	 * An invalidation failure is deliberately not reported from fsync.
	 */
	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
		rc = cifs_zap_mapping(inode);
		if (rc) {
			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	inode_unlock(inode);
	return rc;
}
2395 
/*
 * Non-strict fsync: write back and wait on the dirty range, then flush the
 * open handle on the server (unless the mount disabled server sync).
 * Unlike cifs_strict_fsync(), no pagecache invalidation is performed.
 */
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct inode *inode = file->f_mapping->host;

	rc = file_write_and_wait_range(file, start, end);
	if (rc)
		return rc;
	inode_lock(inode);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	inode_unlock(inode);
	return rc;
}
2429 
2430 /*
2431  * As file closes, flush all cached write data for this inode checking
2432  * for write behind errors.
2433  */
2434 int cifs_flush(struct file *file, fl_owner_t id)
2435 {
2436         struct inode *inode = file_inode(file);
2437         int rc = 0;
2438 
2439         if (file->f_mode & FMODE_WRITE)
2440                 rc = filemap_write_and_wait(inode->i_mapping);
2441 
2442         cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2443 
2444         return rc;
2445 }
2446 
2447 static int
2448 cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2449 {
2450         int rc = 0;
2451         unsigned long i;
2452 
2453         for (i = 0; i < num_pages; i++) {
2454                 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2455                 if (!pages[i]) {
2456                         /*
2457                          * save number of pages we have already allocated and
2458                          * return with ENOMEM error
2459                          */
2460                         num_pages = i;
2461                         rc = -ENOMEM;
2462                         break;
2463                 }
2464         }
2465 
2466         if (rc) {
2467                 for (i = 0; i < num_pages; i++)
2468                         put_page(pages[i]);
2469         }
2470         return rc;
2471 }
2472 
2473 static inline
2474 size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2475 {
2476         size_t num_pages;
2477         size_t clen;
2478 
2479         clen = min_t(const size_t, len, wsize);
2480         num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
2481 
2482         if (cur_len)
2483                 *cur_len = clen;
2484 
2485         return num_pages;
2486 }
2487 
/*
 * Final kref release for an uncached write: drop our reference on the
 * owning aio context, put the page references held for the payload, then
 * free the wdata itself through the common release path.
 */
static void
cifs_uncached_writedata_release(struct kref *refcount)
{
	int i;
	struct cifs_writedata *wdata = container_of(refcount,
					struct cifs_writedata, refcount);

	kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
	for (i = 0; i < wdata->nr_pages; i++)
		put_page(wdata->pages[i]);
	cifs_writedata_release(refcount);
}
2500 
2501 static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);
2502 
/*
 * Work item run when an uncached write request finishes: update the
 * server EOF / i_size bookkeeping, signal any synchronous waiter, and run
 * the context-level collector which may complete the whole aio request.
 */
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = d_inode(wdata->cfile->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	/* i_lock serializes server_eof and i_size updates. */
	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);
	collect_uncached_write_data(wdata->ctx);
	/* the below call can possibly free the last ref to aio ctx */
	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}
2522 
/*
 * Copy up to *len bytes from the user iterator @from into the pages of
 * @wdata.  On return *len holds the number of bytes actually copied and
 * *num_pages the number of pages used.  Returns 0, or -EFAULT if nothing
 * at all could be copied.
 */
static int
wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
		      size_t *len, unsigned long *num_pages)
{
	size_t save_len, copied, bytes, cur_len = *len;
	unsigned long i, nr_pages = *num_pages;

	save_len = cur_len;
	for (i = 0; i < nr_pages; i++) {
		bytes = min_t(const size_t, cur_len, PAGE_SIZE);
		copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
		cur_len -= copied;
		/*
		 * If we didn't copy as much as we expected, then that
		 * may mean we trod into an unmapped area. Stop copying
		 * at that point. On the next pass through the big
		 * loop, we'll likely end up getting a zero-length
		 * write and bailing out of it.
		 */
		if (copied < bytes)
			break;
	}
	/* cur_len becomes the number of bytes successfully copied. */
	cur_len = save_len - cur_len;
	*len = cur_len;

	/*
	 * If we have no data to send, then that probably means that
	 * the copy above failed altogether. That's most likely because
	 * the address in the iovec was bogus. Return -EFAULT and let
	 * the caller free anything we allocated and bail out.
	 */
	if (!cur_len)
		return -EFAULT;

	/*
	 * i + 1 now represents the number of pages we actually used in
	 * the copy phase above.
	 */
	*num_pages = i + 1;
	return 0;
}
2564 
/*
 * Resend a previously failed wdata as one request (not re-split into
 * segments): poll until enough credits are available for the whole
 * payload, reopen the file handle if it went stale, then re-issue the
 * async write.  On success the wdata is queued on @wdata_list for the
 * collector; on any failure our reference to it is dropped.
 *
 * NOTE(review): both the credit wait (1 s msleep poll) and the -EAGAIN
 * reopen/send loop can iterate indefinitely with no signal or shutdown
 * check — confirm this matches the intended reconnect semantics.
 */
static int
cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
	struct cifs_aio_ctx *ctx)
{
	unsigned int wsize, credits;
	int rc;
	struct TCP_Server_Info *server =
		tlink_tcon(wdata->cfile->tlink)->ses->server;

	/*
	 * Wait for credits to resend this wdata.
	 * Note: we are attempting to resend the whole wdata not in segments
	 */
	do {
		rc = server->ops->wait_mtu_credits(
			server, wdata->bytes, &wsize, &credits);

		if (rc)
			goto out;

		/* Not enough yet: give the credits back and retry later. */
		if (wsize < wdata->bytes) {
			add_credits_and_wake_if(server, credits, 0);
			msleep(1000);
		}
	} while (wsize < wdata->bytes);

	rc = -EAGAIN;
	while (rc == -EAGAIN) {
		rc = 0;
		/* Reopen a stale handle before re-sending. */
		if (wdata->cfile->invalidHandle)
			rc = cifs_reopen_file(wdata->cfile, false);
		if (!rc)
			rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
	}

	if (!rc) {
		list_add_tail(&wdata->list, wdata_list);
		return 0;
	}

	/* NOTE(review): releases wdata->credits, not the freshly acquired
	   local credits — verify against the credit accounting scheme. */
	add_credits_and_wake_if(server, wdata->credits, 0);
out:
	kref_put(&wdata->refcount, cifs_uncached_writedata_release);

	return rc;
}
2612 
/*
 * Split an uncached write of @len bytes at @offset into wsize-bounded
 * chunks, build a cifs_writedata for each (pinning user pages directly
 * for direct I/O, or allocating pages and copying for buffered-uncached),
 * and issue them asynchronously.  Successfully sent wdatas are queued on
 * @wdata_list for collect_uncached_write_data() to reap.
 *
 * Returns 0 if everything was sent, otherwise the first fatal error; a
 * chunk failing with -EAGAIN is retried from a saved copy of the iterator.
 */
static int
cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
		     struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
		     struct cifs_aio_ctx *ctx)
{
	int rc = 0;
	size_t cur_len;
	unsigned long nr_pages, num_pages, i;
	struct cifs_writedata *wdata;
	struct iov_iter saved_from = *from;
	loff_t saved_offset = offset;
	pid_t pid;
	struct TCP_Server_Info *server;
	struct page **pagevec;
	size_t start;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	server = tlink_tcon(open_file->tlink)->ses->server;

	do {
		unsigned int wsize, credits;

		/* Acquire send credits; wsize may be smaller than asked for. */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc)
			break;

		cur_len = min_t(const size_t, len, wsize);

		if (ctx->direct_io) {
			ssize_t result;

			/* Pin the user pages for zero-copy direct I/O. */
			result = iov_iter_get_pages_alloc(
				from, &pagevec, cur_len, &start);
			if (result < 0) {
				cifs_dbg(VFS,
					"direct_writev couldn't get user pages "
					"(rc=%zd) iter type %d iov_offset %zd "
					"count %zd\n",
					result, from->type,
					from->iov_offset, from->count);
				dump_stack();

				rc = result;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}
			cur_len = (size_t)result;
			iov_iter_advance(from, cur_len);

			nr_pages =
				(cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;

			wdata = cifs_writedata_direct_alloc(pagevec,
					     cifs_uncached_writev_complete);
			if (!wdata) {
				rc = -ENOMEM;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}


			/* First page may start mid-page; compute the tail. */
			wdata->page_offset = start;
			wdata->tailsz =
				nr_pages > 1 ?
					cur_len - (PAGE_SIZE - start) -
					(nr_pages - 2) * PAGE_SIZE :
					cur_len;
		} else {
			/* Buffered-uncached: allocate pages and copy in. */
			nr_pages = get_numpages(wsize, len, &cur_len);
			wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
			if (!wdata) {
				rc = -ENOMEM;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
			if (rc) {
				kvfree(wdata->pages);
				kfree(wdata);
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			num_pages = nr_pages;
			rc = wdata_fill_from_iovec(
				wdata, from, &cur_len, &num_pages);
			if (rc) {
				for (i = 0; i < nr_pages; i++)
					put_page(wdata->pages[i]);
				kvfree(wdata->pages);
				kfree(wdata);
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			/*
			 * Bring nr_pages down to the number of pages we
			 * actually used, and free any pages that we didn't use.
			 */
			for ( ; nr_pages > num_pages; nr_pages--)
				put_page(wdata->pages[nr_pages - 1]);

			wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		}

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		wdata->credits = credits;
		wdata->ctx = ctx;
		kref_get(&ctx->refcount);

		/* Reopen a stale handle before sending, then issue async. */
		if (!wdata->cfile->invalidHandle ||
		    !(rc = cifs_reopen_file(wdata->cfile, false)))
			rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
		if (rc) {
			add_credits_and_wake_if(server, wdata->credits, 0);
			kref_put(&wdata->refcount,
				 cifs_uncached_writedata_release);
			/* Retry this chunk from the saved iterator state. */
			if (rc == -EAGAIN) {
				*from = saved_from;
				iov_iter_advance(from, offset - saved_offset);
				continue;
			}
			break;
		}

		list_add_tail(&wdata->list, wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}
2760 
/*
 * Reap completed wdatas for an uncached-write aio context.  Called both
 * from the submitter and from each completion work item; the aio_mutex
 * serializes reapers, and try_wait_for_completion() lets a reaper bail
 * out early so the last completer finishes the job.  Retryable (-EAGAIN)
 * chunks are resent.  When all wdatas are accounted for, the context
 * result is set and the iocb is completed (or the sync waiter woken).
 */
static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
{
	struct cifs_writedata *wdata, *tmp;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct dentry *dentry = ctx->cfile->dentry;
	unsigned int i;
	int rc;

	tcon = tlink_tcon(ctx->cfile->tlink);
	cifs_sb = CIFS_SB(dentry->d_sb);

	mutex_lock(&ctx->aio_mutex);

	/* An empty list means another reaper already finished the context. */
	if (list_empty(&ctx->list)) {
		mutex_unlock(&ctx->aio_mutex);
		return;
	}

	rc = ctx->rc;
	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit, then return without waiting
	 * for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
		if (!rc) {
			/* Not done yet: leave the rest for a later completer. */
			if (!try_wait_for_completion(&wdata->done)) {
				mutex_unlock(&ctx->aio_mutex);
				return;
			}

			if (wdata->result)
				rc = wdata->result;
			else
				ctx->total_len += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				struct list_head tmp_list;
				struct iov_iter tmp_from = ctx->iter;

				INIT_LIST_HEAD(&tmp_list);
				list_del_init(&wdata->list);

				if (ctx->direct_io)
					rc = cifs_resend_wdata(
						wdata, &tmp_list, ctx);
				else {
					/* Reposition a copy of the iterator
					   at this chunk's file offset. */
					iov_iter_advance(&tmp_from,
						 wdata->offset - ctx->pos);

					rc = cifs_write_from_iter(wdata->offset,
						wdata->bytes, &tmp_from,
						ctx->cfile, cifs_sb, &tmp_list,
						ctx);
				}

				list_splice(&tmp_list, &ctx->list);

				kref_put(&wdata->refcount,
					 cifs_uncached_writedata_release);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_uncached_writedata_release);
	}

	/* Direct I/O pinned user pages elsewhere; only drop our bvec pages. */
	if (!ctx->direct_io)
		for (i = 0; i < ctx->npages; i++)
			put_page(ctx->bv[i].bv_page);

	cifs_stats_bytes_written(tcon, ctx->total_len);
	set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);

	ctx->rc = (rc == 0) ? ctx->total_len : rc;

	mutex_unlock(&ctx->aio_mutex);

	/* Async iocb gets completed; a sync caller is waiting on ctx->done. */
	if (ctx->iocb && ctx->iocb->ki_complete)
		ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
	else
		complete(&ctx->done);
}
2847 
/*
 * Common implementation behind cifs_user_writev()/cifs_direct_writev():
 * set up an aio context, fan the write out via cifs_write_from_iter(),
 * then either return -EIOCBQUEUED (async iocb) or wait for collection and
 * return the byte count.  @direct selects zero-copy page pinning; it is
 * forced off for ITER_KVEC iterators which cannot be pinned.
 */
static ssize_t __cifs_writev(
	struct kiocb *iocb, struct iov_iter *from, bool direct)
{
	struct file *file = iocb->ki_filp;
	ssize_t total_written = 0;
	struct cifsFileInfo *cfile;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_aio_ctx *ctx;
	struct iov_iter saved_from = *from;
	size_t len = iov_iter_count(from);
	int rc;

	/*
	 * iov_iter_get_pages_alloc doesn't work with ITER_KVEC.
	 * In this case, fall back to non-direct write function.
	 * this could be improved by getting pages directly in ITER_KVEC
	 */
	if (direct && from->type & ITER_KVEC) {
		cifs_dbg(FYI, "use non-direct cifs_writev for kvec I/O\n");
		direct = false;
	}

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		return rc;

	cifs_sb = CIFS_FILE_SB(file);
	cfile = file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	ctx = cifs_aio_ctx_alloc();
	if (!ctx)
		return -ENOMEM;

	ctx->cfile = cifsFileInfo_get(cfile);

	/* Only async iocbs are completed through ki_complete later. */
	if (!is_sync_kiocb(iocb))
		ctx->iocb = iocb;

	ctx->pos = iocb->ki_pos;

	if (direct) {
		ctx->direct_io = true;
		ctx->iter = *from;
		ctx->len = len;
	} else {
		/* Duplicate the iterator's pages into the context's bvec. */
		rc = setup_aio_ctx_iter(ctx, from, WRITE);
		if (rc) {
			kref_put(&ctx->refcount, cifs_aio_ctx_release);
			return rc;
		}
	}

	/* grab a lock here due to read response handlers can access ctx */
	mutex_lock(&ctx->aio_mutex);

	rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &saved_from,
				  cfile, cifs_sb, &ctx->list, ctx);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&ctx->list))
		rc = 0;

	mutex_unlock(&ctx->aio_mutex);

	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}

	if (!is_sync_kiocb(iocb)) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return -EIOCBQUEUED;
	}

	/* Synchronous path: wait for the collector to finish the context. */
	rc = wait_for_completion_killable(&ctx->done);
	if (rc) {
		mutex_lock(&ctx->aio_mutex);
		ctx->rc = rc = -EINTR;
		total_written = ctx->total_len;
		mutex_unlock(&ctx->aio_mutex);
	} else {
		rc = ctx->rc;
		total_written = ctx->total_len;
	}

	kref_put(&ctx->refcount, cifs_aio_ctx_release);

	if (unlikely(!total_written))
		return rc;

	iocb->ki_pos += total_written;
	return total_written;
}
2951 
/* Uncached write entry point with direct (zero-copy) page pinning. */
ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
{
	return __cifs_writev(iocb, from, true);
}
2956 
/* Uncached write entry point that copies user data into kernel pages. */
ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
{
	return __cifs_writev(iocb, from, false);
}
2961 
/*
 * Cached write used when we hold a write oplock/lease: take the inode
 * lock and a read hold on lock_sem so no conflicting brlock can be added,
 * refuse the write on a mandatory-lock conflict, otherwise go through the
 * generic pagecache write path and sync if the file requires it.
 */
static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc;

	inode_lock(inode);
	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		goto out;

	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
				     server->vals->exclusive_lock_type, 0,
				     NULL, CIFS_WRITE_OP))
		rc = __generic_file_write_iter(iocb, from);
	else
		rc = -EACCES;
out:
	up_read(&cinode->lock_sem);
	inode_unlock(inode);

	/* O_SYNC / O_DSYNC handling after locks are dropped. */
	if (rc > 0)
		rc = generic_write_sync(iocb, rc);
	return rc;
}
2997 
/*
 * Strict-cache write dispatcher: with a write cache grant, use the cached
 * path (fully generic when POSIX byte-range locks apply, otherwise the
 * mandatory-lock-aware cifs_writev()); without one, write through to the
 * server uncached and drop any level2 oplock so stale reads can't happen.
 */
ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	/* Blocks (or fails) while an oplock break is being handled. */
	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		if (cap_unix(tcon->ses) &&
		(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
		  && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			written = generic_file_write_iter(iocb, from);
			goto out;
		}
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the data
	 * to the server exactly from the pos to pos+len-1 rather than flush all
	 * affected pages because it may cause a error with mandatory locks on
	 * these pages but not on the region from pos to ppos+len-1.
	 */
	written = cifs_user_writev(iocb, from);
	if (written > 0 && CIFS_CACHE_READ(cinode)) {
		/*
		 * Windows 7 server can delay breaking level2 oplock if a write
		 * request comes - break it on the client to prevent reading
		 * an old data.
		 */
		cifs_zap_mapping(inode);
		cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
			 inode);
		cinode->oplock = 0;
	}
out:
	cifs_put_writer(cinode);
	return written;
}
3045 
3046 static struct cifs_readdata *
3047 cifs_readdata_direct_alloc(struct page **pages, work_func_t complete)
3048 {
3049         struct cifs_readdata *rdata;
3050 
3051         rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
3052         if (rdata != NULL) {
3053                 rdata->pages = pages;
3054                 kref_init(&rdata->refcount);
3055                 INIT_LIST_HEAD(&rdata->list);
3056                 init_completion(&rdata->done);
3057                 INIT_WORK(&rdata->work, complete);
3058         }
3059 
3060         return rdata;
3061 }
3062 
3063 static struct cifs_readdata *
3064 cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
3065 {
3066         struct page **pages =
3067                 kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
3068         struct cifs_readdata *ret = NULL;
3069 
3070         if (pages) {
3071                 ret = cifs_readdata_direct_alloc(pages, complete);
3072                 if (!ret)
3073                         kfree(pages);
3074         }
3075 
3076         return ret;
3077 }
3078 
/*
 * Final kref release for a readdata: tear down any SMB-direct memory
 * registration, drop the file reference, and free the page-pointer array
 * plus the readdata itself.  Does not put the data pages — callers that
 * hold page references drop them first (see
 * cifs_uncached_readdata_release()).
 */
void
cifs_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (rdata->mr) {
		smbd_deregister_mr(rdata->mr);
		rdata->mr = NULL;
	}
#endif
	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

	kvfree(rdata->pages);
	kfree(rdata);
}
3096 
3097 static int
3098 cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
3099 {
3100         int rc = 0;
3101         struct page *page;
3102         unsigned int i;
3103 
3104         for (i = 0; i < nr_pages; i++) {
3105                 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3106                 if (!page) {
3107                         rc = -ENOMEM;
3108                         break;
3109                 }
3110                 rdata->pages[i] = page;
3111         }
3112 
3113         if (rc) {
3114                 for (i = 0; i < nr_pages; i++) {
3115                         put_page(rdata->pages[i]);
3116                         rdata->pages[i] = NULL;
3117                 }
3118         }
3119         return rc;
3120 }
3121 
/*
 * Final kref release for an uncached read: drop our reference on the
 * owning aio context, put the page references held for the data, then
 * free the readdata via the common release path.
 */
static void
cifs_uncached_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
	unsigned int i;

	kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
	for (i = 0; i < rdata->nr_pages; i++) {
		put_page(rdata->pages[i]);
	}
	cifs_readdata_release(refcount);
}
3135 
/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata:      the readdata response with list of pages holding data
 * @iter:       destination for our data
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 *
 * Returns 0 when all got_bytes were copied, -EFAULT otherwise.
 */
static int
cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
{
	size_t remaining = rdata->got_bytes;
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t copy = min_t(size_t, remaining, PAGE_SIZE);
		size_t written;

		/* NOTE(review): pipes get a kmap+byte copy instead of
		   copy_page_to_iter(), presumably to avoid splicing a
		   reference to our page into the pipe — confirm. */
		if (unlikely(iov_iter_is_pipe(iter))) {
			void *addr = kmap_atomic(page);

			written = copy_to_iter(addr, copy, iter);
			kunmap_atomic(addr);
		} else
			written = copy_page_to_iter(page, 0, copy, iter);
		remaining -= written;
		/* Short copy with room left in the iter: give up early. */
		if (written < copy && iov_iter_count(iter) > 0)
			break;
	}
	return remaining ? -EFAULT : 0;
}
3169 
3170 static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);
3171 
3172 static void
3173 cifs_uncached_readv_complete(struct work_struct *work)
3174 {
3175         struct cifs_readdata *rdata = container_of(work,
3176                                                 struct cifs_readdata, work);
3177 
3178         complete(&rdata->done);
3179         collect_uncached_read_data(rdata->ctx);
3180         /* the below call can possibly free the last ref to aio ctx */
3181         kref_put(&rdata->refcount, cifs_uncached_readdata_release);
3182 }
3183 
static int
uncached_fill_pages(struct TCP_Server_Info *server,
		    struct cifs_readdata *rdata, struct iov_iter *iter,
		    unsigned int len)
{
	/*
	 * Fill rdata's pages with up to @len bytes of response data.
	 *
	 * Data comes from one of three sources: @iter when non-NULL, the
	 * registered SMB Direct MR (which has already placed the data), or
	 * the server socket.  Pages beyond the end of the data are released.
	 *
	 * Returns the number of bytes placed into pages, or a negative
	 * errno when nothing was read and an error occurred.
	 */
	int result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
	unsigned int page_offset = rdata->page_offset;

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t n;
		unsigned int segment_size = rdata->pagesz;

		/* only the first page may start at a non-zero offset */
		if (i == 0)
			segment_size -= page_offset;
		else
			page_offset = 0;


		if (len <= 0) {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}

		n = len;
		if (len >= segment_size)
			/* enough data to fill the page */
			n = segment_size;
		else
			rdata->tailsz = len;	/* short final segment */
		len -= n;

		if (iter)
			result = copy_page_from_iter(
					page, page_offset, n, iter);
#ifdef CONFIG_CIFS_SMB_DIRECT
		else if (rdata->mr)
			result = n;	/* RDMA already delivered the bytes */
#endif
		else
			result = cifs_read_page_from_socket(
					server, page, page_offset, n);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	/* report partial progress unless the connection was aborted */
	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
						rdata->got_bytes : result;
}
3242 
3243 static int
3244 cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
3245                               struct cifs_readdata *rdata, unsigned int len)
3246 {
3247         return uncached_fill_pages(server, rdata, NULL, len);
3248 }
3249 
3250 static int
3251 cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
3252                               struct cifs_readdata *rdata,
3253                               struct iov_iter *iter)
3254 {
3255         return uncached_fill_pages(server, rdata, iter, iter->count);
3256 }
3257 
static int cifs_resend_rdata(struct cifs_readdata *rdata,
			struct list_head *rdata_list,
			struct cifs_aio_ctx *ctx)
{
	/*
	 * Re-issue a previously failed direct-I/O read using the same rdata
	 * (its pages are reused as-is).  On success the rdata is queued on
	 * @rdata_list and 0 is returned; on failure the credits are given
	 * back and our rdata reference is dropped.
	 */
	unsigned int rsize, credits;
	int rc;
	struct TCP_Server_Info *server =
		tlink_tcon(rdata->cfile->tlink)->ses->server;

	/*
	 * Wait for credits to resend this rdata.
	 * Note: we are attempting to resend the whole rdata not in segments
	 */
	do {
		rc = server->ops->wait_mtu_credits(server, rdata->bytes,
						&rsize, &credits);

		if (rc)
			goto out;

		/*
		 * Not enough credits for the whole request yet: return them
		 * and retry after a pause.
		 * NOTE(review): this loop has no upper bound -- it can spin
		 * for a long time if the server never grants rdata->bytes
		 * worth of credits; confirm this is acceptable.
		 */
		if (rsize < rdata->bytes) {
			add_credits_and_wake_if(server, credits, 0);
			msleep(1000);
		}
	} while (rsize < rdata->bytes);

	/* reopen an invalidated handle (if any), then resend the read */
	rc = -EAGAIN;
	while (rc == -EAGAIN) {
		rc = 0;
		if (rdata->cfile->invalidHandle)
			rc = cifs_reopen_file(rdata->cfile, true);
		if (!rc)
			rc = server->ops->async_readv(rdata);
	}

	if (!rc) {
		/* Add to aio pending list */
		list_add_tail(&rdata->list, rdata_list);
		return 0;
	}

	/* send failed: return the freshly acquired credits */
	add_credits_and_wake_if(server, rdata->credits, 0);
out:
	kref_put(&rdata->refcount,
		cifs_uncached_readdata_release);

	return rc;
}
3306 
static int
cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
		     struct cifs_aio_ctx *ctx)
{
	/*
	 * Split [offset, offset+len) into rsize-bounded async read requests
	 * and issue them.  Each successfully sent rdata is appended to
	 * @rdata_list and completes via cifs_uncached_readv_complete().
	 * Returns 0, or the first fatal error; requests already queued stay
	 * on the list either way.
	 */
	struct cifs_readdata *rdata;
	unsigned int npages, rsize, credits;
	size_t cur_len;
	int rc;
	pid_t pid;
	struct TCP_Server_Info *server;
	struct page **pagevec;
	size_t start;
	struct iov_iter direct_iov = ctx->iter;

	server = tlink_tcon(open_file->tlink)->ses->server;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	/* direct I/O: advance our private iter copy to the resume point */
	if (ctx->direct_io)
		iov_iter_advance(&direct_iov, offset - ctx->pos);

	do {
		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, &credits);
		if (rc)
			break;

		cur_len = min_t(const size_t, len, rsize);

		if (ctx->direct_io) {
			ssize_t result;

			/* pin the user pages backing this chunk of the iter */
			result = iov_iter_get_pages_alloc(
					&direct_iov, &pagevec,
					cur_len, &start);
			if (result < 0) {
				cifs_dbg(VFS,
					"couldn't get user pages (rc=%zd)"
					" iter type %d"
					" iov_offset %zd count %zd\n",
					result, direct_iov.type,
					direct_iov.iov_offset,
					direct_iov.count);
				dump_stack();

				rc = result;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}
			cur_len = (size_t)result;
			iov_iter_advance(&direct_iov, cur_len);

			rdata = cifs_readdata_direct_alloc(
					pagevec, cifs_uncached_readv_complete);
			if (!rdata) {
				add_credits_and_wake_if(server, credits, 0);
				rc = -ENOMEM;
				break;
			}

			/* first page starts at @start; the tail may be partial */
			npages = (cur_len + start + PAGE_SIZE-1) / PAGE_SIZE;
			rdata->page_offset = start;
			rdata->tailsz = npages > 1 ?
				cur_len-(PAGE_SIZE-start)-(npages-2)*PAGE_SIZE :
				cur_len;

		} else {

			npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
			/* allocate a readdata struct */
			rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
			if (!rdata) {
				add_credits_and_wake_if(server, credits, 0);
				rc = -ENOMEM;
				break;
			}

			rc = cifs_read_allocate_pages(rdata, npages);
			if (rc) {
				/* page allocation failed; free the bare rdata */
				kvfree(rdata->pages);
				kfree(rdata);
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			rdata->tailsz = PAGE_SIZE;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->nr_pages = npages;
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_uncached_read_into_pages;
		rdata->copy_into_pages = cifs_uncached_copy_into_pages;
		rdata->credits = credits;
		rdata->ctx = ctx;
		kref_get(&ctx->refcount);	/* rdata holds a ctx reference */

		if (!rdata->cfile->invalidHandle ||
		    !(rc = cifs_reopen_file(rdata->cfile, true)))
			rc = server->ops->async_readv(rdata);
		if (rc) {
			add_credits_and_wake_if(server, rdata->credits, 0);
			kref_put(&rdata->refcount,
				cifs_uncached_readdata_release);
			if (rc == -EAGAIN) {
				/* retry this chunk from the same iter position */
				iov_iter_revert(&direct_iov, cur_len);
				continue;
			}
			break;
		}

		list_add_tail(&rdata->list, rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}
3433 
static void
collect_uncached_read_data(struct cifs_aio_ctx *ctx)
{
	/*
	 * Reap completed read requests for @ctx: copy their data into the
	 * destination iterator (unless this is direct I/O, where the data
	 * already landed in user pages), resend requests that failed with
	 * -EAGAIN, and finalize ctx->rc once every request is done.
	 *
	 * Called both from the issuing task and from each request's
	 * completion work; ctx->aio_mutex serializes the callers and the
	 * function returns early while requests are still in flight.
	 */
	struct cifs_readdata *rdata, *tmp;
	struct iov_iter *to = &ctx->iter;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	unsigned int i;
	int rc;

	tcon = tlink_tcon(ctx->cfile->tlink);
	cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);

	mutex_lock(&ctx->aio_mutex);

	/* nothing left to collect */
	if (list_empty(&ctx->list)) {
		mutex_unlock(&ctx->aio_mutex);
		return;
	}

	rc = ctx->rc;
	/* the loop below should proceed in the order of increasing offsets */
again:
	list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
		if (!rc) {
			/* still in flight? come back when it completes */
			if (!try_wait_for_completion(&rdata->done)) {
				mutex_unlock(&ctx->aio_mutex);
				return;
			}

			if (rdata->result == -EAGAIN) {
				/* resend call if it's a retryable error */
				struct list_head tmp_list;
				unsigned int got_bytes = rdata->got_bytes;

				list_del_init(&rdata->list);
				INIT_LIST_HEAD(&tmp_list);

				/*
				 * Got a part of data and then reconnect has
				 * happened -- fill the buffer and continue
				 * reading.
				 */
				if (got_bytes && got_bytes < rdata->bytes) {
					rc = 0;
					if (!ctx->direct_io)
						rc = cifs_readdata_to_iov(rdata, to);
					if (rc) {
						kref_put(&rdata->refcount,
							cifs_uncached_readdata_release);
						continue;
					}
				}

				if (ctx->direct_io) {
					/*
					 * Re-use rdata as this is a
					 * direct I/O
					 */
					rc = cifs_resend_rdata(
						rdata,
						&tmp_list, ctx);
				} else {
					rc = cifs_send_async_read(
						rdata->offset + got_bytes,
						rdata->bytes - got_bytes,
						rdata->cfile, cifs_sb,
						&tmp_list, ctx);

					kref_put(&rdata->refcount,
						cifs_uncached_readdata_release);
				}

				list_splice(&tmp_list, &ctx->list);

				/* restart the walk: the list changed above */
				goto again;
			} else if (rdata->result)
				rc = rdata->result;
			else if (!ctx->direct_io)
				rc = cifs_readdata_to_iov(rdata, to);

			/* if there was a short read -- discard anything left */
			if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
				rc = -ENODATA;

			ctx->total_len += rdata->got_bytes;
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}

	if (!ctx->direct_io) {
		/* release (and possibly dirty) the pages pinned at setup */
		for (i = 0; i < ctx->npages; i++) {
			if (ctx->should_dirty)
				set_page_dirty(ctx->bv[i].bv_page);
			put_page(ctx->bv[i].bv_page);
		}

		ctx->total_len = ctx->len - iov_iter_count(to);
	}

	cifs_stats_bytes_read(tcon, ctx->total_len);

	/* mask nodata case */
	if (rc == -ENODATA)
		rc = 0;

	ctx->rc = (rc == 0) ? ctx->total_len : rc;

	mutex_unlock(&ctx->aio_mutex);

	/* async iocb: report completion; sync caller: wake the waiter */
	if (ctx->iocb && ctx->iocb->ki_complete)
		ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
	else
		complete(&ctx->done);
}
3550 
static ssize_t __cifs_readv(
	struct kiocb *iocb, struct iov_iter *to, bool direct)
{
	/*
	 * Common implementation for uncached and direct reads: set up a
	 * cifs_aio_ctx, issue async read requests covering the iterator,
	 * and either return -EIOCBQUEUED (async iocb) or wait for all
	 * requests to complete and return the number of bytes read.
	 */
	size_t len;
	struct file *file = iocb->ki_filp;
	struct cifs_sb_info *cifs_sb;
	struct cifsFileInfo *cfile;
	struct cifs_tcon *tcon;
	ssize_t rc, total_read = 0;
	loff_t offset = iocb->ki_pos;
	struct cifs_aio_ctx *ctx;

	/*
	 * iov_iter_get_pages_alloc() doesn't work with ITER_KVEC,
	 * fall back to data copy read path
	 * this could be improved by getting pages directly in ITER_KVEC
	 */
	if (direct && to->type & ITER_KVEC) {
		cifs_dbg(FYI, "use non-direct cifs_user_readv for kvec I/O\n");
		direct = false;
	}

	len = iov_iter_count(to);
	if (!len)
		return 0;

	cifs_sb = CIFS_FILE_SB(file);
	cfile = file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	if (!tcon->ses->server->ops->async_readv)
		return -ENOSYS;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	ctx = cifs_aio_ctx_alloc();
	if (!ctx)
		return -ENOMEM;

	/* ctx holds a file reference until all requests are done */
	ctx->cfile = cifsFileInfo_get(cfile);

	if (!is_sync_kiocb(iocb))
		ctx->iocb = iocb;

	/* user-backed iterators get their pages dirtied on completion */
	if (iter_is_iovec(to))
		ctx->should_dirty = true;

	if (direct) {
		ctx->pos = offset;
		ctx->direct_io = true;
		ctx->iter = *to;
		ctx->len = len;
	} else {
		/* copy path: build ctx->iter/bv over the destination pages */
		rc = setup_aio_ctx_iter(ctx, to, READ);
		if (rc) {
			kref_put(&ctx->refcount, cifs_aio_ctx_release);
			return rc;
		}
		len = ctx->len;
	}

	/* grab a lock here due to read response handlers can access ctx */
	mutex_lock(&ctx->aio_mutex);

	rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);

	/* if at least one read request send succeeded, then reset rc */
	if (!list_empty(&ctx->list))
		rc = 0;

	mutex_unlock(&ctx->aio_mutex);

	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}

	if (!is_sync_kiocb(iocb)) {
		/* async iocb: completion is reported via ki_complete */
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return -EIOCBQUEUED;
	}

	rc = wait_for_completion_killable(&ctx->done);
	if (rc) {
		/* killed while waiting: report progress so far as -EINTR */
		mutex_lock(&ctx->aio_mutex);
		ctx->rc = rc = -EINTR;
		total_read = ctx->total_len;
		mutex_unlock(&ctx->aio_mutex);
	} else {
		rc = ctx->rc;
		total_read = ctx->total_len;
	}

	kref_put(&ctx->refcount, cifs_aio_ctx_release);

	if (total_read) {
		iocb->ki_pos += total_read;
		return total_read;
	}
	return rc;
}
3653 
3654 ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to)
3655 {
3656         return __cifs_readv(iocb, to, true);
3657 }
3658 
3659 ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
3660 {
3661         return __cifs_readv(iocb, to, false);
3662 }
3663 
3664 ssize_t
3665 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
3666 {
3667         struct inode *inode = file_inode(iocb->ki_filp);
3668         struct cifsInodeInfo *cinode = CIFS_I(inode);
3669         struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3670         struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3671                                                 iocb->ki_filp->private_data;
3672         struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3673         int rc = -EACCES;
3674 
3675         /*
3676          * In strict cache mode we need to read from the server all the time
3677          * if we don't have level II oplock because the server can delay mtime
3678          * change - so we can't make a decision about inode invalidating.
3679          * And we can also fail with pagereading if there are mandatory locks
3680          * on pages affected by this read but not on the region from pos to
3681          * pos+len-1.
3682          */
3683         if (!CIFS_CACHE_READ(cinode))
3684                 return cifs_user_readv(iocb, to);
3685 
3686         if (cap_unix(tcon->ses) &&
3687             (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
3688             ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
3689                 return generic_file_read_iter(iocb, to);
3690 
3691         /*
3692          * We need to hold the sem to be sure nobody modifies lock list
3693          * with a brlock that prevents reading.
3694          */
3695         down_read(&cinode->lock_sem);
3696         if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
3697                                      tcon->ses->server->vals->shared_lock_type,
3698                                      0, NULL, CIFS_READ_OP))
3699                 rc = generic_file_read_iter(iocb, to);
3700         up_read(&cinode->lock_sem);
3701         return rc;
3702 }
3703 
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	/*
	 * Synchronous read into a kernel buffer: issue sync_read calls of
	 * at most rsize bytes until @read_size bytes are read, a zero-byte
	 * read is returned, or an error occurs.  Advances *offset as data
	 * arrives and returns the total bytes read (or an error when
	 * nothing was read).
	 */
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_FILE_SB(file);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		/* inner loop retries the request on -EAGAIN */
		do {
			current_read_size = min_t(uint, read_size - total_read,
						  rsize);
			/*
			 * For windows me and 9x we do not want to request more
			 * than it negotiated since it will refuse the read
			 * then.
			 */
			if ((tcon->ses) && !(tcon->ses->capabilities &
				tcon->ses->server->vals->cap_large_files)) {
				current_read_size = min_t(uint,
					current_read_size, CIFSMaxBufSize);
			}
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
						    &bytes_read, &cur_offset,
						    &buf_type);
		} while (rc == -EAGAIN);

		if (rc || (bytes_read == 0)) {
			/* error or EOF: return progress so far, if any */
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}
3794 
3795 /*
3796  * If the page is mmap'ed into a process' page tables, then we need to make
3797  * sure that it doesn't change while being written back.
3798  */
3799 static vm_fault_t
3800 cifs_page_mkwrite(struct vm_fault *vmf)
3801 {
3802         struct page *page = vmf->page;
3803 
3804         lock_page(page);
3805         return VM_FAULT_LOCKED;
3806 }
3807 
/* mmap operations: generic fault/readahead paths plus our mkwrite hook */
static const struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = cifs_page_mkwrite,
};
3813 
3814 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3815 {
3816         int xid, rc = 0;
3817         struct inode *inode = file_inode(file);
3818 
3819         xid = get_xid();
3820 
3821         if (!CIFS_CACHE_READ(CIFS_I(inode)))
3822                 rc = cifs_zap_mapping(inode);
3823         if (!rc)
3824                 rc = generic_file_mmap(file, vma);
3825         if (!rc)
3826                 vma->vm_ops = &cifs_file_vm_ops;
3827 
3828         free_xid(xid);
3829         return rc;
3830 }
3831 
3832 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3833 {
3834         int rc, xid;
3835 
3836         xid = get_xid();
3837 
3838         rc = cifs_revalidate_file(file);
3839         if (rc)
3840                 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3841                          rc);
3842         if (!rc)
3843                 rc = generic_file_mmap(file, vma);
3844         if (!rc)
3845                 vma->vm_ops = &cifs_file_vm_ops;
3846 
3847         free_xid(xid);
3848         return rc;
3849 }
3850 
3851 static void
3852 cifs_readv_complete(struct work_struct *work)
3853 {
3854         unsigned int i, got_bytes;
3855         struct cifs_readdata *rdata = container_of(work,
3856                                                 struct cifs_readdata, work);
3857 
3858         got_bytes = rdata->got_bytes;
3859         for (i = 0; i < rdata->nr_pages; i++) {
3860                 struct page *page = rdata->pages[i];
3861 
3862                 lru_cache_add_file(page);
3863 
3864                 if (rdata->result == 0 ||
3865                     (rdata->result == -EAGAIN && got_bytes)) {
3866                         flush_dcache_page(page);
3867                         SetPageUptodate(page);
3868                 }
3869 
3870                 unlock_page(page);
3871 
3872                 if (rdata->result == 0 ||
3873                     (rdata->result == -EAGAIN && got_bytes))
3874                         cifs_readpage_to_fscache(rdata->mapping->host, page);
3875 
3876                 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
3877 
3878                 put_page(page);
3879                 rdata->pages[i] = NULL;
3880         }
3881         kref_put(&rdata->refcount, cifs_readdata_release);
3882 }
3883 
static int
readpages_fill_pages(struct TCP_Server_Info *server,
		     struct cifs_readdata *rdata, struct iov_iter *iter,
		     unsigned int len)
{
	/*
	 * Fill the readahead pages in @rdata with up to @len bytes of
	 * response data (from @iter when provided, the SMB Direct MR, or
	 * the server socket).  A partially filled final page is zeroed to
	 * its end; pages wholly beyond the data are zeroed and marked
	 * up to date when past the server's EOF, or simply released.
	 *
	 * Returns the number of bytes placed into pages, or a negative
	 * errno when nothing was read and an error occurred.
	 */
	int result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;
	unsigned int page_offset = rdata->page_offset;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
	cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
		unsigned int to_read = rdata->pagesz;
		size_t n;

		/* only the first page may start at a non-zero offset */
		if (i == 0)
			to_read -= page_offset;
		else
			page_offset = 0;

		n = to_read;

		if (len >= to_read) {
			len -= to_read;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			zero_user(page, len + page_offset, to_read - len);
			n = rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			put_page(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		if (iter)
			result = copy_page_from_iter(
					page, page_offset, n, iter);
#ifdef CONFIG_CIFS_SMB_DIRECT
		else if (rdata->mr)
			result = n;	/* RDMA already delivered the bytes */
#endif
		else
			result = cifs_read_page_from_socket(
					server, page, page_offset, n);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	/* report partial progress unless the connection was aborted */
	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
						rdata->got_bytes : result;
}
3969 
/*
 * ->read_into_pages hook for cifs_readpages(): fill rdata->pages
 * directly from the server socket (iter == NULL selects the
 * cifs_read_page_from_socket() path in readpages_fill_pages()).
 */
static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata, unsigned int len)
{
	return readpages_fill_pages(server, rdata, NULL, len);
}
3976 
/*
 * ->copy_into_pages hook for cifs_readpages(): fill rdata->pages by
 * copying from an already-received iov_iter rather than reading from
 * the socket.  The whole iterator (iter->count bytes) is consumed.
 */
static int
cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata,
			       struct iov_iter *iter)
{
	return readpages_fill_pages(server, rdata, iter, iter->count);
}
3984 
/*
 * Pull an index-contiguous run of pages off the tail of @page_list
 * (which is in declining-index order), insert each into the page cache
 * locked, and move them to @tmplist to form one read request.
 *
 * On return *offset is the file offset of the first page, *bytes the
 * total request length, and *nr_pages how many pages were moved.  The
 * run stops at an index discontinuity, when adding another page would
 * push the request past @rsize, or when a page cannot be added to the
 * page cache.  Returns 0 on success, or the error from inserting the
 * first page (later insertion failures just terminate the run).
 */
static int
readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
		    unsigned int rsize, struct list_head *tmplist,
		    unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
{
	struct page *page, *tpage;
	unsigned int expected_index;
	int rc;
	gfp_t gfp = readahead_gfp_mask(mapping);

	INIT_LIST_HEAD(tmplist);

	/* page_list is in declining index order, so the tail is the lowest */
	page = list_entry(page_list->prev, struct page, lru);

	/*
	 * Lock the page and put it in the cache. Since no one else
	 * should have access to this page, we're safe to simply set
	 * PG_locked without checking it first.
	 */
	__SetPageLocked(page);
	rc = add_to_page_cache_locked(page, mapping,
				      page->index, gfp);

	/* give up if we can't stick it in the cache */
	if (rc) {
		__ClearPageLocked(page);
		return rc;
	}

	/* move first page to the tmplist */
	*offset = (loff_t)page->index << PAGE_SHIFT;
	*bytes = PAGE_SIZE;
	*nr_pages = 1;
	list_move_tail(&page->lru, tmplist);

	/* now try and add more pages onto the request */
	expected_index = page->index + 1;
	list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
		/* discontinuity ? */
		if (page->index != expected_index)
			break;

		/* would this page push the read over the rsize? */
		if (*bytes + PAGE_SIZE > rsize)
			break;

		__SetPageLocked(page);
		if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
			__ClearPageLocked(page);
			break;
		}
		list_move_tail(&page->lru, tmplist);
		(*bytes) += PAGE_SIZE;
		expected_index++;
		(*nr_pages)++;
	}
	return rc;
}
4043 
/*
 * ->readpages() for cached I/O: read a batch of pages asynchronously.
 *
 * @file:      open file the read is on behalf of
 * @mapping:   address space owning the pages
 * @page_list: pages to read, in declining index order
 * @num_pages: number of pages on @page_list
 *
 * Pages fscache can satisfy are consumed there first; the remainder are
 * grouped into rsize-bounded, index-contiguous runs by
 * readpages_get_pages() and issued via ->async_readv(), completing in
 * cifs_readv_complete.
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct TCP_Server_Info *server;
	pid_t pid;
	unsigned int xid;

	xid = get_xid();
	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 *
	 * After this point, every page in the list might have PG_fscache set,
	 * so we will need to clean that up off of every page we don't use.
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0) {
		/* fscache satisfied the whole request */
		free_xid(xid);
		return rc;
	}

	/* forward the caller's pid to the server if the mount requests it */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	server = tlink_tcon(open_file->tlink)->ses->server;

	cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
		 __func__, file, mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int i, nr_pages, bytes, rsize;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;
		unsigned credits;

		/* throttle against the server's credit/MTU limits */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, &credits);
		if (rc)
			break;

		/*
		 * Give up immediately if rsize is too small to read an entire
		 * page. The VFS will fall back to readpage. We should never
		 * reach this point however since we set ra_pages to 0 when the
		 * rsize is smaller than a cache page.
		 */
		if (unlikely(rsize < PAGE_SIZE)) {
			add_credits_and_wake_if(server, credits, 0);
			free_xid(xid);
			return 0;
		}

		rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
					 &nr_pages, &offset, &bytes);
		if (rc) {
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				put_page(page);
			}
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->tailsz = PAGE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;
		rdata->copy_into_pages = cifs_readpages_copy_into_pages;
		rdata->credits = credits;

		/* tmplist is in increasing index order; keep that order */
		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		/* reopen a stale file handle before issuing the read */
		if (!rdata->cfile->invalidHandle ||
		    !(rc = cifs_reopen_file(rdata->cfile, true)))
			rc = server->ops->async_readv(rdata);
		if (rc) {
			add_credits_and_wake_if(server, rdata->credits, 0);
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				put_page(page);
			}
			/* Fallback to the readpage in error/reconnect cases */
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		/* drop our reference; the async completion owns rdata now */
		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	/* Any pages that have been shown to fscache but didn't get added to
	 * the pagecache must be uncached before they get returned to the
	 * allocator.
	 */
	cifs_fscache_readpages_cancel(mapping->host, page_list);
	free_xid(xid);
	return rc;
}
4180 
4181 /*
4182  * cifs_readpage_worker must be called with the page pinned
4183  */
4184 static int cifs_readpage_worker(struct file *file, struct page *page,
4185         loff_t *poffset)
4186 {
4187         char *read_data;
4188         int rc;
4189 
4190         /* Is the page cached? */
4191         rc = cifs_readpage_from_fscache(file_inode(file), page);
4192         if (rc == 0)
4193                 goto read_complete;
4194 
4195         read_data = kmap(page);
4196         /* for reads over a certain size could initiate async read ahead */
4197 
4198         rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
4199 
4200         if (rc < 0)
4201                 goto io_error;
4202         else
4203                 cifs_dbg(FYI, "Bytes read %d\n", rc);
4204 
4205         /* we do not want atime to be less than mtime, it broke some apps */
4206         file_inode(file)->i_atime = current_time(file_inode(file));
4207         if (timespec64_compare(&(file_inode(file)->i_atime), &(file_inode(file)->i_mtime)))
4208                 file_inode(file)->i_atime = file_inode(file)->i_mtime;
4209         else
4210                 file_inode(file)->i_atime = current_time(file_inode(file));
4211 
4212         if (PAGE_SIZE > rc)
4213                 memset(read_data + rc, 0, PAGE_SIZE - rc);
4214 
4215         flush_dcache_page(page);
4216         SetPageUptodate(page);
4217 
4218         /* send this page to the cache */
4219         cifs_readpage_to_fscache(file_inode(file), page);
4220 
4221         rc = 0;
4222 
4223 io_error:
4224         kunmap(page);
4225         unlock_page(page);
4226 
4227 read_complete:
4228         return rc;
4229 }
4230 
4231 static int cifs_readpage(struct file *file, struct page *page)
4232 {
4233         loff_t offset = (loff_t)page->index << PAGE_SHIFT;
4234         int rc = -EACCES;
4235         unsigned int xid;
4236 
4237         xid = get_xid();
4238 
4239         if (file->private_data == NULL) {
4240                 rc = -EBADF;
4241                 free_xid(xid);
4242                 return rc;
4243         }
4244 
4245         cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
4246                  page, (int)offset, (int)offset);
4247 
4248         rc = cifs_readpage_worker(file, page, &offset);
4249 
4250         free_xid(xid);
4251         return rc;
4252 }
4253 
4254 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
4255 {
4256         struct cifsFileInfo *open_file;
4257         struct cifs_tcon *tcon =
4258                 cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
4259 
4260         spin_lock(&tcon->open_file_lock);
4261         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
4262                 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
4263                         spin_unlock(&tcon->open_file_lock);
4264                         return 1;
4265                 }
4266         }
4267         spin_unlock(&tcon->open_file_lock);
4268         return 0;
4269 }
4270 
4271 /* We do not want to update the file size from server for inodes
4272    open for write - to avoid races with writepage extending
4273    the file - in the future we could consider allowing
4274    refreshing the inode only on increases in the file size
4275    but this is tricky to do without racing with writebehind
4276    page caching in the current Linux kernel design */
4277 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
4278 {
4279         if (!cifsInode)
4280                 return true;
4281 
4282         if (is_inode_writable(cifsInode)) {
4283                 /* This inode is open for write at least once */
4284                 struct cifs_sb_info *cifs_sb;
4285 
4286                 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
4287                 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
4288                         /* since no page cache to corrupt on directio
4289                         we can change size safely */
4290                         return true;
4291                 }
4292 
4293                 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4294                         return true;
4295 
4296                 return false;
4297         } else
4298                 return true;
4299 }
4300 
/*
 * ->write_begin() for cached writes: grab and prepare the page that
 * covers [pos, pos + len).
 *
 * The page must be uptodate (or logically uptodate for the written
 * region) before cifs_write_end copies user data into it.  Three cases
 * avoid a read from the server: the page is already uptodate, the
 * write covers the whole page, or we hold a read oplock and the write
 * lies at/over EOF (the untouched parts are zeroed and PageChecked is
 * set).  Otherwise the page is read in synchronously, once, and the
 * lookup retried ("oncethru" guards against looping).
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int oncethru = 0;
	pgoff_t index = pos >> PAGE_SHIFT;
	loff_t offset = pos & (PAGE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);

start:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
		/* worker unlocked the page; drop our ref and re-grab it */
		put_page(page);
		oncethru = 1;
		goto start;
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
4377 
4378 static int cifs_release_page(struct page *page, gfp_t gfp)
4379 {
4380         if (PagePrivate(page))
4381                 return 0;
4382 
4383         return cifs_fscache_release_page(page, gfp);
4384 }
4385 
4386 static void cifs_invalidate_page(struct page *page, unsigned int offset,
4387                                  unsigned int length)
4388 {
4389         struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
4390 
4391         if (offset == 0 && length == PAGE_SIZE)
4392                 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
4393 }
4394 
4395 static int cifs_launder_page(struct page *page)
4396 {
4397         int rc = 0;
4398         loff_t range_start = page_offset(page);
4399         loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
4400         struct writeback_control wbc = {
4401                 .sync_mode = WB_SYNC_ALL,
4402                 .nr_to_write = 0,
4403                 .range_start = range_start,
4404                 .range_end = range_end,
4405         };
4406 
4407         cifs_dbg(FYI, "Launder page: %p\n", page);
4408 
4409         if (clear_page_dirty_for_io(page))
4410                 rc = cifs_writepage_locked(page, &wbc);
4411 
4412         cifs_fscache_invalidate_page(page, page->mapping->host);
4413         return rc;
4414 }
4415 
/*
 * Work item run when the server breaks our oplock/lease.  In order:
 * wait for in-flight writers, downgrade the cached oplock state, break
 * any local lease, flush (and if we lost read caching, invalidate) the
 * page cache, re-push byte-range locks to the server, and finally send
 * the oplock acknowledgment unless the break was cancelled (e.g. by a
 * reconnect that already invalidated the handle).
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;

	/* let writers that are already in progress finish first */
	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	server->ops->downgrade_oplock(server, cinode,
		test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));

	/* mandatory locks cannot be cached under a read-only oplock */
	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
						cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->oplock = 0;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		if (CIFS_CACHE_READ(cinode))
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (!CIFS_CACHE_READ(cinode)) {
			/* lost read caching: wait for writeback, drop pages */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	}
	cifs_done_oplock_break(cinode);
}
4470 
4471 /*
4472  * The presence of cifs_direct_io() in the address space ops vector
 * allows open() O_DIRECT flags which would have failed otherwise.
4474  *
4475  * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests
4476  * so this method should never be called.
4477  *
4478  * Direct IO is not yet supported in the cached mode. 
4479  */
/*
 * Stub ->direct_IO.  Its mere presence in cifs_addr_ops lets open()
 * accept O_DIRECT; in cache=none mounts direct reads/writes are routed
 * elsewhere, so in practice this should never be reached.
 */
static ssize_t
cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
{
	/*
	 * FIXME
	 * Eventually need to support direct IO for non forcedirectio mounts
	 */
	return -EINVAL;
}
4489 
4490 
/*
 * Address space operations for cached I/O when the server buffer is
 * large enough for readpages (see cifs_addr_ops_smallbuf otherwise).
 * direct_IO is a stub whose presence allows O_DIRECT opens.
 */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.direct_IO = cifs_direct_io,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
4504 
/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data.  Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 * (Note this variant also omits direct_IO, so O_DIRECT opens will fail.)
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
4521 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp