TOMOYO Linux Cross Reference
Linux/fs/fuse/dev.c

  1 /*
  2   FUSE: Filesystem in Userspace
  3   Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>
  4 
  5   This program can be distributed under the terms of the GNU GPL.
  6   See the file COPYING.
  7 */
  8 
  9 #include "fuse_i.h"
 10 
 11 #include <linux/init.h>
 12 #include <linux/module.h>
 13 #include <linux/poll.h>
 14 #include <linux/sched/signal.h>
 15 #include <linux/uio.h>
 16 #include <linux/miscdevice.h>
 17 #include <linux/pagemap.h>
 18 #include <linux/file.h>
 19 #include <linux/slab.h>
 20 #include <linux/pipe_fs_i.h>
 21 #include <linux/swap.h>
 22 #include <linux/splice.h>
 23 #include <linux/sched.h>
 24 
 25 MODULE_ALIAS_MISCDEV(FUSE_MINOR);
 26 MODULE_ALIAS("devname:fuse");
 27 
 28 static struct kmem_cache *fuse_req_cachep;
 29 
 30 static struct fuse_dev *fuse_get_dev(struct file *file)
 31 {
 32         /*
 33          * Lockless access is OK, because file->private_data is set
 34          * once during mount and is valid until the file is released.
 35          */
 36         return READ_ONCE(file->private_data);
 37 }
 38 
 39 static void fuse_request_init(struct fuse_req *req, struct page **pages,
 40                               struct fuse_page_desc *page_descs,
 41                               unsigned npages)
 42 {
 43         memset(req, 0, sizeof(*req));
 44         memset(pages, 0, sizeof(*pages) * npages);
 45         memset(page_descs, 0, sizeof(*page_descs) * npages);
 46         INIT_LIST_HEAD(&req->list);
 47         INIT_LIST_HEAD(&req->intr_entry);
 48         init_waitqueue_head(&req->waitq);
 49         refcount_set(&req->count, 1);
 50         req->pages = pages;
 51         req->page_descs = page_descs;
 52         req->max_pages = npages;
 53         __set_bit(FR_PENDING, &req->flags);
 54 }
 55 
 56 static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
 57 {
 58         struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
 59         if (req) {
 60                 struct page **pages;
 61                 struct fuse_page_desc *page_descs;
 62 
 63                 if (npages <= FUSE_REQ_INLINE_PAGES) {
 64                         pages = req->inline_pages;
 65                         page_descs = req->inline_page_descs;
 66                 } else {
 67                         pages = kmalloc(sizeof(struct page *) * npages, flags);
 68                         page_descs = kmalloc(sizeof(struct fuse_page_desc) *
 69                                              npages, flags);
 70                 }
 71 
 72                 if (!pages || !page_descs) {
 73                         kfree(pages);
 74                         kfree(page_descs);
 75                         kmem_cache_free(fuse_req_cachep, req);
 76                         return NULL;
 77                 }
 78 
 79                 fuse_request_init(req, pages, page_descs, npages);
 80         }
 81         return req;
 82 }
 83 
 84 struct fuse_req *fuse_request_alloc(unsigned npages)
 85 {
 86         return __fuse_request_alloc(npages, GFP_KERNEL);
 87 }
 88 EXPORT_SYMBOL_GPL(fuse_request_alloc);
 89 
 90 struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
 91 {
 92         return __fuse_request_alloc(npages, GFP_NOFS);
 93 }
 94 
 95 void fuse_request_free(struct fuse_req *req)
 96 {
 97         if (req->pages != req->inline_pages) {
 98                 kfree(req->pages);
 99                 kfree(req->page_descs);
100         }
101         kmem_cache_free(fuse_req_cachep, req);
102 }
103 
104 void __fuse_get_request(struct fuse_req *req)
105 {
106         refcount_inc(&req->count);
107 }
108 
109 /* Must be called with > 1 refcount */
110 static void __fuse_put_request(struct fuse_req *req)
111 {
112         refcount_dec(&req->count);
113 }
114 
115 static void fuse_req_init_context(struct fuse_conn *fc, struct fuse_req *req)
116 {
117         req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
118         req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
119         req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
120 }
121 
122 void fuse_set_initialized(struct fuse_conn *fc)
123 {
124         /* Make sure stores before this are seen on another CPU */
125         smp_wmb();
126         fc->initialized = 1;
127 }
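
The barrier here pairs with the smp_rmb() in __fuse_get_req() and fuse_get_req_nofail_nopages() below; a minimal sketch of the publish/consume pattern (the writer shown as the INIT reply handler, which is where this is called from in inode.c):

        /*
         * Writer (process_init_reply()):     Reader (__fuse_get_req()):
         *
         *   fc->max_background = ...;          wait until !fuse_block_alloc();
         *   smp_wmb();        <- pairs with -> smp_rmb();
         *   fc->initialized = 1;               ...trust initialized state...
         */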
128 
129 static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
130 {
131         return !fc->initialized || (for_background && fc->blocked);
132 }
133 
134 static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
135                                        bool for_background)
136 {
137         struct fuse_req *req;
138         int err;
139         atomic_inc(&fc->num_waiting);
140 
141         if (fuse_block_alloc(fc, for_background)) {
142                 err = -EINTR;
143                 if (wait_event_killable_exclusive(fc->blocked_waitq,
144                                 !fuse_block_alloc(fc, for_background)))
145                         goto out;
146         }
147         /* Matches smp_wmb() in fuse_set_initialized() */
148         smp_rmb();
149 
150         err = -ENOTCONN;
151         if (!fc->connected)
152                 goto out;
153 
154         err = -ECONNREFUSED;
155         if (fc->conn_error)
156                 goto out;
157 
158         req = fuse_request_alloc(npages);
159         err = -ENOMEM;
160         if (!req) {
161                 if (for_background)
162                         wake_up(&fc->blocked_waitq);
163                 goto out;
164         }
165 
166         fuse_req_init_context(fc, req);
167         __set_bit(FR_WAITING, &req->flags);
168         if (for_background)
169                 __set_bit(FR_BACKGROUND, &req->flags);
170 
171         return req;
172 
173  out:
174         atomic_dec(&fc->num_waiting);
175         return ERR_PTR(err);
176 }
177 
178 struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
179 {
180         return __fuse_get_req(fc, npages, false);
181 }
182 EXPORT_SYMBOL_GPL(fuse_get_req);
183 
184 struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
185                                              unsigned npages)
186 {
187         return __fuse_get_req(fc, npages, true);
188 }
189 EXPORT_SYMBOL_GPL(fuse_get_req_for_background);
190 
191 /*
 192  * Return the request in fuse_file->reserved_req.  However, that may
193  * currently be in use.  If that is the case, wait for it to become
194  * available.
195  */
196 static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
197                                          struct file *file)
198 {
199         struct fuse_req *req = NULL;
200         struct fuse_file *ff = file->private_data;
201 
202         do {
203                 wait_event(fc->reserved_req_waitq, ff->reserved_req);
204                 spin_lock(&fc->lock);
205                 if (ff->reserved_req) {
206                         req = ff->reserved_req;
207                         ff->reserved_req = NULL;
208                         req->stolen_file = get_file(file);
209                 }
210                 spin_unlock(&fc->lock);
211         } while (!req);
212 
213         return req;
214 }
215 
216 /*
217  * Put stolen request back into fuse_file->reserved_req
218  */
219 static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
220 {
221         struct file *file = req->stolen_file;
222         struct fuse_file *ff = file->private_data;
223 
224         spin_lock(&fc->lock);
225         fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
226         BUG_ON(ff->reserved_req);
227         ff->reserved_req = req;
228         wake_up_all(&fc->reserved_req_waitq);
229         spin_unlock(&fc->lock);
230         fput(file);
231 }
232 
233 /*
 234  * Gets a request for a file operation; always succeeds.
235  *
236  * This is used for sending the FLUSH request, which must get to
237  * userspace, due to POSIX locks which may need to be unlocked.
238  *
239  * If allocation fails due to OOM, use the reserved request in
240  * fuse_file.
241  *
242  * This is very unlikely to deadlock accidentally, since the
 243  * filesystem should not have its own file open.  If deadlock is
244  * intentional, it can still be broken by "aborting" the filesystem.
245  */
246 struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
247                                              struct file *file)
248 {
249         struct fuse_req *req;
250 
251         atomic_inc(&fc->num_waiting);
252         wait_event(fc->blocked_waitq, fc->initialized);
253         /* Matches smp_wmb() in fuse_set_initialized() */
254         smp_rmb();
255         req = fuse_request_alloc(0);
256         if (!req)
257                 req = get_reserved_req(fc, file);
258 
259         fuse_req_init_context(fc, req);
260         __set_bit(FR_WAITING, &req->flags);
261         __clear_bit(FR_BACKGROUND, &req->flags);
262         return req;
263 }
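
For context, a sketch of the FLUSH sender this path exists for, abridged from fuse_flush() in file.c (locals as in that caller):

        struct fuse_flush_in inarg;
        struct fuse_req *req;
        int err;

        req = fuse_get_req_nofail_nopages(fc, file);    /* cannot fail */
        memset(&inarg, 0, sizeof(inarg));
        inarg.fh = ff->fh;
        inarg.lock_owner = fuse_lock_owner_id(fc, id);
        req->in.h.opcode = FUSE_FLUSH;
        req->in.h.nodeid = get_node_id(inode);
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        __set_bit(FR_FORCE, &req->flags);       /* see request_wait_answer() */
        fuse_request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);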
264 
265 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
266 {
267         if (refcount_dec_and_test(&req->count)) {
268                 if (test_bit(FR_BACKGROUND, &req->flags)) {
269                         /*
270                          * We get here in the unlikely case that a background
271                          * request was allocated but not sent
272                          */
273                         spin_lock(&fc->lock);
274                         if (!fc->blocked)
275                                 wake_up(&fc->blocked_waitq);
276                         spin_unlock(&fc->lock);
277                 }
278 
279                 if (test_bit(FR_WAITING, &req->flags)) {
280                         __clear_bit(FR_WAITING, &req->flags);
281                         atomic_dec(&fc->num_waiting);
282                 }
283 
284                 if (req->stolen_file)
285                         put_reserved_req(fc, req);
286                 else
287                         fuse_request_free(req);
288         }
289 }
290 EXPORT_SYMBOL_GPL(fuse_put_request);
291 
292 static unsigned len_args(unsigned numargs, struct fuse_arg *args)
293 {
294         unsigned nbytes = 0;
295         unsigned i;
296 
297         for (i = 0; i < numargs; i++)
298                 nbytes += args[i].size;
299 
300         return nbytes;
301 }
302 
303 static u64 fuse_get_unique(struct fuse_iqueue *fiq)
304 {
305         return ++fiq->reqctr;
306 }
307 
308 static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
309 {
310         req->in.h.len = sizeof(struct fuse_in_header) +
311                 len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
312         list_add_tail(&req->list, &fiq->pending);
313         wake_up_locked(&fiq->waitq);
314         kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
315 }
316 
317 void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
318                        u64 nodeid, u64 nlookup)
319 {
320         struct fuse_iqueue *fiq = &fc->iq;
321 
322         forget->forget_one.nodeid = nodeid;
323         forget->forget_one.nlookup = nlookup;
324 
325         spin_lock(&fiq->waitq.lock);
326         if (fiq->connected) {
327                 fiq->forget_list_tail->next = forget;
328                 fiq->forget_list_tail = forget;
329                 wake_up_locked(&fiq->waitq);
330                 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
331         } else {
332                 kfree(forget);
333         }
334         spin_unlock(&fiq->waitq.lock);
335 }
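
The usual producer is inode eviction: fuse_evict_inode() in inode.c hands over the fuse_forget_link that was preallocated at lookup time, roughly:

        struct fuse_inode *fi = get_fuse_inode(inode);

        fuse_queue_forget(fc, fi->forget, fi->nodeid, fi->nlookup);
        fi->forget = NULL;      /* ownership passed to the queue (or freed) */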
336 
337 static void flush_bg_queue(struct fuse_conn *fc)
338 {
339         while (fc->active_background < fc->max_background &&
340                !list_empty(&fc->bg_queue)) {
341                 struct fuse_req *req;
342                 struct fuse_iqueue *fiq = &fc->iq;
343 
344                 req = list_entry(fc->bg_queue.next, struct fuse_req, list);
345                 list_del(&req->list);
346                 fc->active_background++;
347                 spin_lock(&fiq->waitq.lock);
348                 req->in.h.unique = fuse_get_unique(fiq);
349                 queue_request(fiq, req);
350                 spin_unlock(&fiq->waitq.lock);
351         }
352 }
353 
354 /*
 355  * This function is called when a request is finished.  Either a reply
 356  * has arrived, or it was aborted (and not yet sent), or some error
 357  * occurred during communication with userspace, or the device file
 358  * was closed.  The requester thread is woken up (if still waiting),
 359  * the 'end' callback is called if given, and otherwise the reference
 360  * to the request is released.
361  */
362 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
363 {
364         struct fuse_iqueue *fiq = &fc->iq;
365 
366         if (test_and_set_bit(FR_FINISHED, &req->flags))
367                 return;
368 
369         spin_lock(&fiq->waitq.lock);
370         list_del_init(&req->intr_entry);
371         spin_unlock(&fiq->waitq.lock);
372         WARN_ON(test_bit(FR_PENDING, &req->flags));
373         WARN_ON(test_bit(FR_SENT, &req->flags));
374         if (test_bit(FR_BACKGROUND, &req->flags)) {
375                 spin_lock(&fc->lock);
376                 clear_bit(FR_BACKGROUND, &req->flags);
377                 if (fc->num_background == fc->max_background)
378                         fc->blocked = 0;
379 
380                 /* Wake up next waiter, if any */
381                 if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
382                         wake_up(&fc->blocked_waitq);
383 
384                 if (fc->num_background == fc->congestion_threshold &&
385                     fc->connected && fc->sb) {
386                         clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
387                         clear_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
388                 }
389                 fc->num_background--;
390                 fc->active_background--;
391                 flush_bg_queue(fc);
392                 spin_unlock(&fc->lock);
393         }
394         wake_up(&req->waitq);
395         if (req->end)
396                 req->end(fc, req);
397         fuse_put_request(fc, req);
398 }
399 
400 static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
401 {
402         spin_lock(&fiq->waitq.lock);
403         if (test_bit(FR_FINISHED, &req->flags)) {
404                 spin_unlock(&fiq->waitq.lock);
405                 return;
406         }
407         if (list_empty(&req->intr_entry)) {
408                 list_add_tail(&req->intr_entry, &fiq->interrupts);
409                 wake_up_locked(&fiq->waitq);
410         }
411         spin_unlock(&fiq->waitq.lock);
412         kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
413 }
414 
415 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
416 {
417         struct fuse_iqueue *fiq = &fc->iq;
418         int err;
419 
420         if (!fc->no_interrupt) {
421                 /* Any signal may interrupt this */
422                 err = wait_event_interruptible(req->waitq,
423                                         test_bit(FR_FINISHED, &req->flags));
424                 if (!err)
425                         return;
426 
427                 set_bit(FR_INTERRUPTED, &req->flags);
428                 /* matches barrier in fuse_dev_do_read() */
429                 smp_mb__after_atomic();
430                 if (test_bit(FR_SENT, &req->flags))
431                         queue_interrupt(fiq, req);
432         }
433 
434         if (!test_bit(FR_FORCE, &req->flags)) {
435                 /* Only fatal signals may interrupt this */
436                 err = wait_event_killable(req->waitq,
437                                         test_bit(FR_FINISHED, &req->flags));
438                 if (!err)
439                         return;
440 
441                 spin_lock(&fiq->waitq.lock);
442                 /* Request is not yet in userspace, bail out */
443                 if (test_bit(FR_PENDING, &req->flags)) {
444                         list_del(&req->list);
445                         spin_unlock(&fiq->waitq.lock);
446                         __fuse_put_request(req);
447                         req->out.h.error = -EINTR;
448                         return;
449                 }
450                 spin_unlock(&fiq->waitq.lock);
451         }
452 
453         /*
454          * Either request is already in userspace, or it was forced.
455          * Wait it out.
456          */
457         wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
458 }
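
In short, the wait above degrades through three stages:

        /*
         * 1. interruptible wait   - any signal ends it; if the request was
         *                           already FR_SENT, an INTERRUPT is queued
         *                           (stage skipped when fc->no_interrupt)
         * 2. killable wait        - fatal signals only; a request still
         *                           FR_PENDING is dequeued and fails -EINTR
         *                           (stage skipped for FR_FORCE requests)
         * 3. uninterruptible wait - the request reached userspace (or was
         *                           forced) and must be waited out
         */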
459 
460 static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
461 {
462         struct fuse_iqueue *fiq = &fc->iq;
463 
464         BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
465         spin_lock(&fiq->waitq.lock);
466         if (!fiq->connected) {
467                 spin_unlock(&fiq->waitq.lock);
468                 req->out.h.error = -ENOTCONN;
469         } else {
470                 req->in.h.unique = fuse_get_unique(fiq);
471                 queue_request(fiq, req);
472                 /* acquire extra reference, since request is still needed
473                    after request_end() */
474                 __fuse_get_request(req);
475                 spin_unlock(&fiq->waitq.lock);
476 
477                 request_wait_answer(fc, req);
478                 /* Pairs with smp_wmb() in request_end() */
479                 smp_rmb();
480         }
481 }
482 
483 void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
484 {
485         __set_bit(FR_ISREPLY, &req->flags);
486         if (!test_bit(FR_WAITING, &req->flags)) {
487                 __set_bit(FR_WAITING, &req->flags);
488                 atomic_inc(&fc->num_waiting);
489         }
490         __fuse_request_send(fc, req);
491 }
492 EXPORT_SYMBOL_GPL(fuse_request_send);
493 
494 static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
495 {
496         if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
497                 args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;
498 
499         if (fc->minor < 9) {
500                 switch (args->in.h.opcode) {
501                 case FUSE_LOOKUP:
502                 case FUSE_CREATE:
503                 case FUSE_MKNOD:
504                 case FUSE_MKDIR:
505                 case FUSE_SYMLINK:
506                 case FUSE_LINK:
507                         args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
508                         break;
509                 case FUSE_GETATTR:
510                 case FUSE_SETATTR:
511                         args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
512                         break;
513                 }
514         }
515         if (fc->minor < 12) {
516                 switch (args->in.h.opcode) {
517                 case FUSE_CREATE:
518                         args->in.args[0].size = sizeof(struct fuse_open_in);
519                         break;
520                 case FUSE_MKNOD:
521                         args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
522                         break;
523                 }
524         }
525 }
526 
527 ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
528 {
529         struct fuse_req *req;
530         ssize_t ret;
531 
532         req = fuse_get_req(fc, 0);
533         if (IS_ERR(req))
534                 return PTR_ERR(req);
535 
536         /* Needs to be done after fuse_get_req() so that fc->minor is valid */
537         fuse_adjust_compat(fc, args);
538 
539         req->in.h.opcode = args->in.h.opcode;
540         req->in.h.nodeid = args->in.h.nodeid;
541         req->in.numargs = args->in.numargs;
542         memcpy(req->in.args, args->in.args,
543                args->in.numargs * sizeof(struct fuse_in_arg));
544         req->out.argvar = args->out.argvar;
545         req->out.numargs = args->out.numargs;
546         memcpy(req->out.args, args->out.args,
547                args->out.numargs * sizeof(struct fuse_arg));
548         fuse_request_send(fc, req);
549         ret = req->out.h.error;
550         if (!ret && args->out.argvar) {
551                 BUG_ON(args->out.numargs != 1);
552                 ret = req->out.args[0].size;
553         }
554         fuse_put_request(fc, req);
555 
556         return ret;
557 }
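
Callers build a fuse_args on the stack; a representative sketch, modeled on the GETATTR path elsewhere in fs/fuse (FUSE_ARGS() is from fuse_i.h):

        struct fuse_getattr_in inarg;
        struct fuse_attr_out outarg;
        FUSE_ARGS(args);
        ssize_t err;

        memset(&inarg, 0, sizeof(inarg));
        memset(&outarg, 0, sizeof(outarg));
        args.in.h.opcode = FUSE_GETATTR;
        args.in.h.nodeid = get_node_id(inode);
        args.in.numargs = 1;
        args.in.args[0].size = sizeof(inarg);
        args.in.args[0].value = &inarg;
        args.out.numargs = 1;
        args.out.args[0].size = sizeof(outarg);
        args.out.args[0].value = &outarg;
        err = fuse_simple_request(fc, &args);   /* error, or out size if argvar */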
558 
559 /*
560  * Called under fc->lock
561  *
562  * fc->connected must have been checked previously
563  */
564 void fuse_request_send_background_locked(struct fuse_conn *fc,
565                                          struct fuse_req *req)
566 {
567         BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
568         if (!test_bit(FR_WAITING, &req->flags)) {
569                 __set_bit(FR_WAITING, &req->flags);
570                 atomic_inc(&fc->num_waiting);
571         }
572         __set_bit(FR_ISREPLY, &req->flags);
573         fc->num_background++;
574         if (fc->num_background == fc->max_background)
575                 fc->blocked = 1;
576         if (fc->num_background == fc->congestion_threshold && fc->sb) {
577                 set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
578                 set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
579         }
580         list_add_tail(&req->list, &fc->bg_queue);
581         flush_bg_queue(fc);
582 }
583 
584 void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
585 {
586         BUG_ON(!req->end);
587         spin_lock(&fc->lock);
588         if (fc->connected) {
589                 fuse_request_send_background_locked(fc, req);
590                 spin_unlock(&fc->lock);
591         } else {
592                 spin_unlock(&fc->lock);
593                 req->out.h.error = -ENOTCONN;
594                 req->end(fc, req);
595                 fuse_put_request(fc, req);
596         }
597 }
598 EXPORT_SYMBOL_GPL(fuse_request_send_background);
599 
600 static int fuse_request_send_notify_reply(struct fuse_conn *fc,
601                                           struct fuse_req *req, u64 unique)
602 {
603         int err = -ENODEV;
604         struct fuse_iqueue *fiq = &fc->iq;
605 
606         __clear_bit(FR_ISREPLY, &req->flags);
607         req->in.h.unique = unique;
608         spin_lock(&fiq->waitq.lock);
609         if (fiq->connected) {
610                 queue_request(fiq, req);
611                 err = 0;
612         }
613         spin_unlock(&fiq->waitq.lock);
614 
615         return err;
616 }
617 
618 void fuse_force_forget(struct file *file, u64 nodeid)
619 {
620         struct inode *inode = file_inode(file);
621         struct fuse_conn *fc = get_fuse_conn(inode);
622         struct fuse_req *req;
623         struct fuse_forget_in inarg;
624 
625         memset(&inarg, 0, sizeof(inarg));
626         inarg.nlookup = 1;
627         req = fuse_get_req_nofail_nopages(fc, file);
628         req->in.h.opcode = FUSE_FORGET;
629         req->in.h.nodeid = nodeid;
630         req->in.numargs = 1;
631         req->in.args[0].size = sizeof(inarg);
632         req->in.args[0].value = &inarg;
633         __clear_bit(FR_ISREPLY, &req->flags);
634         __fuse_request_send(fc, req);
635         /* ignore errors */
636         fuse_put_request(fc, req);
637 }
638 
639 /*
640  * Lock the request.  Up to the next unlock_request() there mustn't be
641  * anything that could cause a page-fault.  If the request was already
642  * aborted bail out.
643  */
644 static int lock_request(struct fuse_req *req)
645 {
646         int err = 0;
647         if (req) {
648                 spin_lock(&req->waitq.lock);
649                 if (test_bit(FR_ABORTED, &req->flags))
650                         err = -ENOENT;
651                 else
652                         set_bit(FR_LOCKED, &req->flags);
653                 spin_unlock(&req->waitq.lock);
654         }
655         return err;
656 }
657 
658 /*
659  * Unlock request.  If it was aborted while locked, caller is responsible
660  * for unlocking and ending the request.
661  */
662 static int unlock_request(struct fuse_req *req)
663 {
664         int err = 0;
665         if (req) {
666                 spin_lock(&req->waitq.lock);
667                 if (test_bit(FR_ABORTED, &req->flags))
668                         err = -ENOENT;
669                 else
670                         clear_bit(FR_LOCKED, &req->flags);
671                 spin_unlock(&req->waitq.lock);
672         }
673         return err;
674 }
675 
676 struct fuse_copy_state {
677         int write;
678         struct fuse_req *req;
679         struct iov_iter *iter;
680         struct pipe_buffer *pipebufs;
681         struct pipe_buffer *currbuf;
682         struct pipe_inode_info *pipe;
683         unsigned long nr_segs;
684         struct page *pg;
685         unsigned len;
686         unsigned offset;
687         unsigned move_pages:1;
688 };
689 
690 static void fuse_copy_init(struct fuse_copy_state *cs, int write,
691                            struct iov_iter *iter)
692 {
693         memset(cs, 0, sizeof(*cs));
694         cs->write = write;
695         cs->iter = iter;
696 }
697 
698 /* Unmap and put previous page of userspace buffer */
699 static void fuse_copy_finish(struct fuse_copy_state *cs)
700 {
701         if (cs->currbuf) {
702                 struct pipe_buffer *buf = cs->currbuf;
703 
704                 if (cs->write)
705                         buf->len = PAGE_SIZE - cs->len;
706                 cs->currbuf = NULL;
707         } else if (cs->pg) {
708                 if (cs->write) {
709                         flush_dcache_page(cs->pg);
710                         set_page_dirty_lock(cs->pg);
711                 }
712                 put_page(cs->pg);
713         }
714         cs->pg = NULL;
715 }
716 
717 /*
 718  * Get another pageful of the userspace buffer, map it into kernel
 719  * address space, and lock the request
720  */
721 static int fuse_copy_fill(struct fuse_copy_state *cs)
722 {
723         struct page *page;
724         int err;
725 
726         err = unlock_request(cs->req);
727         if (err)
728                 return err;
729 
730         fuse_copy_finish(cs);
731         if (cs->pipebufs) {
732                 struct pipe_buffer *buf = cs->pipebufs;
733 
734                 if (!cs->write) {
735                         err = pipe_buf_confirm(cs->pipe, buf);
736                         if (err)
737                                 return err;
738 
739                         BUG_ON(!cs->nr_segs);
740                         cs->currbuf = buf;
741                         cs->pg = buf->page;
742                         cs->offset = buf->offset;
743                         cs->len = buf->len;
744                         cs->pipebufs++;
745                         cs->nr_segs--;
746                 } else {
747                         if (cs->nr_segs == cs->pipe->buffers)
748                                 return -EIO;
749 
750                         page = alloc_page(GFP_HIGHUSER);
751                         if (!page)
752                                 return -ENOMEM;
753 
754                         buf->page = page;
755                         buf->offset = 0;
756                         buf->len = 0;
757 
758                         cs->currbuf = buf;
759                         cs->pg = page;
760                         cs->offset = 0;
761                         cs->len = PAGE_SIZE;
762                         cs->pipebufs++;
763                         cs->nr_segs++;
764                 }
765         } else {
766                 size_t off;
767                 err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
768                 if (err < 0)
769                         return err;
770                 BUG_ON(!err);
771                 cs->len = err;
772                 cs->offset = off;
773                 cs->pg = page;
774                 iov_iter_advance(cs->iter, err);
775         }
776 
777         return lock_request(cs->req);
778 }
779 
780 /* Do as much copy to/from userspace buffer as we can */
781 static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
782 {
783         unsigned ncpy = min(*size, cs->len);
784         if (val) {
785                 void *pgaddr = kmap_atomic(cs->pg);
786                 void *buf = pgaddr + cs->offset;
787 
788                 if (cs->write)
789                         memcpy(buf, *val, ncpy);
790                 else
791                         memcpy(*val, buf, ncpy);
792 
793                 kunmap_atomic(pgaddr);
794                 *val += ncpy;
795         }
796         *size -= ncpy;
797         cs->len -= ncpy;
798         cs->offset += ncpy;
799         return ncpy;
800 }
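
A worked example of the refill loop:

        /*
         * Copying a 6000-byte argument through 4096-byte pages:
         * 1st call: ncpy = min(6000, 4096) = 4096, cs->len reaches 0,
         *           so the caller refills via fuse_copy_fill();
         * 2nd call: ncpy = min(1904, 4096) = 1904, and the copy is done.
         */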
801 
802 static int fuse_check_page(struct page *page)
803 {
804         if (page_mapcount(page) ||
805             page->mapping != NULL ||
806             page_count(page) != 1 ||
807             (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
808              ~(1 << PG_locked |
809                1 << PG_referenced |
810                1 << PG_uptodate |
811                1 << PG_lru |
812                1 << PG_active |
813                1 << PG_reclaim))) {
814                 printk(KERN_WARNING "fuse: trying to steal weird page\n");
815                 printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
816                 return 1;
817         }
818         return 0;
819 }
820 
821 static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
822 {
823         int err;
824         struct page *oldpage = *pagep;
825         struct page *newpage;
826         struct pipe_buffer *buf = cs->pipebufs;
827 
828         err = unlock_request(cs->req);
829         if (err)
830                 return err;
831 
832         fuse_copy_finish(cs);
833 
834         err = pipe_buf_confirm(cs->pipe, buf);
835         if (err)
836                 return err;
837 
838         BUG_ON(!cs->nr_segs);
839         cs->currbuf = buf;
840         cs->len = buf->len;
841         cs->pipebufs++;
842         cs->nr_segs--;
843 
844         if (cs->len != PAGE_SIZE)
845                 goto out_fallback;
846 
847         if (pipe_buf_steal(cs->pipe, buf) != 0)
848                 goto out_fallback;
849 
850         newpage = buf->page;
851 
852         if (!PageUptodate(newpage))
853                 SetPageUptodate(newpage);
854 
855         ClearPageMappedToDisk(newpage);
856 
857         if (fuse_check_page(newpage) != 0)
858                 goto out_fallback_unlock;
859 
860         /*
861          * This is a new and locked page, it shouldn't be mapped or
862          * have any special flags on it
863          */
864         if (WARN_ON(page_mapped(oldpage)))
865                 goto out_fallback_unlock;
866         if (WARN_ON(page_has_private(oldpage)))
867                 goto out_fallback_unlock;
868         if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
869                 goto out_fallback_unlock;
870         if (WARN_ON(PageMlocked(oldpage)))
871                 goto out_fallback_unlock;
872 
873         err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
874         if (err) {
875                 unlock_page(newpage);
876                 return err;
877         }
878 
879         get_page(newpage);
880 
881         if (!(buf->flags & PIPE_BUF_FLAG_LRU))
882                 lru_cache_add_file(newpage);
883 
884         err = 0;
885         spin_lock(&cs->req->waitq.lock);
886         if (test_bit(FR_ABORTED, &cs->req->flags))
887                 err = -ENOENT;
888         else
889                 *pagep = newpage;
890         spin_unlock(&cs->req->waitq.lock);
891 
892         if (err) {
893                 unlock_page(newpage);
894                 put_page(newpage);
895                 return err;
896         }
897 
898         unlock_page(oldpage);
899         put_page(oldpage);
900         cs->len = 0;
901 
902         return 0;
903 
904 out_fallback_unlock:
905         unlock_page(newpage);
906 out_fallback:
907         cs->pg = buf->page;
908         cs->offset = buf->offset;
909 
910         err = lock_request(cs->req);
911         if (err)
912                 return err;
913 
914         return 1;
915 }
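
cs->move_pages is set on the splice write path (past the end of this excerpt) when userspace splices with SPLICE_F_MOVE; a hypothetical daemon reply asking for page stealing:

        /* userspace sketch: pipefd[0] holds a fully assembled reply */
        ssize_t n = splice(pipefd[0], NULL, fuse_fd, NULL,
                           reply_len, SPLICE_F_MOVE);
        if (n < 0)
                perror("splice");

Even with the flag set, the kernel may still fall back to copying (the out_fallback labels above), so SPLICE_F_MOVE is a hint, not a guarantee.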
916 
917 static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
918                          unsigned offset, unsigned count)
919 {
920         struct pipe_buffer *buf;
921         int err;
922 
923         if (cs->nr_segs == cs->pipe->buffers)
924                 return -EIO;
925 
926         err = unlock_request(cs->req);
927         if (err)
928                 return err;
929 
930         fuse_copy_finish(cs);
931 
932         buf = cs->pipebufs;
933         get_page(page);
934         buf->page = page;
935         buf->offset = offset;
936         buf->len = count;
937 
938         cs->pipebufs++;
939         cs->nr_segs++;
940         cs->len = 0;
941 
942         return 0;
943 }
944 
945 /*
946  * Copy a page in the request to/from the userspace buffer.  Must be
947  * done atomically
948  */
949 static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
950                           unsigned offset, unsigned count, int zeroing)
951 {
952         int err;
953         struct page *page = *pagep;
954 
955         if (page && zeroing && count < PAGE_SIZE)
956                 clear_highpage(page);
957 
958         while (count) {
959                 if (cs->write && cs->pipebufs && page) {
960                         return fuse_ref_page(cs, page, offset, count);
961                 } else if (!cs->len) {
962                         if (cs->move_pages && page &&
963                             offset == 0 && count == PAGE_SIZE) {
964                                 err = fuse_try_move_page(cs, pagep);
965                                 if (err <= 0)
966                                         return err;
967                         } else {
968                                 err = fuse_copy_fill(cs);
969                                 if (err)
970                                         return err;
971                         }
972                 }
973                 if (page) {
974                         void *mapaddr = kmap_atomic(page);
975                         void *buf = mapaddr + offset;
976                         offset += fuse_copy_do(cs, &buf, &count);
977                         kunmap_atomic(mapaddr);
978                 } else
979                         offset += fuse_copy_do(cs, NULL, &count);
980         }
981         if (page && !cs->write)
982                 flush_dcache_page(page);
983         return 0;
984 }
985 
986 /* Copy pages in the request to/from userspace buffer */
987 static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
988                            int zeroing)
989 {
990         unsigned i;
991         struct fuse_req *req = cs->req;
992 
993         for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
994                 int err;
995                 unsigned offset = req->page_descs[i].offset;
996                 unsigned count = min(nbytes, req->page_descs[i].length);
997 
998                 err = fuse_copy_page(cs, &req->pages[i], offset, count,
999                                      zeroing);
1000                 if (err)
1001                         return err;
1002 
1003                 nbytes -= count;
1004         }
1005         return 0;
1006 }
1007 
1008 /* Copy a single argument in the request to/from userspace buffer */
1009 static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
1010 {
1011         while (size) {
1012                 if (!cs->len) {
1013                         int err = fuse_copy_fill(cs);
1014                         if (err)
1015                                 return err;
1016                 }
1017                 fuse_copy_do(cs, &val, &size);
1018         }
1019         return 0;
1020 }
1021 
1022 /* Copy request arguments to/from userspace buffer */
1023 static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
1024                           unsigned argpages, struct fuse_arg *args,
1025                           int zeroing)
1026 {
1027         int err = 0;
1028         unsigned i;
1029 
1030         for (i = 0; !err && i < numargs; i++)  {
1031                 struct fuse_arg *arg = &args[i];
1032                 if (i == numargs - 1 && argpages)
1033                         err = fuse_copy_pages(cs, arg->size, zeroing);
1034                 else
1035                         err = fuse_copy_one(cs, arg->value, arg->size);
1036         }
1037         return err;
1038 }
1039 
1040 static int forget_pending(struct fuse_iqueue *fiq)
1041 {
1042         return fiq->forget_list_head.next != NULL;
1043 }
1044 
1045 static int request_pending(struct fuse_iqueue *fiq)
1046 {
1047         return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
1048                 forget_pending(fiq);
1049 }
1050 
1051 /*
1052  * Transfer an interrupt request to userspace
1053  *
1054  * Unlike other requests this is assembled on demand, without a need
1055  * to allocate a separate fuse_req structure.
1056  *
1057  * Called with fiq->waitq.lock held, releases it
1058  */
1059 static int fuse_read_interrupt(struct fuse_iqueue *fiq,
1060                                struct fuse_copy_state *cs,
1061                                size_t nbytes, struct fuse_req *req)
1062 __releases(fiq->waitq.lock)
1063 {
1064         struct fuse_in_header ih;
1065         struct fuse_interrupt_in arg;
1066         unsigned reqsize = sizeof(ih) + sizeof(arg);
1067         int err;
1068 
1069         list_del_init(&req->intr_entry);
1070         req->intr_unique = fuse_get_unique(fiq);
1071         memset(&ih, 0, sizeof(ih));
1072         memset(&arg, 0, sizeof(arg));
1073         ih.len = reqsize;
1074         ih.opcode = FUSE_INTERRUPT;
1075         ih.unique = req->intr_unique;
1076         arg.unique = req->in.h.unique;
1077 
1078         spin_unlock(&fiq->waitq.lock);
1079         if (nbytes < reqsize)
1080                 return -EINVAL;
1081 
1082         err = fuse_copy_one(cs, &ih, sizeof(ih));
1083         if (!err)
1084                 err = fuse_copy_one(cs, &arg, sizeof(arg));
1085         fuse_copy_finish(cs);
1086 
1087         return err ? err : reqsize;
1088 }
1089 
1090 static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
1091                                                unsigned max,
1092                                                unsigned *countp)
1093 {
1094         struct fuse_forget_link *head = fiq->forget_list_head.next;
1095         struct fuse_forget_link **newhead = &head;
1096         unsigned count;
1097 
1098         for (count = 0; *newhead != NULL && count < max; count++)
1099                 newhead = &(*newhead)->next;
1100 
1101         fiq->forget_list_head.next = *newhead;
1102         *newhead = NULL;
1103         if (fiq->forget_list_head.next == NULL)
1104                 fiq->forget_list_tail = &fiq->forget_list_head;
1105 
1106         if (countp != NULL)
1107                 *countp = count;
1108 
1109         return head;
1110 }
1111 
1112 static int fuse_read_single_forget(struct fuse_iqueue *fiq,
1113                                    struct fuse_copy_state *cs,
1114                                    size_t nbytes)
1115 __releases(fiq->waitq.lock)
1116 {
1117         int err;
1118         struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
1119         struct fuse_forget_in arg = {
1120                 .nlookup = forget->forget_one.nlookup,
1121         };
1122         struct fuse_in_header ih = {
1123                 .opcode = FUSE_FORGET,
1124                 .nodeid = forget->forget_one.nodeid,
1125                 .unique = fuse_get_unique(fiq),
1126                 .len = sizeof(ih) + sizeof(arg),
1127         };
1128 
1129         spin_unlock(&fiq->waitq.lock);
1130         kfree(forget);
1131         if (nbytes < ih.len)
1132                 return -EINVAL;
1133 
1134         err = fuse_copy_one(cs, &ih, sizeof(ih));
1135         if (!err)
1136                 err = fuse_copy_one(cs, &arg, sizeof(arg));
1137         fuse_copy_finish(cs);
1138 
1139         if (err)
1140                 return err;
1141 
1142         return ih.len;
1143 }
1144 
1145 static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
1146                                    struct fuse_copy_state *cs, size_t nbytes)
1147 __releases(fiq->waitq.lock)
1148 {
1149         int err;
1150         unsigned max_forgets;
1151         unsigned count;
1152         struct fuse_forget_link *head;
1153         struct fuse_batch_forget_in arg = { .count = 0 };
1154         struct fuse_in_header ih = {
1155                 .opcode = FUSE_BATCH_FORGET,
1156                 .unique = fuse_get_unique(fiq),
1157                 .len = sizeof(ih) + sizeof(arg),
1158         };
1159 
1160         if (nbytes < ih.len) {
1161                 spin_unlock(&fiq->waitq.lock);
1162                 return -EINVAL;
1163         }
1164 
1165         max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
1166         head = dequeue_forget(fiq, max_forgets, &count);
1167         spin_unlock(&fiq->waitq.lock);
1168 
1169         arg.count = count;
1170         ih.len += count * sizeof(struct fuse_forget_one);
1171         err = fuse_copy_one(cs, &ih, sizeof(ih));
1172         if (!err)
1173                 err = fuse_copy_one(cs, &arg, sizeof(arg));
1174 
1175         while (head) {
1176                 struct fuse_forget_link *forget = head;
1177 
1178                 if (!err) {
1179                         err = fuse_copy_one(cs, &forget->forget_one,
1180                                             sizeof(forget->forget_one));
1181                 }
1182                 head = forget->next;
1183                 kfree(forget);
1184         }
1185 
1186         fuse_copy_finish(cs);
1187 
1188         if (err)
1189                 return err;
1190 
1191         return ih.len;
1192 }
1193 
1194 static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
1195                             struct fuse_copy_state *cs,
1196                             size_t nbytes)
1197 __releases(fiq->waitq.lock)
1198 {
1199         if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
1200                 return fuse_read_single_forget(fiq, cs, nbytes);
1201         else
1202                 return fuse_read_batch_forget(fiq, cs, nbytes);
1203 }
1204 
1205 /*
1206  * Read a single request into the userspace filesystem's buffer.  This
1207  * function waits until a request is available, then removes it from
1208  * the pending list and copies the request data to the userspace buffer.
1209  * If no reply is needed (FORGET), or the request has been aborted, or
1210  * there was an error during the copying, then it is finished by calling
1211  * request_end().  Otherwise add it to the processing list and set
1212  * the 'sent' flag.
1213  */
1214 static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
1215                                 struct fuse_copy_state *cs, size_t nbytes)
1216 {
1217         ssize_t err;
1218         struct fuse_conn *fc = fud->fc;
1219         struct fuse_iqueue *fiq = &fc->iq;
1220         struct fuse_pqueue *fpq = &fud->pq;
1221         struct fuse_req *req;
1222         struct fuse_in *in;
1223         unsigned reqsize;
1224 
1225  restart:
1226         spin_lock(&fiq->waitq.lock);
1227         err = -EAGAIN;
1228         if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
1229             !request_pending(fiq))
1230                 goto err_unlock;
1231 
1232         err = wait_event_interruptible_exclusive_locked(fiq->waitq,
1233                                 !fiq->connected || request_pending(fiq));
1234         if (err)
1235                 goto err_unlock;
1236 
1237         err = -ENODEV;
1238         if (!fiq->connected)
1239                 goto err_unlock;
1240 
1241         if (!list_empty(&fiq->interrupts)) {
1242                 req = list_entry(fiq->interrupts.next, struct fuse_req,
1243                                  intr_entry);
1244                 return fuse_read_interrupt(fiq, cs, nbytes, req);
1245         }
1246 
1247         if (forget_pending(fiq)) {
1248                 if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
1249                         return fuse_read_forget(fc, fiq, cs, nbytes);
1250 
1251                 if (fiq->forget_batch <= -8)
1252                         fiq->forget_batch = 16;
1253         }
1254 
1255         req = list_entry(fiq->pending.next, struct fuse_req, list);
1256         clear_bit(FR_PENDING, &req->flags);
1257         list_del_init(&req->list);
1258         spin_unlock(&fiq->waitq.lock);
1259 
1260         in = &req->in;
1261         reqsize = in->h.len;
1262 
1263         if (task_active_pid_ns(current) != fc->pid_ns) {
1264                 rcu_read_lock();
1265                 in->h.pid = pid_vnr(find_pid_ns(in->h.pid, fc->pid_ns));
1266                 rcu_read_unlock();
1267         }
1268 
1269         /* If request is too large, reply with an error and restart the read */
1270         if (nbytes < reqsize) {
1271                 req->out.h.error = -EIO;
1272                 /* SETXATTR is special, since it may contain too large data */
1273                 if (in->h.opcode == FUSE_SETXATTR)
1274                         req->out.h.error = -E2BIG;
1275                 request_end(fc, req);
1276                 goto restart;
1277         }
1278         spin_lock(&fpq->lock);
1279         list_add(&req->list, &fpq->io);
1280         spin_unlock(&fpq->lock);
1281         cs->req = req;
1282         err = fuse_copy_one(cs, &in->h, sizeof(in->h));
1283         if (!err)
1284                 err = fuse_copy_args(cs, in->numargs, in->argpages,
1285                                      (struct fuse_arg *) in->args, 0);
1286         fuse_copy_finish(cs);
1287         spin_lock(&fpq->lock);
1288         clear_bit(FR_LOCKED, &req->flags);
1289         if (!fpq->connected) {
1290                 err = -ENODEV;
1291                 goto out_end;
1292         }
1293         if (err) {
1294                 req->out.h.error = -EIO;
1295                 goto out_end;
1296         }
1297         if (!test_bit(FR_ISREPLY, &req->flags)) {
1298                 err = reqsize;
1299                 goto out_end;
1300         }
1301         list_move_tail(&req->list, &fpq->processing);
1302         spin_unlock(&fpq->lock);
1303         set_bit(FR_SENT, &req->flags);
1304         /* matches barrier in request_wait_answer() */
1305         smp_mb__after_atomic();
1306         if (test_bit(FR_INTERRUPTED, &req->flags))
1307                 queue_interrupt(fiq, req);
1308 
1309         return reqsize;
1310 
1311 out_end:
1312         if (!test_bit(FR_PRIVATE, &req->flags))
1313                 list_del_init(&req->list);
1314         spin_unlock(&fpq->lock);
1315         request_end(fc, req);
1316         return err;
1317 
1318  err_unlock:
1319         spin_unlock(&fiq->waitq.lock);
1320         return err;
1321 }
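
Seen from the daemon side of the device, the read loop is roughly as follows (userspace sketch; real daemons size the buffer as max_write plus header room, with FUSE_MIN_READ_BUFFER from <linux/fuse.h> as the floor):

        char buf[FUSE_MIN_READ_BUFFER];

        for (;;) {
                ssize_t n = read(fuse_fd, buf, sizeof(buf));
                if (n < 0) {
                        if (errno == EINTR || errno == EAGAIN)
                                continue;       /* EAGAIN only with O_NONBLOCK */
                        break;                  /* e.g. ENODEV after abort */
                }
                struct fuse_in_header *in = (struct fuse_in_header *)buf;
                /* dispatch on in->opcode; reply quoting in->unique */
        }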
1322 
1323 static int fuse_dev_open(struct inode *inode, struct file *file)
1324 {
1325         /*
1326          * A fuse device file's private_data is used to hold
1327          * the fuse_conn(ection) once it is mounted, and to
1328          * keep track of whether the file has been mounted already.
1329          */
1330         file->private_data = NULL;
1331         return 0;
1332 }
1333 
1334 static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
1335 {
1336         struct fuse_copy_state cs;
1337         struct file *file = iocb->ki_filp;
1338         struct fuse_dev *fud = fuse_get_dev(file);
1339 
1340         if (!fud)
1341                 return -EPERM;
1342 
1343         if (!iter_is_iovec(to))
1344                 return -EINVAL;
1345 
1346         fuse_copy_init(&cs, 1, to);
1347 
1348         return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
1349 }
1350 
1351 static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
1352                                     struct pipe_inode_info *pipe,
1353                                     size_t len, unsigned int flags)
1354 {
1355         int total, ret;
1356         int page_nr = 0;
1357         struct pipe_buffer *bufs;
1358         struct fuse_copy_state cs;
1359         struct fuse_dev *fud = fuse_get_dev(in);
1360 
1361         if (!fud)
1362                 return -EPERM;
1363 
1364         bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
1365         if (!bufs)
1366                 return -ENOMEM;
1367 
1368         fuse_copy_init(&cs, 1, NULL);
1369         cs.pipebufs = bufs;
1370         cs.pipe = pipe;
1371         ret = fuse_dev_do_read(fud, in, &cs, len);
1372         if (ret < 0)
1373                 goto out;
1374 
1375         if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
1376                 ret = -EIO;
1377                 goto out;
1378         }
1379 
1380         for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
1381                 /*
1382                  * Need to be careful about this.  Having buf->ops in module
1383                  * code can Oops if the buffer persists after module unload.
1384                  */
1385                 bufs[page_nr].ops = &nosteal_pipe_buf_ops;
1386                 bufs[page_nr].flags = 0;
1387                 ret = add_to_pipe(pipe, &bufs[page_nr++]);
1388                 if (unlikely(ret < 0))
1389                         break;
1390         }
1391         if (total)
1392                 ret = total;
1393 out:
1394         for (; page_nr < cs.nr_segs; page_nr++)
1395                 put_page(bufs[page_nr].page);
1396 
1397         kfree(bufs);
1398         return ret;
1399 }
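
The matching userspace call pulls a request into a pipe without an intermediate user buffer (sketch):

        int p[2];

        if (pipe(p) == 0) {
                ssize_t n = splice(fuse_fd, NULL, p[1], NULL, bufsize, 0);
                /* on success the request header and args now sit in the pipe */
        }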
1400 
1401 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
1402                             struct fuse_copy_state *cs)
1403 {
1404         struct fuse_notify_poll_wakeup_out outarg;
1405         int err = -EINVAL;
1406 
1407         if (size != sizeof(outarg))
1408                 goto err;
1409 
1410         err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1411         if (err)
1412                 goto err;
1413 
1414         fuse_copy_finish(cs);
1415         return fuse_notify_poll_wakeup(fc, &outarg);
1416 
1417 err:
1418         fuse_copy_finish(cs);
1419         return err;
1420 }
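
Notifications arrive over the device write path (the fuse_notify() dispatch, past this excerpt): the daemon writes a fuse_out_header with unique == 0 and the notification code in the error field. A poll wakeup, sketched:

        struct fuse_notify_poll_wakeup_out arg = { .kh = kh };
        struct fuse_out_header oh = {
                .len    = sizeof(oh) + sizeof(arg),
                .error  = FUSE_NOTIFY_POLL,     /* notification code, not an errno */
                .unique = 0,                    /* 0 marks an unsolicited message */
        };
        struct iovec iov[2] = {
                { .iov_base = &oh,  .iov_len = sizeof(oh)  },
                { .iov_base = &arg, .iov_len = sizeof(arg) },
        };

        writev(fuse_fd, iov, 2);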
1421 
1422 static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
1423                                    struct fuse_copy_state *cs)
1424 {
1425         struct fuse_notify_inval_inode_out outarg;
1426         int err = -EINVAL;
1427 
1428         if (size != sizeof(outarg))
1429                 goto err;
1430 
1431         err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1432         if (err)
1433                 goto err;
1434         fuse_copy_finish(cs);
1435 
1436         down_read(&fc->killsb);
1437         err = -ENOENT;
1438         if (fc->sb) {
1439                 err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
1440                                                outarg.off, outarg.len);
1441         }
1442         up_read(&fc->killsb);
1443         return err;
1444 
1445 err:
1446         fuse_copy_finish(cs);
1447         return err;
1448 }
1449 
1450 static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
1451                                    struct fuse_copy_state *cs)
1452 {
1453         struct fuse_notify_inval_entry_out outarg;
1454         int err = -ENOMEM;
1455         char *buf;
1456         struct qstr name;
1457 
1458         buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1459         if (!buf)
1460                 goto err;
1461 
1462         err = -EINVAL;
1463         if (size < sizeof(outarg))
1464                 goto err;
1465 
1466         err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1467         if (err)
1468                 goto err;
1469 
1470         err = -ENAMETOOLONG;
1471         if (outarg.namelen > FUSE_NAME_MAX)
1472                 goto err;
1473 
1474         err = -EINVAL;
1475         if (size != sizeof(outarg) + outarg.namelen + 1)
1476                 goto err;
1477 
1478         name.name = buf;
1479         name.len = outarg.namelen;
1480         err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1481         if (err)
1482                 goto err;
1483         fuse_copy_finish(cs);
1484         buf[outarg.namelen] = 0;
1485 
1486         down_read(&fc->killsb);
1487         err = -ENOENT;
1488         if (fc->sb)
1489                 err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
1490         up_read(&fc->killsb);
1491         kfree(buf);
1492         return err;
1493 
1494 err:
1495         kfree(buf);
1496         fuse_copy_finish(cs);
1497         return err;
1498 }
1499 
1500 static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
1501                               struct fuse_copy_state *cs)
1502 {
1503         struct fuse_notify_delete_out outarg;
1504         int err = -ENOMEM;
1505         char *buf;
1506         struct qstr name;
1507 
1508         buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1509         if (!buf)
1510                 goto err;
1511 
1512         err = -EINVAL;
1513         if (size < sizeof(outarg))
1514                 goto err;
1515 
1516         err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1517         if (err)
1518                 goto err;
1519 
1520         err = -ENAMETOOLONG;
1521         if (outarg.namelen > FUSE_NAME_MAX)
1522                 goto err;
1523 
1524         err = -EINVAL;
1525         if (size != sizeof(outarg) + outarg.namelen + 1)
1526                 goto err;
1527 
1528         name.name = buf;
1529         name.len = outarg.namelen;
1530         err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1531         if (err)
1532                 goto err;
1533         fuse_copy_finish(cs);
1534         buf[outarg.namelen] = 0;
1535 
1536         down_read(&fc->killsb);
1537         err = -ENOENT;
1538         if (fc->sb)
1539                 err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
1540                                                outarg.child, &name);
1541         up_read(&fc->killsb);
1542         kfree(buf);
1543         return err;
1544 
1545 err:
1546         kfree(buf);
1547         fuse_copy_finish(cs);
1548         return err;
1549 }
1550 
1551 static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
1552                              struct fuse_copy_state *cs)
1553 {
1554         struct fuse_notify_store_out outarg;
1555         struct inode *inode;
1556         struct address_space *mapping;
1557         u64 nodeid;
1558         int err;
1559         pgoff_t index;
1560         unsigned int offset;
1561         unsigned int num;
1562         loff_t file_size;
1563         loff_t end;
1564 
1565         err = -EINVAL;
1566         if (size < sizeof(outarg))
1567                 goto out_finish;
1568 
1569         err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1570         if (err)
1571                 goto out_finish;
1572 
1573         err = -EINVAL;
1574         if (size - sizeof(outarg) != outarg.size)
1575                 goto out_finish;
1576 
1577         nodeid = outarg.nodeid;
1578 
1579         down_read(&fc->killsb);
1580 
1581         err = -ENOENT;
1582         if (!fc->sb)
1583                 goto out_up_killsb;
1584 
1585         inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1586         if (!inode)
1587                 goto out_up_killsb;
1588 
1589         mapping = inode->i_mapping;
1590         index = outarg.offset >> PAGE_SHIFT;
1591         offset = outarg.offset & ~PAGE_MASK;
1592         file_size = i_size_read(inode);
1593         end = outarg.offset + outarg.size;
1594         if (end > file_size) {
1595                 file_size = end;
1596                 fuse_write_update_size(inode, file_size);
1597         }
1598 
1599         num = outarg.size;
1600         while (num) {
1601                 struct page *page;
1602                 unsigned int this_num;
1603 
1604                 err = -ENOMEM;
1605                 page = find_or_create_page(mapping, index,
1606                                            mapping_gfp_mask(mapping));
1607                 if (!page)
1608                         goto out_iput;
1609 
1610                 this_num = min_t(unsigned, num, PAGE_SIZE - offset);
1611                 err = fuse_copy_page(cs, &page, offset, this_num, 0);
1612                 if (!err && offset == 0 &&
1613                     (this_num == PAGE_SIZE || file_size == end))
1614                         SetPageUptodate(page);
1615                 unlock_page(page);
1616                 put_page(page);
1617 
1618                 if (err)
1619                         goto out_iput;
1620 
1621                 num -= this_num;
1622                 offset = 0;
1623                 index++;
1624         }
1625 
1626         err = 0;
1627 
1628 out_iput:
1629         iput(inode);
1630 out_up_killsb:
1631         up_read(&fc->killsb);
1632 out_finish:
1633         fuse_copy_finish(cs);
1634         return err;
1635 }
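
/*
 * Editor's sketch, not part of dev.c: FUSE_NOTIFY_STORE pushes data
 * straight into the page cache of the inode identified by nodeid, as
 * implemented above.  The payload must be exactly outarg.size bytes, or
 * the handler returns -EINVAL.  Helper and "devfd" are illustrative only.
 */
#include <linux/fuse.h>
#include <stdint.h>
#include <sys/uio.h>

static int send_notify_store(int devfd, uint64_t nodeid, uint64_t offset,
                             const void *data, uint32_t size)
{
        struct fuse_notify_store_out arg = {
                .nodeid = nodeid,
                .offset = offset,
                .size   = size,
        };
        struct fuse_out_header oh = {
                .len    = (uint32_t)(sizeof(oh) + sizeof(arg) + size),
                .error  = FUSE_NOTIFY_STORE,
                .unique = 0,
        };
        struct iovec iov[3] = {
                { &oh,  sizeof(oh)  },
                { &arg, sizeof(arg) },
                { (void *)data, size },
        };

        return writev(devfd, iov, 3) == (ssize_t)oh.len ? 0 : -1;
}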
1636 
1637 static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
1638 {
1639         release_pages(req->pages, req->num_pages);
1640 }
1641 
1642 static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1643                          struct fuse_notify_retrieve_out *outarg)
1644 {
1645         int err;
1646         struct address_space *mapping = inode->i_mapping;
1647         struct fuse_req *req;
1648         pgoff_t index;
1649         loff_t file_size;
1650         unsigned int num;
1651         unsigned int offset;
1652         size_t total_len = 0;
1653         int num_pages;
1654 
1655         offset = outarg->offset & ~PAGE_MASK;
1656         file_size = i_size_read(inode);
1657 
1658         num = outarg->size;
1659         if (outarg->offset > file_size)
1660                 num = 0;
1661         else if (outarg->offset + num > file_size)
1662                 num = file_size - outarg->offset;
1663 
1664         num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
1665         num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);
1666 
1667         req = fuse_get_req(fc, num_pages);
1668         if (IS_ERR(req))
1669                 return PTR_ERR(req);
1670 
1671         req->in.h.opcode = FUSE_NOTIFY_REPLY;
1672         req->in.h.nodeid = outarg->nodeid;
1673         req->in.numargs = 2;
1674         req->in.argpages = 1;
1675         req->page_descs[0].offset = offset;
1676         req->end = fuse_retrieve_end;
1677 
1678         index = outarg->offset >> PAGE_SHIFT;
1679 
1680         while (num && req->num_pages < num_pages) {
1681                 struct page *page;
1682                 unsigned int this_num;
1683 
1684                 page = find_get_page(mapping, index);
1685                 if (!page)
1686                         break;
1687 
1688                 this_num = min_t(unsigned, num, PAGE_SIZE - offset);
1689                 req->pages[req->num_pages] = page;
1690                 req->page_descs[req->num_pages].length = this_num;
1691                 req->num_pages++;
1692 
1693                 offset = 0;
1694                 num -= this_num;
1695                 total_len += this_num;
1696                 index++;
1697         }
1698         req->misc.retrieve_in.offset = outarg->offset;
1699         req->misc.retrieve_in.size = total_len;
1700         req->in.args[0].size = sizeof(req->misc.retrieve_in);
1701         req->in.args[0].value = &req->misc.retrieve_in;
1702         req->in.args[1].size = total_len;
1703 
1704         err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
1705         if (err)
1706                 fuse_retrieve_end(fc, req);
1707 
1708         return err;
1709 }
1710 
1711 static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
1712                                 struct fuse_copy_state *cs)
1713 {
1714         struct fuse_notify_retrieve_out outarg;
1715         struct inode *inode;
1716         int err;
1717 
1718         err = -EINVAL;
1719         if (size != sizeof(outarg))
1720                 goto copy_finish;
1721 
1722         err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1723         if (err)
1724                 goto copy_finish;
1725 
1726         fuse_copy_finish(cs);
1727 
1728         down_read(&fc->killsb);
1729         err = -ENOENT;
1730         if (fc->sb) {
1731                 u64 nodeid = outarg.nodeid;
1732 
1733                 inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1734                 if (inode) {
1735                         err = fuse_retrieve(fc, inode, &outarg);
1736                         iput(inode);
1737                 }
1738         }
1739         up_read(&fc->killsb);
1740 
1741         return err;
1742 
1743 copy_finish:
1744         fuse_copy_finish(cs);
1745         return err;
1746 }
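
/*
 * Editor's sketch, not part of dev.c: FUSE_NOTIFY_RETRIEVE asks the kernel
 * to send back cached data for a nodeid.  fuse_retrieve() above packages
 * whatever pages are present into a FUSE_NOTIFY_REPLY request whose header
 * carries notify_unique, so the daemon matches that reply by the cookie
 * when it next reads from the device.  Helper and "devfd" are illustrative.
 */
#include <linux/fuse.h>
#include <stdint.h>
#include <sys/uio.h>

static int send_notify_retrieve(int devfd, uint64_t cookie, uint64_t nodeid,
                                uint64_t offset, uint32_t size)
{
        struct fuse_notify_retrieve_out arg = {
                .notify_unique = cookie,        /* echoed in the reply */
                .nodeid = nodeid,
                .offset = offset,
                .size   = size,
        };
        struct fuse_out_header oh = {
                .len    = sizeof(oh) + sizeof(arg),
                .error  = FUSE_NOTIFY_RETRIEVE,
                .unique = 0,
        };
        struct iovec iov[2] = {
                { &oh,  sizeof(oh)  },
                { &arg, sizeof(arg) },
        };

        return writev(devfd, iov, 2) == (ssize_t)oh.len ? 0 : -1;
}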
1747 
1748 static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
1749                        unsigned int size, struct fuse_copy_state *cs)
1750 {
1751         /* Don't try to move pages (yet) */
1752         cs->move_pages = 0;
1753 
1754         switch (code) {
1755         case FUSE_NOTIFY_POLL:
1756                 return fuse_notify_poll(fc, size, cs);
1757 
1758         case FUSE_NOTIFY_INVAL_INODE:
1759                 return fuse_notify_inval_inode(fc, size, cs);
1760 
1761         case FUSE_NOTIFY_INVAL_ENTRY:
1762                 return fuse_notify_inval_entry(fc, size, cs);
1763 
1764         case FUSE_NOTIFY_STORE:
1765                 return fuse_notify_store(fc, size, cs);
1766 
1767         case FUSE_NOTIFY_RETRIEVE:
1768                 return fuse_notify_retrieve(fc, size, cs);
1769 
1770         case FUSE_NOTIFY_DELETE:
1771                 return fuse_notify_delete(fc, size, cs);
1772 
1773         default:
1774                 fuse_copy_finish(cs);
1775                 return -EINVAL;
1776         }
1777 }
1778 
1779 /* Look up request on processing list by unique ID */
1780 static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
1781 {
1782         struct fuse_req *req;
1783 
1784         list_for_each_entry(req, &fpq->processing, list) {
1785                 if (req->in.h.unique == unique || req->intr_unique == unique)
1786                         return req;
1787         }
1788         return NULL;
1789 }
1790 
1791 static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
1792                          unsigned nbytes)
1793 {
1794         unsigned reqsize = sizeof(struct fuse_out_header);
1795 
1796         if (out->h.error)
1797                 return nbytes != reqsize ? -EINVAL : 0;
1798 
1799         reqsize += len_args(out->numargs, out->args);
1800 
1801         if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
1802                 return -EINVAL;
1803         else if (reqsize > nbytes) {
1804                 struct fuse_arg *lastarg = &out->args[out->numargs-1];
1805                 unsigned diffsize = reqsize - nbytes;
1806                 if (diffsize > lastarg->size)
1807                         return -EINVAL;
1808                 lastarg->size -= diffsize;
1809         }
1810         return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
1811                               out->page_zeroing);
1812 }
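
/*
 * Editor's sketch, not part of dev.c: copy_out_args() accepts a reply
 * shorter than the declared argument sizes only when out->argvar is set,
 * trimming the last argument.  A READ reply is the usual case: the daemon
 * may return fewer bytes than requested and sizes the header accordingly.
 * Helper, "devfd" and "unique" (taken from the request's in-header) are
 * illustrative only.
 */
#include <linux/fuse.h>
#include <stdint.h>
#include <sys/uio.h>

static int reply_read(int devfd, uint64_t unique, const void *data,
                      size_t count)
{
        struct fuse_out_header oh = {
                .len    = (uint32_t)(sizeof(oh) + count), /* may be short */
                .error  = 0,
                .unique = unique,
        };
        struct iovec iov[2] = {
                { &oh, sizeof(oh) },
                { (void *)data, count },
        };

        return writev(devfd, iov, 2) == (ssize_t)oh.len ? 0 : -1;
}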
1813 
1814 /*
1815  * Write a single reply to a request.  First the header is copied from
1816  * the write buffer.  The request is then searched on the processing
1817  * list by the unique ID found in the header.  If found, then remove
1818  * it from the list and copy the rest of the buffer to the request.
1819  * The request is finished by calling request_end()
1820  */
1821 static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
1822                                  struct fuse_copy_state *cs, size_t nbytes)
1823 {
1824         int err;
1825         struct fuse_conn *fc = fud->fc;
1826         struct fuse_pqueue *fpq = &fud->pq;
1827         struct fuse_req *req;
1828         struct fuse_out_header oh;
1829 
1830         if (nbytes < sizeof(struct fuse_out_header))
1831                 return -EINVAL;
1832 
1833         err = fuse_copy_one(cs, &oh, sizeof(oh));
1834         if (err)
1835                 goto err_finish;
1836 
1837         err = -EINVAL;
1838         if (oh.len != nbytes)
1839                 goto err_finish;
1840 
1841         /*
1842          * Zero oh.unique indicates an unsolicited notification message,
1843          * and the error field carries the notification code.
1844          */
1845         if (!oh.unique) {
1846                 err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
1847                 return err ? err : nbytes;
1848         }
1849 
1850         err = -EINVAL;
1851         if (oh.error <= -1000 || oh.error > 0)
1852                 goto err_finish;
1853 
1854         spin_lock(&fpq->lock);
1855         err = -ENOENT;
1856         if (!fpq->connected)
1857                 goto err_unlock_pq;
1858 
1859         req = request_find(fpq, oh.unique);
1860         if (!req)
1861                 goto err_unlock_pq;
1862 
1863         /* Is it an interrupt reply? */
1864         if (req->intr_unique == oh.unique) {
1865                 spin_unlock(&fpq->lock);
1866 
1867                 err = -EINVAL;
1868                 if (nbytes != sizeof(struct fuse_out_header))
1869                         goto err_finish;
1870 
1871                 if (oh.error == -ENOSYS)
1872                         fc->no_interrupt = 1;
1873                 else if (oh.error == -EAGAIN)
1874                         queue_interrupt(&fc->iq, req);
1875 
1876                 fuse_copy_finish(cs);
1877                 return nbytes;
1878         }
1879 
1880         clear_bit(FR_SENT, &req->flags);
1881         list_move(&req->list, &fpq->io);
1882         req->out.h = oh;
1883         set_bit(FR_LOCKED, &req->flags);
1884         spin_unlock(&fpq->lock);
1885         cs->req = req;
1886         if (!req->out.page_replace)
1887                 cs->move_pages = 0;
1888 
1889         err = copy_out_args(cs, &req->out, nbytes);
1890         fuse_copy_finish(cs);
1891 
1892         spin_lock(&fpq->lock);
1893         clear_bit(FR_LOCKED, &req->flags);
1894         if (!fpq->connected)
1895                 err = -ENOENT;
1896         else if (err)
1897                 req->out.h.error = -EIO;
1898         if (!test_bit(FR_PRIVATE, &req->flags))
1899                 list_del_init(&req->list);
1900         spin_unlock(&fpq->lock);
1901 
1902         request_end(fc, req);
1903 
1904         return err ? err : nbytes;
1905 
1906  err_unlock_pq:
1907         spin_unlock(&fpq->lock);
1908  err_finish:
1909         fuse_copy_finish(cs);
1910         return err;
1911 }
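
/*
 * Editor's sketch, not part of dev.c: replying to a FUSE_INTERRUPT is a
 * header-only write, and the code above rejects anything longer.  A daemon
 * that has not yet seen the interrupted request can answer -EAGAIN to have
 * the interrupt requeued; -ENOSYS disables interrupts for the connection.
 * Helper and "devfd" are illustrative; intr_unique is the unique from the
 * FUSE_INTERRUPT request's in-header.
 */
#include <linux/fuse.h>
#include <stdint.h>
#include <unistd.h>

static int reply_interrupt(int devfd, uint64_t intr_unique, int error)
{
        struct fuse_out_header oh = {
                .len    = sizeof(oh),   /* header only */
                .error  = error,        /* 0, -EAGAIN or -ENOSYS */
                .unique = intr_unique,
        };

        return write(devfd, &oh, sizeof(oh)) == (ssize_t)sizeof(oh) ? 0 : -1;
}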
1912 
1913 static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
1914 {
1915         struct fuse_copy_state cs;
1916         struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);
1917 
1918         if (!fud)
1919                 return -EPERM;
1920 
1921         if (!iter_is_iovec(from))
1922                 return -EINVAL;
1923 
1924         fuse_copy_init(&cs, 0, from);
1925 
1926         return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
1927 }
1928 
1929 static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
1930                                      struct file *out, loff_t *ppos,
1931                                      size_t len, unsigned int flags)
1932 {
1933         unsigned nbuf;
1934         unsigned idx;
1935         struct pipe_buffer *bufs;
1936         struct fuse_copy_state cs;
1937         struct fuse_dev *fud;
1938         size_t rem;
1939         ssize_t ret;
1940 
1941         fud = fuse_get_dev(out);
1942         if (!fud)
1943                 return -EPERM;
1944 
1945         bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
1946         if (!bufs)
1947                 return -ENOMEM;
1948 
1949         pipe_lock(pipe);
1950         nbuf = 0;
1951         rem = 0;
1952         for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
1953                 rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
1954 
1955         ret = -EINVAL;
1956         if (rem < len) {
1957                 pipe_unlock(pipe);
1958                 goto out;
1959         }
1960 
1961         rem = len;
1962         while (rem) {
1963                 struct pipe_buffer *ibuf;
1964                 struct pipe_buffer *obuf;
1965 
1966                 BUG_ON(nbuf >= pipe->buffers);
1967                 BUG_ON(!pipe->nrbufs);
1968                 ibuf = &pipe->bufs[pipe->curbuf];
1969                 obuf = &bufs[nbuf];
1970 
1971                 if (rem >= ibuf->len) {
1972                         *obuf = *ibuf;
1973                         ibuf->ops = NULL;
1974                         pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
1975                         pipe->nrbufs--;
1976                 } else {
1977                         pipe_buf_get(pipe, ibuf);
1978                         *obuf = *ibuf;
1979                         obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
1980                         obuf->len = rem;
1981                         ibuf->offset += obuf->len;
1982                         ibuf->len -= obuf->len;
1983                 }
1984                 nbuf++;
1985                 rem -= obuf->len;
1986         }
1987         pipe_unlock(pipe);
1988 
1989         fuse_copy_init(&cs, 0, NULL);
1990         cs.pipebufs = bufs;
1991         cs.nr_segs = nbuf;
1992         cs.pipe = pipe;
1993 
1994         if (flags & SPLICE_F_MOVE)
1995                 cs.move_pages = 1;
1996 
1997         ret = fuse_dev_do_write(fud, &cs, len);
1998 
1999         for (idx = 0; idx < nbuf; idx++)
2000                 pipe_buf_release(pipe, &bufs[idx]);
2001 
2002 out:
2003         kfree(bufs);
2004         return ret;
2005 }
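
/*
 * Editor's sketch, not part of dev.c: the splice path above turns
 * SPLICE_F_MOVE into cs.move_pages, letting the kernel steal whole pages
 * from the pipe instead of copying where possible.  A daemon writes the
 * reply (header first) into a pipe, then splices it to the device.
 * Helper name and fds are illustrative only.
 */
#define _GNU_SOURCE
#include <fcntl.h>

static ssize_t splice_reply(int pipe_rd, int devfd, size_t len)
{
        /* The kernel may still fall back to copying if a page cannot
           be stolen; SPLICE_F_MOVE is only a hint. */
        return splice(pipe_rd, NULL, devfd, NULL, len, SPLICE_F_MOVE);
}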
2006 
2007 static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
2008 {
2009         __poll_t mask = EPOLLOUT | EPOLLWRNORM;
2010         struct fuse_iqueue *fiq;
2011         struct fuse_dev *fud = fuse_get_dev(file);
2012 
2013         if (!fud)
2014                 return EPOLLERR;
2015 
2016         fiq = &fud->fc->iq;
2017         poll_wait(file, &fiq->waitq, wait);
2018 
2019         spin_lock(&fiq->waitq.lock);
2020         if (!fiq->connected)
2021                 mask = EPOLLERR;
2022         else if (request_pending(fiq))
2023                 mask |= EPOLLIN | EPOLLRDNORM;
2024         spin_unlock(&fiq->waitq.lock);
2025 
2026         return mask;
2027 }
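
/*
 * Editor's sketch, not part of dev.c: a daemon event loop can poll the
 * device fd; fuse_dev_poll() above reports EPOLLIN when a request is
 * pending and EPOLLERR once the connection is gone.  Helper name is
 * illustrative only.
 */
#include <poll.h>

static int wait_for_request(int devfd, int timeout_ms)
{
        struct pollfd pfd = { .fd = devfd, .events = POLLIN };
        int n = poll(&pfd, 1, timeout_ms);

        if (n > 0 && (pfd.revents & POLLERR))
                return -1;      /* connection aborted or released */
        return n;               /* > 0: a request can now be read */
}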
2028 
2029 /*
2030  * Abort all requests on the given list (pending or processing)
2031  *
2032  * Called without fc->lock held.
2033  */
2034 static void end_requests(struct fuse_conn *fc, struct list_head *head)
2035 {
2036         while (!list_empty(head)) {
2037                 struct fuse_req *req;
2038                 req = list_entry(head->next, struct fuse_req, list);
2039                 req->out.h.error = -ECONNABORTED;
2040                 clear_bit(FR_SENT, &req->flags);
2041                 list_del_init(&req->list);
2042                 request_end(fc, req);
2043         }
2044 }
2045 
2046 static void end_polls(struct fuse_conn *fc)
2047 {
2048         struct rb_node *p;
2049 
2050         p = rb_first(&fc->polled_files);
2051 
2052         while (p) {
2053                 struct fuse_file *ff;
2054                 ff = rb_entry(p, struct fuse_file, polled_node);
2055                 wake_up_interruptible_all(&ff->poll_wait);
2056 
2057                 p = rb_next(p);
2058         }
2059 }
2060 
2061 /*
2062  * Abort all requests.
2063  *
2064  * Emergency exit in case of a malicious or accidental deadlock, or just a hung
2065  * filesystem.
2066  *
2067  * The same effect is usually achievable through killing the filesystem daemon
2068  * and all users of the filesystem.  The exception is the combination of an
2069  * asynchronous request and the tricky deadlock (see
2070  * Documentation/filesystems/fuse.txt).
2071  *
2072  * Aborting requests under I/O goes as follows: 1: Separate out unlocked
2073  * requests; they should be finished off immediately.  Locked requests will
2074  * be finished after unlock; see unlock_request().  2: Finish off the
2075  * unlocked requests.  A request may finish on its own before we get to
2076  * it; that is fine, since such a request is removed from the list before
2077  * we touch it.
2078  */
2079 void fuse_abort_conn(struct fuse_conn *fc)
2080 {
2081         struct fuse_iqueue *fiq = &fc->iq;
2082 
2083         spin_lock(&fc->lock);
2084         if (fc->connected) {
2085                 struct fuse_dev *fud;
2086                 struct fuse_req *req, *next;
2087                 LIST_HEAD(to_end1);
2088                 LIST_HEAD(to_end2);
2089 
2090                 fc->connected = 0;
2091                 fc->blocked = 0;
2092                 fuse_set_initialized(fc);
2093                 list_for_each_entry(fud, &fc->devices, entry) {
2094                         struct fuse_pqueue *fpq = &fud->pq;
2095 
2096                         spin_lock(&fpq->lock);
2097                         fpq->connected = 0;
2098                         list_for_each_entry_safe(req, next, &fpq->io, list) {
2099                                 req->out.h.error = -ECONNABORTED;
2100                                 spin_lock(&req->waitq.lock);
2101                                 set_bit(FR_ABORTED, &req->flags);
2102                                 if (!test_bit(FR_LOCKED, &req->flags)) {
2103                                         set_bit(FR_PRIVATE, &req->flags);
2104                                         list_move(&req->list, &to_end1);
2105                                 }
2106                                 spin_unlock(&req->waitq.lock);
2107                         }
2108                         list_splice_init(&fpq->processing, &to_end2);
2109                         spin_unlock(&fpq->lock);
2110                 }
2111                 fc->max_background = UINT_MAX;
2112                 flush_bg_queue(fc);
2113 
2114                 spin_lock(&fiq->waitq.lock);
2115                 fiq->connected = 0;
2116                 list_splice_init(&fiq->pending, &to_end2);
2117                 list_for_each_entry(req, &to_end2, list)
2118                         clear_bit(FR_PENDING, &req->flags);
2119                 while (forget_pending(fiq))
2120                         kfree(dequeue_forget(fiq, 1, NULL));
2121                 wake_up_all_locked(&fiq->waitq);
2122                 spin_unlock(&fiq->waitq.lock);
2123                 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
2124                 end_polls(fc);
2125                 wake_up_all(&fc->blocked_waitq);
2126                 spin_unlock(&fc->lock);
2127 
2128                 while (!list_empty(&to_end1)) {
2129                         req = list_first_entry(&to_end1, struct fuse_req, list);
2130                         __fuse_get_request(req);
2131                         list_del_init(&req->list);
2132                         request_end(fc, req);
2133                 }
2134                 end_requests(fc, &to_end2);
2135         } else {
2136                 spin_unlock(&fc->lock);
2137         }
2138 }
2139 EXPORT_SYMBOL_GPL(fuse_abort_conn);
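
/*
 * Editor's sketch, not part of dev.c: besides killing the daemon,
 * fuse_abort_conn() can be triggered from userspace through the fuse
 * control filesystem, conventionally mounted at /sys/fs/fuse/connections,
 * where each connection has an "abort" file named after its device id.
 * Path layout and helper name are assumptions based on that convention.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int abort_fuse_conn(unsigned int conn_id)
{
        char path[64];
        int fd, ret;

        snprintf(path, sizeof(path),
                 "/sys/fs/fuse/connections/%u/abort", conn_id);
        fd = open(path, O_WRONLY);
        if (fd < 0)
                return -1;
        ret = write(fd, "1", 1) == 1 ? 0 : -1;  /* any write aborts */
        close(fd);
        return ret;
}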
2140 
2141 int fuse_dev_release(struct inode *inode, struct file *file)
2142 {
2143         struct fuse_dev *fud = fuse_get_dev(file);
2144 
2145         if (fud) {
2146                 struct fuse_conn *fc = fud->fc;
2147                 struct fuse_pqueue *fpq = &fud->pq;
2148 
2149                 WARN_ON(!list_empty(&fpq->io));
2150                 end_requests(fc, &fpq->processing);
2151                 /* Are we the last open device? */
2152                 if (atomic_dec_and_test(&fc->dev_count)) {
2153                         WARN_ON(fc->iq.fasync != NULL);
2154                         fuse_abort_conn(fc);
2155                 }
2156                 fuse_dev_free(fud);
2157         }
2158         return 0;
2159 }
2160 EXPORT_SYMBOL_GPL(fuse_dev_release);
2161 
2162 static int fuse_dev_fasync(int fd, struct file *file, int on)
2163 {
2164         struct fuse_dev *fud = fuse_get_dev(file);
2165 
2166         if (!fud)
2167                 return -EPERM;
2168 
2169         /* No locking - fasync_helper does its own locking */
2170         return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
2171 }
2172 
2173 static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
2174 {
2175         struct fuse_dev *fud;
2176 
2177         if (new->private_data)
2178                 return -EINVAL;
2179 
2180         fud = fuse_dev_alloc(fc);
2181         if (!fud)
2182                 return -ENOMEM;
2183 
2184         new->private_data = fud;
2185         atomic_inc(&fc->dev_count);
2186 
2187         return 0;
2188 }
2189 
2190 static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
2191                            unsigned long arg)
2192 {
2193         int err = -ENOTTY;
2194 
2195         if (cmd == FUSE_DEV_IOC_CLONE) {
2196                 int oldfd;
2197 
2198                 err = -EFAULT;
2199                 if (!get_user(oldfd, (__u32 __user *) arg)) {
2200                         struct file *old = fget(oldfd);
2201 
2202                         err = -EINVAL;
2203                         if (old) {
2204                                 struct fuse_dev *fud = NULL;
2205 
2206                                 /*
2207                                  * Check against file->f_op because CUSE
2208                                  * uses the same ioctl handler.
2209                                  */
2210                                 if (old->f_op == file->f_op &&
2211                                     old->f_cred->user_ns == file->f_cred->user_ns)
2212                                         fud = fuse_get_dev(old);
2213 
2214                                 if (fud) {
2215                                         mutex_lock(&fuse_mutex);
2216                                         err = fuse_device_clone(fud->fc, file);
2217                                         mutex_unlock(&fuse_mutex);
2218                                 }
2219                                 fput(old);
2220                         }
2221                 }
2222         }
2223         return err;
2224 }
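
/*
 * Editor's sketch, not part of dev.c: FUSE_DEV_IOC_CLONE attaches a fresh
 * /dev/fuse fd to an existing session, so multiple threads can each service
 * requests on their own queue.  Note the ioctl argument is a pointer to
 * the fd being cloned, as the get_user() above expects.  Helper name is
 * illustrative only.
 */
#include <fcntl.h>
#include <linux/fuse.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int clone_fuse_session(int session_fd)
{
        int clone_fd = open("/dev/fuse", O_RDWR | O_CLOEXEC);

        if (clone_fd < 0)
                return -1;
        if (ioctl(clone_fd, FUSE_DEV_IOC_CLONE, &session_fd) < 0) {
                close(clone_fd);
                return -1;
        }
        return clone_fd;        /* serves the same fuse_conn as session_fd */
}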
2225 
2226 const struct file_operations fuse_dev_operations = {
2227         .owner          = THIS_MODULE,
2228         .open           = fuse_dev_open,
2229         .llseek         = no_llseek,
2230         .read_iter      = fuse_dev_read,
2231         .splice_read    = fuse_dev_splice_read,
2232         .write_iter     = fuse_dev_write,
2233         .splice_write   = fuse_dev_splice_write,
2234         .poll           = fuse_dev_poll,
2235         .release        = fuse_dev_release,
2236         .fasync         = fuse_dev_fasync,
2237         .unlocked_ioctl = fuse_dev_ioctl,
2238         .compat_ioctl   = fuse_dev_ioctl,
2239 };
2240 EXPORT_SYMBOL_GPL(fuse_dev_operations);
2241 
2242 static struct miscdevice fuse_miscdevice = {
2243         .minor = FUSE_MINOR,
2244         .name  = "fuse",
2245         .fops = &fuse_dev_operations,
2246 };
2247 
2248 int __init fuse_dev_init(void)
2249 {
2250         int err = -ENOMEM;
2251         fuse_req_cachep = kmem_cache_create("fuse_request",
2252                                             sizeof(struct fuse_req),
2253                                             0, 0, NULL);
2254         if (!fuse_req_cachep)
2255                 goto out;
2256 
2257         err = misc_register(&fuse_miscdevice);
2258         if (err)
2259                 goto out_cache_clean;
2260 
2261         return 0;
2262 
2263  out_cache_clean:
2264         kmem_cache_destroy(fuse_req_cachep);
2265  out:
2266         return err;
2267 }
2268 
2269 void fuse_dev_cleanup(void)
2270 {
2271         misc_deregister(&fuse_miscdevice);
2272         kmem_cache_destroy(fuse_req_cachep);
2273 }
2274 
