TOMOYO Linux Cross Reference
Linux/fs/ceph/caps.c

  1 #include <linux/ceph/ceph_debug.h>
  2 
  3 #include <linux/fs.h>
  4 #include <linux/kernel.h>
  5 #include <linux/sched.h>
  6 #include <linux/slab.h>
  7 #include <linux/vmalloc.h>
  8 #include <linux/wait.h>
  9 #include <linux/writeback.h>
 10 
 11 #include "super.h"
 12 #include "mds_client.h"
 13 #include "cache.h"
 14 #include <linux/ceph/decode.h>
 15 #include <linux/ceph/messenger.h>
 16 
 17 /*
 18  * Capability management
 19  *
 20  * The Ceph metadata servers control client access to inode metadata
 21  * and file data by issuing capabilities, granting clients permission
 22  * to read and/or write both inode fields and file data to OSDs
 23  * (storage nodes).  Each capability consists of a set of bits
 24  * indicating which operations are allowed.
 25  *
 26  * If the client holds a *_SHARED cap, the client has a coherent value
 27  * that can be safely read from the cached inode.
 28  *
 29  * In the case of *_EXCL (exclusive) or FILE_WR capabilities, the
 30  * client is allowed to change inode attributes (e.g., file size,
 31  * mtime), note its dirty state in the ceph_cap, and asynchronously
 32  * flush that metadata change to the MDS.
 33  *
 34  * In the event of a conflicting operation (perhaps by another
 35  * client), the MDS will revoke the conflicting client capabilities.
 36  *
 37  * In order for a client to cache an inode, it must hold a capability
 38  * with at least one MDS server.  When inodes are released, release
 39  * notifications are batched and periodically sent en masse to the MDS
 40  * cluster to release server state.
 41  */
 42 
 43 
 44 /*
 45  * Generate readable cap strings for debugging output.
 46  */
 47 #define MAX_CAP_STR 20
 48 static char cap_str[MAX_CAP_STR][40];
 49 static DEFINE_SPINLOCK(cap_str_lock);
 50 static int last_cap_str;
 51 
 52 static char *gcap_string(char *s, int c)
 53 {
 54         if (c & CEPH_CAP_GSHARED)
 55                 *s++ = 's';
 56         if (c & CEPH_CAP_GEXCL)
 57                 *s++ = 'x';
 58         if (c & CEPH_CAP_GCACHE)
 59                 *s++ = 'c';
 60         if (c & CEPH_CAP_GRD)
 61                 *s++ = 'r';
 62         if (c & CEPH_CAP_GWR)
 63                 *s++ = 'w';
 64         if (c & CEPH_CAP_GBUFFER)
 65                 *s++ = 'b';
 66         if (c & CEPH_CAP_GLAZYIO)
 67                 *s++ = 'l';
 68         return s;
 69 }
 70 
 71 const char *ceph_cap_string(int caps)
 72 {
 73         int i;
 74         char *s;
 75         int c;
 76 
 77         spin_lock(&cap_str_lock);
 78         i = last_cap_str++;
 79         if (last_cap_str == MAX_CAP_STR)
 80                 last_cap_str = 0;
 81         spin_unlock(&cap_str_lock);
 82 
 83         s = cap_str[i];
 84 
 85         if (caps & CEPH_CAP_PIN)
 86                 *s++ = 'p';
 87 
 88         c = (caps >> CEPH_CAP_SAUTH) & 3;
 89         if (c) {
 90                 *s++ = 'A';
 91                 s = gcap_string(s, c);
 92         }
 93 
 94         c = (caps >> CEPH_CAP_SLINK) & 3;
 95         if (c) {
 96                 *s++ = 'L';
 97                 s = gcap_string(s, c);
 98         }
 99 
100         c = (caps >> CEPH_CAP_SXATTR) & 3;
101         if (c) {
102                 *s++ = 'X';
103                 s = gcap_string(s, c);
104         }
105 
106         c = caps >> CEPH_CAP_SFILE;
107         if (c) {
108                 *s++ = 'F';
109                 s = gcap_string(s, c);
110         }
111 
112         if (s == cap_str[i])
113                 *s++ = '-';
114         *s = 0;
115         return cap_str[i];
116 }
117 
118 void ceph_caps_init(struct ceph_mds_client *mdsc)
119 {
120         INIT_LIST_HEAD(&mdsc->caps_list);
121         spin_lock_init(&mdsc->caps_list_lock);
122 }
123 
124 void ceph_caps_finalize(struct ceph_mds_client *mdsc)
125 {
126         struct ceph_cap *cap;
127 
128         spin_lock(&mdsc->caps_list_lock);
129         while (!list_empty(&mdsc->caps_list)) {
130                 cap = list_first_entry(&mdsc->caps_list,
131                                        struct ceph_cap, caps_item);
132                 list_del(&cap->caps_item);
133                 kmem_cache_free(ceph_cap_cachep, cap);
134         }
135         mdsc->caps_total_count = 0;
136         mdsc->caps_avail_count = 0;
137         mdsc->caps_use_count = 0;
138         mdsc->caps_reserve_count = 0;
139         mdsc->caps_min_count = 0;
140         spin_unlock(&mdsc->caps_list_lock);
141 }
142 
143 void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta)
144 {
145         spin_lock(&mdsc->caps_list_lock);
146         mdsc->caps_min_count += delta;
147         BUG_ON(mdsc->caps_min_count < 0);
148         spin_unlock(&mdsc->caps_list_lock);
149 }
150 
151 void ceph_reserve_caps(struct ceph_mds_client *mdsc,
152                       struct ceph_cap_reservation *ctx, int need)
153 {
154         int i;
155         struct ceph_cap *cap;
156         int have;
157         int alloc = 0;
158         LIST_HEAD(newcaps);
159 
160         dout("reserve caps ctx=%p need=%d\n", ctx, need);
161 
162         /* first reserve any caps that are already allocated */
163         spin_lock(&mdsc->caps_list_lock);
164         if (mdsc->caps_avail_count >= need)
165                 have = need;
166         else
167                 have = mdsc->caps_avail_count;
168         mdsc->caps_avail_count -= have;
169         mdsc->caps_reserve_count += have;
170         BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
171                                          mdsc->caps_reserve_count +
172                                          mdsc->caps_avail_count);
173         spin_unlock(&mdsc->caps_list_lock);
174 
175         for (i = have; i < need; i++) {
176                 cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
177                 if (!cap)
178                         break;
179                 list_add(&cap->caps_item, &newcaps);
180                 alloc++;
181         }
182         /* we didn't manage to reserve as much as we needed */
183         if (have + alloc != need)
184                 pr_warn("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
185                         ctx, need, have + alloc);
186 
187         spin_lock(&mdsc->caps_list_lock);
188         mdsc->caps_total_count += alloc;
189         mdsc->caps_reserve_count += alloc;
190         list_splice(&newcaps, &mdsc->caps_list);
191 
192         BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
193                                          mdsc->caps_reserve_count +
194                                          mdsc->caps_avail_count);
195         spin_unlock(&mdsc->caps_list_lock);
196 
197         ctx->count = need;
198         dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
199              ctx, mdsc->caps_total_count, mdsc->caps_use_count,
200              mdsc->caps_reserve_count, mdsc->caps_avail_count);
201 }
202 
203 int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
204                         struct ceph_cap_reservation *ctx)
205 {
206         dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
207         if (ctx->count) {
208                 spin_lock(&mdsc->caps_list_lock);
209                 BUG_ON(mdsc->caps_reserve_count < ctx->count);
210                 mdsc->caps_reserve_count -= ctx->count;
211                 mdsc->caps_avail_count += ctx->count;
212                 ctx->count = 0;
213                 dout("unreserve caps %d = %d used + %d resv + %d avail\n",
214                      mdsc->caps_total_count, mdsc->caps_use_count,
215                      mdsc->caps_reserve_count, mdsc->caps_avail_count);
216                 BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
217                                                  mdsc->caps_reserve_count +
218                                                  mdsc->caps_avail_count);
219                 spin_unlock(&mdsc->caps_list_lock);
220         }
221         return 0;
222 }
223 
224 struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
225                               struct ceph_cap_reservation *ctx)
226 {
227         struct ceph_cap *cap = NULL;
228 
229         /* temporary, until we do something about cap import/export */
230         if (!ctx) {
231                 cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
232                 if (cap) {
233                         spin_lock(&mdsc->caps_list_lock);
234                         mdsc->caps_use_count++;
235                         mdsc->caps_total_count++;
236                         spin_unlock(&mdsc->caps_list_lock);
237                 }
238                 return cap;
239         }
240 
241         spin_lock(&mdsc->caps_list_lock);
242         dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
243              ctx, ctx->count, mdsc->caps_total_count, mdsc->caps_use_count,
244              mdsc->caps_reserve_count, mdsc->caps_avail_count);
245         BUG_ON(!ctx->count);
246         BUG_ON(ctx->count > mdsc->caps_reserve_count);
247         BUG_ON(list_empty(&mdsc->caps_list));
248 
249         ctx->count--;
250         mdsc->caps_reserve_count--;
251         mdsc->caps_use_count++;
252 
253         cap = list_first_entry(&mdsc->caps_list, struct ceph_cap, caps_item);
254         list_del(&cap->caps_item);
255 
256         BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
257                mdsc->caps_reserve_count + mdsc->caps_avail_count);
258         spin_unlock(&mdsc->caps_list_lock);
259         return cap;
260 }
261 
262 void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap)
263 {
264         spin_lock(&mdsc->caps_list_lock);
265         dout("put_cap %p %d = %d used + %d resv + %d avail\n",
266              cap, mdsc->caps_total_count, mdsc->caps_use_count,
267              mdsc->caps_reserve_count, mdsc->caps_avail_count);
268         mdsc->caps_use_count--;
269         /*
270          * Keep some preallocated caps around (ceph_min_count), to
271          * avoid lots of free/alloc churn.
272          */
273         if (mdsc->caps_avail_count >= mdsc->caps_reserve_count +
274                                       mdsc->caps_min_count) {
275                 mdsc->caps_total_count--;
276                 kmem_cache_free(ceph_cap_cachep, cap);
277         } else {
278                 mdsc->caps_avail_count++;
279                 list_add(&cap->caps_item, &mdsc->caps_list);
280         }
281 
282         BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
283                mdsc->caps_reserve_count + mdsc->caps_avail_count);
284         spin_unlock(&mdsc->caps_list_lock);
285 }
286 
287 void ceph_reservation_status(struct ceph_fs_client *fsc,
288                              int *total, int *avail, int *used, int *reserved,
289                              int *min)
290 {
291         struct ceph_mds_client *mdsc = fsc->mdsc;
292 
293         if (total)
294                 *total = mdsc->caps_total_count;
295         if (avail)
296                 *avail = mdsc->caps_avail_count;
297         if (used)
298                 *used = mdsc->caps_use_count;
299         if (reserved)
300                 *reserved = mdsc->caps_reserve_count;
301         if (min)
302                 *min = mdsc->caps_min_count;
303 }
304 
305 /*
306  * Find ceph_cap for given mds, if any.
307  *
308  * Called with i_ceph_lock held.
309  */
310 static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
311 {
312         struct ceph_cap *cap;
313         struct rb_node *n = ci->i_caps.rb_node;
314 
315         while (n) {
316                 cap = rb_entry(n, struct ceph_cap, ci_node);
317                 if (mds < cap->mds)
318                         n = n->rb_left;
319                 else if (mds > cap->mds)
320                         n = n->rb_right;
321                 else
322                         return cap;
323         }
324         return NULL;
325 }
326 
327 struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
328 {
329         struct ceph_cap *cap;
330 
331         spin_lock(&ci->i_ceph_lock);
332         cap = __get_cap_for_mds(ci, mds);
333         spin_unlock(&ci->i_ceph_lock);
334         return cap;
335 }
336 
337 /*
338  * Return id of any MDS with a cap, preferably FILE_WR|BUFFER|EXCL, else -1.
339  */
340 static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
341 {
342         struct ceph_cap *cap;
343         int mds = -1;
344         struct rb_node *p;
345 
346         /* prefer mds with WR|BUFFER|EXCL caps */
347         for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
348                 cap = rb_entry(p, struct ceph_cap, ci_node);
349                 mds = cap->mds;
350                 if (cap->issued & (CEPH_CAP_FILE_WR |
351                                    CEPH_CAP_FILE_BUFFER |
352                                    CEPH_CAP_FILE_EXCL))
353                         break;
354         }
355         return mds;
356 }
357 
358 int ceph_get_cap_mds(struct inode *inode)
359 {
360         struct ceph_inode_info *ci = ceph_inode(inode);
361         int mds;
362         spin_lock(&ci->i_ceph_lock);
363         mds = __ceph_get_cap_mds(ceph_inode(inode));
364         spin_unlock(&ci->i_ceph_lock);
365         return mds;
366 }
367 
368 /*
369  * Called under i_ceph_lock.
370  */
371 static void __insert_cap_node(struct ceph_inode_info *ci,
372                               struct ceph_cap *new)
373 {
374         struct rb_node **p = &ci->i_caps.rb_node;
375         struct rb_node *parent = NULL;
376         struct ceph_cap *cap = NULL;
377 
378         while (*p) {
379                 parent = *p;
380                 cap = rb_entry(parent, struct ceph_cap, ci_node);
381                 if (new->mds < cap->mds)
382                         p = &(*p)->rb_left;
383                 else if (new->mds > cap->mds)
384                         p = &(*p)->rb_right;
385                 else
386                         BUG();
387         }
388 
389         rb_link_node(&new->ci_node, parent, p);
390         rb_insert_color(&new->ci_node, &ci->i_caps);
391 }
392 
393 /*
394  * (re)set cap hold timeouts, which control the delayed release
395  * of unused caps back to the MDS.  Should be called on cap use.
396  */
397 static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
398                                struct ceph_inode_info *ci)
399 {
400         struct ceph_mount_options *ma = mdsc->fsc->mount_options;
401 
402         ci->i_hold_caps_min = round_jiffies(jiffies +
403                                             ma->caps_wanted_delay_min * HZ);
404         ci->i_hold_caps_max = round_jiffies(jiffies +
405                                             ma->caps_wanted_delay_max * HZ);
406         dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
407              ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies);
408 }
409 
410 /*
411  * (Re)queue cap at the end of the delayed cap release list.
412  *
413  * If I_FLUSH is set, leave the inode at the front of the list.
414  *
415  * Caller holds i_ceph_lock
416  *    -> we take mdsc->cap_delay_lock
417  */
418 static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
419                                 struct ceph_inode_info *ci)
420 {
421         __cap_set_timeouts(mdsc, ci);
422         dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
423              ci->i_ceph_flags, ci->i_hold_caps_max);
424         if (!mdsc->stopping) {
425                 spin_lock(&mdsc->cap_delay_lock);
426                 if (!list_empty(&ci->i_cap_delay_list)) {
427                         if (ci->i_ceph_flags & CEPH_I_FLUSH)
428                                 goto no_change;
429                         list_del_init(&ci->i_cap_delay_list);
430                 }
431                 list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
432 no_change:
433                 spin_unlock(&mdsc->cap_delay_lock);
434         }
435 }
436 
437 /*
438  * Queue an inode for immediate writeback.  Mark inode with I_FLUSH,
439  * indicating we should send a cap message to flush dirty metadata
440  * asap, and move to the front of the delayed cap list.
441  */
442 static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
443                                       struct ceph_inode_info *ci)
444 {
445         dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
446         spin_lock(&mdsc->cap_delay_lock);
447         ci->i_ceph_flags |= CEPH_I_FLUSH;
448         if (!list_empty(&ci->i_cap_delay_list))
449                 list_del_init(&ci->i_cap_delay_list);
450         list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
451         spin_unlock(&mdsc->cap_delay_lock);
452 }
453 
454 /*
455  * Cancel delayed work on cap.
456  *
457  * Caller must hold i_ceph_lock.
458  */
459 static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
460                                struct ceph_inode_info *ci)
461 {
462         dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
463         if (list_empty(&ci->i_cap_delay_list))
464                 return;
465         spin_lock(&mdsc->cap_delay_lock);
466         list_del_init(&ci->i_cap_delay_list);
467         spin_unlock(&mdsc->cap_delay_lock);
468 }
469 
470 /*
471  * Common issue checks for add_cap, handle_cap_grant.
472  */
473 static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
474                               unsigned issued)
475 {
476         unsigned had = __ceph_caps_issued(ci, NULL);
477 
478         /*
479          * Each time we receive FILE_CACHE anew, we increment
480          * i_rdcache_gen.
481          */
482         if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
483             (had & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0) {
484                 ci->i_rdcache_gen++;
485         }
486 
487         /*
488          * if we are newly issued FILE_SHARED, mark dir not complete; we
489          * don't know what happened to this directory while we didn't
490          * have the cap.
491          */
492         if ((issued & CEPH_CAP_FILE_SHARED) &&
493             (had & CEPH_CAP_FILE_SHARED) == 0) {
494                 ci->i_shared_gen++;
495                 if (S_ISDIR(ci->vfs_inode.i_mode)) {
496                         dout(" marking %p NOT complete\n", &ci->vfs_inode);
497                         __ceph_dir_clear_complete(ci);
498                 }
499         }
500 }
501 
502 /*
503  * Add a capability under the given MDS session.
504  *
505  * Caller should hold session snap_rwsem (read) and s_mutex.
506  *
507  * @fmode is the open file mode, if we are opening a file, otherwise
508  * it is < 0.  (This is so we can atomically add the cap and add an
509  * open file reference to it.)
510  */
511 void ceph_add_cap(struct inode *inode,
512                   struct ceph_mds_session *session, u64 cap_id,
513                   int fmode, unsigned issued, unsigned wanted,
514                   unsigned seq, unsigned mseq, u64 realmino, int flags,
515                   struct ceph_cap **new_cap)
516 {
517         struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
518         struct ceph_inode_info *ci = ceph_inode(inode);
519         struct ceph_cap *cap;
520         int mds = session->s_mds;
521         int actual_wanted;
522 
523         dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
524              session->s_mds, cap_id, ceph_cap_string(issued), seq);
525 
526         /*
527          * If we are opening the file, include file mode wanted bits
528          * in wanted.
529          */
530         if (fmode >= 0)
531                 wanted |= ceph_caps_for_mode(fmode);
532 
533         cap = __get_cap_for_mds(ci, mds);
534         if (!cap) {
535                 cap = *new_cap;
536                 *new_cap = NULL;
537 
538                 cap->issued = 0;
539                 cap->implemented = 0;
540                 cap->mds = mds;
541                 cap->mds_wanted = 0;
542                 cap->mseq = 0;
543 
544                 cap->ci = ci;
545                 __insert_cap_node(ci, cap);
546 
547                 /* add to session cap list */
548                 cap->session = session;
549                 spin_lock(&session->s_cap_lock);
550                 list_add_tail(&cap->session_caps, &session->s_caps);
551                 session->s_nr_caps++;
552                 spin_unlock(&session->s_cap_lock);
553         } else {
554                 /*
555                  * auth mds of the inode changed. we received the cap export
556                  * message, but still haven't received the cap import message.
557                  * handle_cap_export() updated the new auth MDS' cap.
558                  *
559                  * "ceph_seq_cmp(seq, cap->seq) <= 0" means we are processing
 560  * a message that was sent before the cap import message. So
561                  * don't remove caps.
562                  */
563                 if (ceph_seq_cmp(seq, cap->seq) <= 0) {
564                         WARN_ON(cap != ci->i_auth_cap);
565                         WARN_ON(cap->cap_id != cap_id);
566                         seq = cap->seq;
567                         mseq = cap->mseq;
568                         issued |= cap->issued;
569                         flags |= CEPH_CAP_FLAG_AUTH;
570                 }
571         }
572 
573         if (!ci->i_snap_realm) {
574                 /*
575                  * add this inode to the appropriate snap realm
576                  */
577                 struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
578                                                                realmino);
579                 if (realm) {
580                         spin_lock(&realm->inodes_with_caps_lock);
581                         ci->i_snap_realm = realm;
582                         list_add(&ci->i_snap_realm_item,
583                                  &realm->inodes_with_caps);
584                         spin_unlock(&realm->inodes_with_caps_lock);
585                 } else {
586                         pr_err("ceph_add_cap: couldn't find snap realm %llx\n",
587                                realmino);
588                         WARN_ON(!realm);
589                 }
590         }
591 
592         __check_cap_issue(ci, cap, issued);
593 
594         /*
595          * If we are issued caps we don't want, or the mds' wanted
596          * value appears to be off, queue a check so we'll release
597          * later and/or update the mds wanted value.
598          */
599         actual_wanted = __ceph_caps_wanted(ci);
600         if ((wanted & ~actual_wanted) ||
601             (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
602                 dout(" issued %s, mds wanted %s, actual %s, queueing\n",
603                      ceph_cap_string(issued), ceph_cap_string(wanted),
604                      ceph_cap_string(actual_wanted));
605                 __cap_delay_requeue(mdsc, ci);
606         }
607 
608         if (flags & CEPH_CAP_FLAG_AUTH) {
609                 if (ci->i_auth_cap == NULL ||
610                     ceph_seq_cmp(ci->i_auth_cap->mseq, mseq) < 0) {
611                         ci->i_auth_cap = cap;
612                         cap->mds_wanted = wanted;
613                 }
614         } else {
615                 WARN_ON(ci->i_auth_cap == cap);
616         }
617 
618         dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
619              inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
620              ceph_cap_string(issued|cap->issued), seq, mds);
621         cap->cap_id = cap_id;
622         cap->issued = issued;
623         cap->implemented |= issued;
624         if (ceph_seq_cmp(mseq, cap->mseq) > 0)
625                 cap->mds_wanted = wanted;
626         else
627                 cap->mds_wanted |= wanted;
628         cap->seq = seq;
629         cap->issue_seq = seq;
630         cap->mseq = mseq;
631         cap->cap_gen = session->s_cap_gen;
632 
633         if (fmode >= 0)
634                 __ceph_get_fmode(ci, fmode);
635 }
636 
637 /*
638  * Return true if cap has not timed out and belongs to the current
639  * generation of the MDS session (i.e. has not gone 'stale' due to
640  * us losing touch with the mds).
641  */
642 static int __cap_is_valid(struct ceph_cap *cap)
643 {
644         unsigned long ttl;
645         u32 gen;
646 
647         spin_lock(&cap->session->s_gen_ttl_lock);
648         gen = cap->session->s_cap_gen;
649         ttl = cap->session->s_cap_ttl;
650         spin_unlock(&cap->session->s_gen_ttl_lock);
651 
652         if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
653                 dout("__cap_is_valid %p cap %p issued %s "
654                      "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
655                      cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
656                 return 0;
657         }
658 
659         return 1;
660 }
661 
662 /*
663  * Return set of valid cap bits issued to us.  Note that caps time
664  * out, and may be invalidated in bulk if the client session times out
665  * and session->s_cap_gen is bumped.
666  */
667 int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
668 {
669         int have = ci->i_snap_caps;
670         struct ceph_cap *cap;
671         struct rb_node *p;
672 
673         if (implemented)
674                 *implemented = 0;
675         for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
676                 cap = rb_entry(p, struct ceph_cap, ci_node);
677                 if (!__cap_is_valid(cap))
678                         continue;
679                 dout("__ceph_caps_issued %p cap %p issued %s\n",
680                      &ci->vfs_inode, cap, ceph_cap_string(cap->issued));
681                 have |= cap->issued;
682                 if (implemented)
683                         *implemented |= cap->implemented;
684         }
685         /*
 686          * exclude caps issued by a non-auth MDS that are being revoked
687          * by the auth MDS. The non-auth MDS should be revoking/exporting
688          * these caps, but the message is delayed.
689          */
690         if (ci->i_auth_cap) {
691                 cap = ci->i_auth_cap;
692                 have &= ~cap->implemented | cap->issued;
693         }
694         return have;
695 }
696 
697 /*
698  * Get cap bits issued by caps other than @ocap
699  */
700 int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
701 {
702         int have = ci->i_snap_caps;
703         struct ceph_cap *cap;
704         struct rb_node *p;
705 
706         for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
707                 cap = rb_entry(p, struct ceph_cap, ci_node);
708                 if (cap == ocap)
709                         continue;
710                 if (!__cap_is_valid(cap))
711                         continue;
712                 have |= cap->issued;
713         }
714         return have;
715 }
716 
717 /*
718  * Move a cap to the end of the LRU (oldest caps at list head, newest
719  * at list tail).
720  */
721 static void __touch_cap(struct ceph_cap *cap)
722 {
723         struct ceph_mds_session *s = cap->session;
724 
725         spin_lock(&s->s_cap_lock);
726         if (s->s_cap_iterator == NULL) {
727                 dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
728                      s->s_mds);
729                 list_move_tail(&cap->session_caps, &s->s_caps);
730         } else {
731                 dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
732                      &cap->ci->vfs_inode, cap, s->s_mds);
733         }
734         spin_unlock(&s->s_cap_lock);
735 }
736 
737 /*
738  * Check if we hold the given mask.  If so, move the cap(s) to the
 739  * end of their respective LRUs.  (This is the preferred way for
740  * callers to check for caps they want.)
741  */
742 int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
743 {
744         struct ceph_cap *cap;
745         struct rb_node *p;
746         int have = ci->i_snap_caps;
747 
748         if ((have & mask) == mask) {
749                 dout("__ceph_caps_issued_mask %p snap issued %s"
750                      " (mask %s)\n", &ci->vfs_inode,
751                      ceph_cap_string(have),
752                      ceph_cap_string(mask));
753                 return 1;
754         }
755 
756         for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
757                 cap = rb_entry(p, struct ceph_cap, ci_node);
758                 if (!__cap_is_valid(cap))
759                         continue;
760                 if ((cap->issued & mask) == mask) {
761                         dout("__ceph_caps_issued_mask %p cap %p issued %s"
762                              " (mask %s)\n", &ci->vfs_inode, cap,
763                              ceph_cap_string(cap->issued),
764                              ceph_cap_string(mask));
765                         if (touch)
766                                 __touch_cap(cap);
767                         return 1;
768                 }
769 
770                 /* does a combination of caps satisfy mask? */
771                 have |= cap->issued;
772                 if ((have & mask) == mask) {
773                         dout("__ceph_caps_issued_mask %p combo issued %s"
774                              " (mask %s)\n", &ci->vfs_inode,
775                              ceph_cap_string(cap->issued),
776                              ceph_cap_string(mask));
777                         if (touch) {
778                                 struct rb_node *q;
779 
780                                 /* touch this + preceding caps */
781                                 __touch_cap(cap);
782                                 for (q = rb_first(&ci->i_caps); q != p;
783                                      q = rb_next(q)) {
784                                         cap = rb_entry(q, struct ceph_cap,
785                                                        ci_node);
786                                         if (!__cap_is_valid(cap))
787                                                 continue;
788                                         __touch_cap(cap);
789                                 }
790                         }
791                         return 1;
792                 }
793         }
794 
795         return 0;
796 }
797 
798 /*
799  * Return true if mask caps are currently being revoked by an MDS.
800  */
801 int __ceph_caps_revoking_other(struct ceph_inode_info *ci,
802                                struct ceph_cap *ocap, int mask)
803 {
804         struct ceph_cap *cap;
805         struct rb_node *p;
806 
807         for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
808                 cap = rb_entry(p, struct ceph_cap, ci_node);
809                 if (cap != ocap &&
810                     (cap->implemented & ~cap->issued & mask))
811                         return 1;
812         }
813         return 0;
814 }
815 
816 int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
817 {
818         struct inode *inode = &ci->vfs_inode;
819         int ret;
820 
821         spin_lock(&ci->i_ceph_lock);
822         ret = __ceph_caps_revoking_other(ci, NULL, mask);
823         spin_unlock(&ci->i_ceph_lock);
824         dout("ceph_caps_revoking %p %s = %d\n", inode,
825              ceph_cap_string(mask), ret);
826         return ret;
827 }
828 
829 int __ceph_caps_used(struct ceph_inode_info *ci)
830 {
831         int used = 0;
832         if (ci->i_pin_ref)
833                 used |= CEPH_CAP_PIN;
834         if (ci->i_rd_ref)
835                 used |= CEPH_CAP_FILE_RD;
836         if (ci->i_rdcache_ref ||
837             (!S_ISDIR(ci->vfs_inode.i_mode) && /* ignore readdir cache */
838              ci->vfs_inode.i_data.nrpages))
839                 used |= CEPH_CAP_FILE_CACHE;
840         if (ci->i_wr_ref)
841                 used |= CEPH_CAP_FILE_WR;
842         if (ci->i_wb_ref || ci->i_wrbuffer_ref)
843                 used |= CEPH_CAP_FILE_BUFFER;
844         return used;
845 }
846 
847 /*
848  * wanted, by virtue of open file modes
849  */
850 int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
851 {
852         int want = 0;
853         int mode;
854         for (mode = 0; mode < CEPH_FILE_MODE_NUM; mode++)
855                 if (ci->i_nr_by_mode[mode])
856                         want |= ceph_caps_for_mode(mode);
857         return want;
858 }
859 
860 /*
861  * Return caps we have registered with the MDS(s) as 'wanted'.
862  */
863 int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
864 {
865         struct ceph_cap *cap;
866         struct rb_node *p;
867         int mds_wanted = 0;
868 
869         for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
870                 cap = rb_entry(p, struct ceph_cap, ci_node);
871                 if (!__cap_is_valid(cap))
872                         continue;
873                 if (cap == ci->i_auth_cap)
874                         mds_wanted |= cap->mds_wanted;
875                 else
876                         mds_wanted |= (cap->mds_wanted & ~CEPH_CAP_ANY_FILE_WR);
877         }
878         return mds_wanted;
879 }
880 
881 /*
882  * called under i_ceph_lock
883  */
884 static int __ceph_is_any_caps(struct ceph_inode_info *ci)
885 {
886         return !RB_EMPTY_ROOT(&ci->i_caps);
887 }
888 
889 int ceph_is_any_caps(struct inode *inode)
890 {
891         struct ceph_inode_info *ci = ceph_inode(inode);
892         int ret;
893 
894         spin_lock(&ci->i_ceph_lock);
895         ret = __ceph_is_any_caps(ci);
896         spin_unlock(&ci->i_ceph_lock);
897 
898         return ret;
899 }
900 
901 static void drop_inode_snap_realm(struct ceph_inode_info *ci)
902 {
903         struct ceph_snap_realm *realm = ci->i_snap_realm;
904         spin_lock(&realm->inodes_with_caps_lock);
905         list_del_init(&ci->i_snap_realm_item);
906         ci->i_snap_realm_counter++;
907         ci->i_snap_realm = NULL;
908         spin_unlock(&realm->inodes_with_caps_lock);
909         ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc,
910                             realm);
911 }
912 
913 /*
914  * Remove a cap.  Take steps to deal with a racing iterate_session_caps.
915  *
916  * caller should hold i_ceph_lock.
917  * caller will not hold session s_mutex if called from destroy_inode.
918  */
919 void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
920 {
921         struct ceph_mds_session *session = cap->session;
922         struct ceph_inode_info *ci = cap->ci;
923         struct ceph_mds_client *mdsc =
924                 ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
925         int removed = 0;
926 
927         dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
928 
929         /* remove from session list */
930         spin_lock(&session->s_cap_lock);
931         if (session->s_cap_iterator == cap) {
932                 /* not yet, we are iterating over this very cap */
933                 dout("__ceph_remove_cap  delaying %p removal from session %p\n",
934                      cap, cap->session);
935         } else {
936                 list_del_init(&cap->session_caps);
937                 session->s_nr_caps--;
938                 cap->session = NULL;
939                 removed = 1;
940         }
941         /* protect backpointer with s_cap_lock: see iterate_session_caps */
942         cap->ci = NULL;
943 
944         /*
945          * s_cap_reconnect is protected by s_cap_lock. no one changes
946          * s_cap_gen while session is in the reconnect state.
947          */
948         if (queue_release &&
949             (!session->s_cap_reconnect || cap->cap_gen == session->s_cap_gen)) {
950                 cap->queue_release = 1;
951                 if (removed) {
952                         list_add_tail(&cap->session_caps,
953                                       &session->s_cap_releases);
954                         session->s_num_cap_releases++;
955                         removed = 0;
956                 }
957         } else {
958                 cap->queue_release = 0;
959         }
960         cap->cap_ino = ci->i_vino.ino;
961 
962         spin_unlock(&session->s_cap_lock);
963 
964         /* remove from inode list */
965         rb_erase(&cap->ci_node, &ci->i_caps);
966         if (ci->i_auth_cap == cap)
967                 ci->i_auth_cap = NULL;
968 
969         if (removed)
970                 ceph_put_cap(mdsc, cap);
971 
 972         /* when reconnect is denied, we remove session caps forcibly;
 973          * i_wr_ref can be non-zero. If there are ongoing writes,
974          * keep i_snap_realm.
975          */
976         if (!__ceph_is_any_caps(ci) && ci->i_wr_ref == 0 && ci->i_snap_realm)
977                 drop_inode_snap_realm(ci);
978 
979         if (!__ceph_is_any_real_caps(ci))
980                 __cap_delay_cancel(mdsc, ci);
981 }
982 
983 /*
984  * Build and send a cap message to the given MDS.
985  *
986  * Caller should be holding s_mutex.
987  */
988 static int send_cap_msg(struct ceph_mds_session *session,
989                         u64 ino, u64 cid, int op,
990                         int caps, int wanted, int dirty,
991                         u32 seq, u64 flush_tid, u64 oldest_flush_tid,
992                         u32 issue_seq, u32 mseq, u64 size, u64 max_size,
993                         struct timespec *mtime, struct timespec *atime,
994                         struct timespec *ctime, u64 time_warp_seq,
995                         kuid_t uid, kgid_t gid, umode_t mode,
996                         u64 xattr_version,
997                         struct ceph_buffer *xattrs_buf,
998                         u64 follows, bool inline_data)
999 {
1000         struct ceph_mds_caps *fc;
1001         struct ceph_msg *msg;
1002         void *p;
1003         size_t extra_len;
1004 
1005         dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
1006              " seq %u/%u tid %llu/%llu mseq %u follows %lld size %llu/%llu"
1007              " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(op),
1008              cid, ino, ceph_cap_string(caps), ceph_cap_string(wanted),
1009              ceph_cap_string(dirty),
1010              seq, issue_seq, flush_tid, oldest_flush_tid,
1011              mseq, follows, size, max_size,
1012              xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);
1013 
1014         /* flock buffer size + inline version + inline data size +
1015          * osd_epoch_barrier + oldest_flush_tid */
1016         extra_len = 4 + 8 + 4 + 4 + 8;
1017         msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc) + extra_len,
1018                            GFP_NOFS, false);
1019         if (!msg)
1020                 return -ENOMEM;
1021 
1022         msg->hdr.version = cpu_to_le16(6);
1023         msg->hdr.tid = cpu_to_le64(flush_tid);
1024 
1025         fc = msg->front.iov_base;
1026         memset(fc, 0, sizeof(*fc));
1027 
1028         fc->cap_id = cpu_to_le64(cid);
1029         fc->op = cpu_to_le32(op);
1030         fc->seq = cpu_to_le32(seq);
1031         fc->issue_seq = cpu_to_le32(issue_seq);
1032         fc->migrate_seq = cpu_to_le32(mseq);
1033         fc->caps = cpu_to_le32(caps);
1034         fc->wanted = cpu_to_le32(wanted);
1035         fc->dirty = cpu_to_le32(dirty);
1036         fc->ino = cpu_to_le64(ino);
1037         fc->snap_follows = cpu_to_le64(follows);
1038 
1039         fc->size = cpu_to_le64(size);
1040         fc->max_size = cpu_to_le64(max_size);
1041         if (mtime)
1042                 ceph_encode_timespec(&fc->mtime, mtime);
1043         if (atime)
1044                 ceph_encode_timespec(&fc->atime, atime);
1045         if (ctime)
1046                 ceph_encode_timespec(&fc->ctime, ctime);
1047         fc->time_warp_seq = cpu_to_le32(time_warp_seq);
1048 
1049         fc->uid = cpu_to_le32(from_kuid(&init_user_ns, uid));
1050         fc->gid = cpu_to_le32(from_kgid(&init_user_ns, gid));
1051         fc->mode = cpu_to_le32(mode);
1052 
1053         p = fc + 1;
1054         /* flock buffer size */
1055         ceph_encode_32(&p, 0);
1056         /* inline version */
1057         ceph_encode_64(&p, inline_data ? 0 : CEPH_INLINE_NONE);
1058         /* inline data size */
1059         ceph_encode_32(&p, 0);
1060         /* osd_epoch_barrier */
1061         ceph_encode_32(&p, 0);
1062         /* oldest_flush_tid */
1063         ceph_encode_64(&p, oldest_flush_tid);
1064 
1065         fc->xattr_version = cpu_to_le64(xattr_version);
1066         if (xattrs_buf) {
1067                 msg->middle = ceph_buffer_get(xattrs_buf);
1068                 fc->xattr_len = cpu_to_le32(xattrs_buf->vec.iov_len);
1069                 msg->hdr.middle_len = cpu_to_le32(xattrs_buf->vec.iov_len);
1070         }
1071 
1072         ceph_con_send(&session->s_con, msg);
1073         return 0;
1074 }
1075 
1076 /*
1077  * Queue cap releases when an inode is dropped from our cache.  Since
1078  * inode is about to be destroyed, there is no need for i_ceph_lock.
1079  */
1080 void ceph_queue_caps_release(struct inode *inode)
1081 {
1082         struct ceph_inode_info *ci = ceph_inode(inode);
1083         struct rb_node *p;
1084 
1085         p = rb_first(&ci->i_caps);
1086         while (p) {
1087                 struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
1088                 p = rb_next(p);
1089                 __ceph_remove_cap(cap, true);
1090         }
1091 }
1092 
1093 /*
1094  * Send a cap msg on the given inode.  Update our caps state, then
1095  * drop i_ceph_lock and send the message.
1096  *
1097  * Make note of max_size reported/requested from mds, revoked caps
1098  * that have now been implemented.
1099  *
1100  * Make a half-hearted attempt to invalidate the page cache if we are
1101  * dropping RDCACHE.  Note that this will leave behind locked pages
1102  * that we'll then need to deal with elsewhere.
1103  *
1104  * Return non-zero if delayed release, or we experienced an error
1105  * such that the caller should requeue + retry later.
1106  *
1107  * called with i_ceph_lock, then drops it.
1108  * caller should hold snap_rwsem (read), s_mutex.
1109  */
1110 static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
1111                       int op, int used, int want, int retain, int flushing,
1112                       u64 flush_tid, u64 oldest_flush_tid)
1113         __releases(cap->ci->i_ceph_lock)
1114 {
1115         struct ceph_inode_info *ci = cap->ci;
1116         struct inode *inode = &ci->vfs_inode;
1117         u64 cap_id = cap->cap_id;
1118         int held, revoking, dropping, keep;
1119         u64 seq, issue_seq, mseq, time_warp_seq, follows;
1120         u64 size, max_size;
1121         struct timespec mtime, atime, ctime;
1122         int wake = 0;
1123         umode_t mode;
1124         kuid_t uid;
1125         kgid_t gid;
1126         struct ceph_mds_session *session;
1127         u64 xattr_version = 0;
1128         struct ceph_buffer *xattr_blob = NULL;
1129         int delayed = 0;
1130         int ret;
1131         bool inline_data;
1132 
1133         held = cap->issued | cap->implemented;
1134         revoking = cap->implemented & ~cap->issued;
1135         retain &= ~revoking;
1136         dropping = cap->issued & ~retain;
1137 
1138         dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
1139              inode, cap, cap->session,
1140              ceph_cap_string(held), ceph_cap_string(held & retain),
1141              ceph_cap_string(revoking));
1142         BUG_ON((retain & CEPH_CAP_PIN) == 0);
1143 
1144         session = cap->session;
1145 
1146         /* don't release wanted unless we've waited a bit. */
1147         if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
1148             time_before(jiffies, ci->i_hold_caps_min)) {
1149                 dout(" delaying issued %s -> %s, wanted %s -> %s on send\n",
1150                      ceph_cap_string(cap->issued),
1151                      ceph_cap_string(cap->issued & retain),
1152                      ceph_cap_string(cap->mds_wanted),
1153                      ceph_cap_string(want));
1154                 want |= cap->mds_wanted;
1155                 retain |= cap->issued;
1156                 delayed = 1;
1157         }
1158         ci->i_ceph_flags &= ~(CEPH_I_NODELAY | CEPH_I_FLUSH);
1159 
1160         cap->issued &= retain;  /* drop bits we don't want */
1161         if (cap->implemented & ~cap->issued) {
1162                 /*
1163                  * Wake up any waiters on wanted -> needed transition.
1164                  * This is due to the weird transition from buffered
1165                  * to sync IO... we need to flush dirty pages _before_
1166                  * allowing sync writes to avoid reordering.
1167                  */
1168                 wake = 1;
1169         }
1170         cap->implemented &= cap->issued | used;
1171         cap->mds_wanted = want;
1172 
1173         follows = flushing ? ci->i_head_snapc->seq : 0;
1174 
1175         keep = cap->implemented;
1176         seq = cap->seq;
1177         issue_seq = cap->issue_seq;
1178         mseq = cap->mseq;
1179         size = inode->i_size;
1180         ci->i_reported_size = size;
1181         max_size = ci->i_wanted_max_size;
1182         ci->i_requested_max_size = max_size;
1183         mtime = inode->i_mtime;
1184         atime = inode->i_atime;
1185         ctime = inode->i_ctime;
1186         time_warp_seq = ci->i_time_warp_seq;
1187         uid = inode->i_uid;
1188         gid = inode->i_gid;
1189         mode = inode->i_mode;
1190 
1191         if (flushing & CEPH_CAP_XATTR_EXCL) {
1192                 __ceph_build_xattrs_blob(ci);
1193                 xattr_blob = ci->i_xattrs.blob;
1194                 xattr_version = ci->i_xattrs.version;
1195         }
1196 
1197         inline_data = ci->i_inline_version != CEPH_INLINE_NONE;
1198 
1199         spin_unlock(&ci->i_ceph_lock);
1200 
1201         ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
1202                 op, keep, want, flushing, seq,
1203                 flush_tid, oldest_flush_tid, issue_seq, mseq,
1204                 size, max_size, &mtime, &atime, &ctime, time_warp_seq,
1205                 uid, gid, mode, xattr_version, xattr_blob,
1206                 follows, inline_data);
1207         if (ret < 0) {
1208                 dout("error sending cap msg, must requeue %p\n", inode);
1209                 delayed = 1;
1210         }
1211 
1212         if (wake)
1213                 wake_up_all(&ci->i_cap_wq);
1214 
1215         return delayed;
1216 }
1217 
1218 /*
1219  * When a snapshot is taken, clients accumulate dirty metadata on
1220  * inodes with capabilities in ceph_cap_snaps to describe the file
1221  * state at the time the snapshot was taken.  This must be flushed
1222  * asynchronously back to the MDS once sync writes complete and dirty
1223  * data is written out.
1224  *
1225  * Unless @kick is true, skip cap_snaps that were already sent to
1226  * the MDS (i.e., during this session).
1227  *
1228  * Called under i_ceph_lock.  Takes s_mutex as needed.
1229  */
1230 void __ceph_flush_snaps(struct ceph_inode_info *ci,
1231                         struct ceph_mds_session **psession,
1232                         int kick)
1233                 __releases(ci->i_ceph_lock)
1234                 __acquires(ci->i_ceph_lock)
1235 {
1236         struct inode *inode = &ci->vfs_inode;
1237         int mds;
1238         struct ceph_cap_snap *capsnap;
1239         u32 mseq;
1240         struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
1241         struct ceph_mds_session *session = NULL; /* if session != NULL, we hold
1242                                                     session->s_mutex */
1243         u64 next_follows = 0;  /* keep track of how far we've gotten through the
1244                              i_cap_snaps list, and skip these entries next time
1245                              around to avoid an infinite loop */
1246 
1247         if (psession)
1248                 session = *psession;
1249 
1250         dout("__flush_snaps %p\n", inode);
1251 retry:
1252         list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
1253                 /* avoid an infinite loop after retry */
1254                 if (capsnap->follows < next_follows)
1255                         continue;
1256                 /*
1257                  * we need to wait for sync writes to complete and for dirty
1258                  * pages to be written out.
1259                  */
1260                 if (capsnap->dirty_pages || capsnap->writing)
1261                         break;
1262 
1263                 /* should be removed by ceph_try_drop_cap_snap() */
1264                 BUG_ON(!capsnap->need_flush);
1265 
1266                 /* pick mds, take s_mutex */
1267                 if (ci->i_auth_cap == NULL) {
1268                         dout("no auth cap (migrating?), doing nothing\n");
1269                         goto out;
1270                 }
1271 
1272                 /* only flush each capsnap once */
1273                 if (!kick && !list_empty(&capsnap->flushing_item)) {
1274                         dout("already flushed %p, skipping\n", capsnap);
1275                         continue;
1276                 }
1277 
1278                 mds = ci->i_auth_cap->session->s_mds;
1279                 mseq = ci->i_auth_cap->mseq;
1280 
1281                 if (session && session->s_mds != mds) {
1282                         dout("oops, wrong session %p mutex\n", session);
1283                         if (kick)
1284                                 goto out;
1285 
1286                         mutex_unlock(&session->s_mutex);
1287                         ceph_put_mds_session(session);
1288                         session = NULL;
1289                 }
1290                 if (!session) {
1291                         spin_unlock(&ci->i_ceph_lock);
1292                         mutex_lock(&mdsc->mutex);
1293                         session = __ceph_lookup_mds_session(mdsc, mds);
1294                         mutex_unlock(&mdsc->mutex);
1295                         if (session) {
1296                                 dout("inverting session/ino locks on %p\n",
1297                                      session);
1298                                 mutex_lock(&session->s_mutex);
1299                         }
1300                         /*
1301                          * if session == NULL, we raced against a cap
1302                          * deletion or migration.  retry, and we'll
1303                          * get a better @mds value next time.
1304                          */
1305                         spin_lock(&ci->i_ceph_lock);
1306                         goto retry;
1307                 }
1308 
1309                 spin_lock(&mdsc->cap_dirty_lock);
1310                 capsnap->flush_tid = ++mdsc->last_cap_flush_tid;
1311                 spin_unlock(&mdsc->cap_dirty_lock);
1312 
1313                 atomic_inc(&capsnap->nref);
1314                 if (list_empty(&capsnap->flushing_item))
1315                         list_add_tail(&capsnap->flushing_item,
1316                                       &session->s_cap_snaps_flushing);
1317                 spin_unlock(&ci->i_ceph_lock);
1318 
1319                 dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
1320                      inode, capsnap, capsnap->follows, capsnap->flush_tid);
1321                 send_cap_msg(session, ceph_vino(inode).ino, 0,
1322                              CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
1323                              capsnap->dirty, 0, capsnap->flush_tid, 0,
1324                              0, mseq, capsnap->size, 0,
1325                              &capsnap->mtime, &capsnap->atime,
1326                              &capsnap->ctime, capsnap->time_warp_seq,
1327                              capsnap->uid, capsnap->gid, capsnap->mode,
1328                              capsnap->xattr_version, capsnap->xattr_blob,
1329                              capsnap->follows, capsnap->inline_data);
1330 
1331                 next_follows = capsnap->follows + 1;
1332                 ceph_put_cap_snap(capsnap);
1333 
1334                 spin_lock(&ci->i_ceph_lock);
1335                 goto retry;
1336         }
1337 
1338         /* we flushed them all; remove this inode from the queue */
1339         spin_lock(&mdsc->snap_flush_lock);
1340         list_del_init(&ci->i_snap_flush_item);
1341         spin_unlock(&mdsc->snap_flush_lock);
1342 
1343 out:
1344         if (psession)
1345                 *psession = session;
1346         else if (session) {
1347                 mutex_unlock(&session->s_mutex);
1348                 ceph_put_mds_session(session);
1349         }
1350 }
1351 
1352 static void ceph_flush_snaps(struct ceph_inode_info *ci)
1353 {
1354         spin_lock(&ci->i_ceph_lock);
1355         __ceph_flush_snaps(ci, NULL, 0);
1356         spin_unlock(&ci->i_ceph_lock);
1357 }
1358 
1359 /*
1360  * Mark caps dirty.  If inode is newly dirty, return the dirty flags.
1361  * Caller is then responsible for calling __mark_inode_dirty with the
1362  * returned flags value.
1363  */
1364 int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
1365                            struct ceph_cap_flush **pcf)
1366 {
1367         struct ceph_mds_client *mdsc =
1368                 ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
1369         struct inode *inode = &ci->vfs_inode;
1370         int was = ci->i_dirty_caps;
1371         int dirty = 0;
1372 
1373         if (!ci->i_auth_cap) {
1374                 pr_warn("__mark_dirty_caps %p %llx mask %s, "
1375                         "but no auth cap (session was closed?)\n",
1376                         inode, ceph_ino(inode), ceph_cap_string(mask));
1377                 return 0;
1378         }
1379 
1380         dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
1381              ceph_cap_string(mask), ceph_cap_string(was),
1382              ceph_cap_string(was | mask));
1383         ci->i_dirty_caps |= mask;
1384         if (was == 0) {
1385                 WARN_ON_ONCE(ci->i_prealloc_cap_flush);
1386                 swap(ci->i_prealloc_cap_flush, *pcf);
1387 
1388                 if (!ci->i_head_snapc) {
1389                         WARN_ON_ONCE(!rwsem_is_locked(&mdsc->snap_rwsem));
1390                         ci->i_head_snapc = ceph_get_snap_context(
1391                                 ci->i_snap_realm->cached_context);
1392                 }
1393                 dout(" inode %p now dirty snapc %p auth cap %p\n",
1394                      &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap);
1395                 BUG_ON(!list_empty(&ci->i_dirty_item));
1396                 spin_lock(&mdsc->cap_dirty_lock);
1397                 list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
1398                 spin_unlock(&mdsc->cap_dirty_lock);
1399                 if (ci->i_flushing_caps == 0) {
1400                         ihold(inode);
1401                         dirty |= I_DIRTY_SYNC;
1402                 }
1403         } else {
1404                 WARN_ON_ONCE(!ci->i_prealloc_cap_flush);
1405         }
1406         BUG_ON(list_empty(&ci->i_dirty_item));
1407         if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
1408             (mask & CEPH_CAP_FILE_BUFFER))
1409                 dirty |= I_DIRTY_DATASYNC;
1410         __cap_delay_requeue(mdsc, ci);
1411         return dirty;
1412 }
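
/*
 * Editor's note: the helper below is an illustrative sketch added for this
 * write-up; it is not part of the original caps.c.  It shows the usual
 * caller pattern for __ceph_mark_dirty_caps() in the write paths: a
 * ceph_cap_flush is preallocated before taking i_ceph_lock (marking caps
 * dirty must not allocate under the spinlock), the caps are marked dirty
 * under the lock, and the returned flags are handed to __mark_inode_dirty().
 * The function name is hypothetical.
 */
static int example_mark_file_wr_dirty(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_cap_flush *prealloc_cf;
        int dirty;

        prealloc_cf = ceph_alloc_cap_flush();   /* GFP_KERNEL, may sleep */
        if (!prealloc_cf)
                return -ENOMEM;

        spin_lock(&ci->i_ceph_lock);
        dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, &prealloc_cf);
        spin_unlock(&ci->i_ceph_lock);

        if (dirty)
                __mark_inode_dirty(inode, dirty);

        /* a no-op if __ceph_mark_dirty_caps() consumed the preallocation */
        ceph_free_cap_flush(prealloc_cf);
        return 0;
}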
1413 
1414 static void __add_cap_flushing_to_inode(struct ceph_inode_info *ci,
1415                                         struct ceph_cap_flush *cf)
1416 {
1417         struct rb_node **p = &ci->i_cap_flush_tree.rb_node;
1418         struct rb_node *parent = NULL;
1419         struct ceph_cap_flush *other = NULL;
1420 
1421         while (*p) {
1422                 parent = *p;
1423                 other = rb_entry(parent, struct ceph_cap_flush, i_node);
1424 
1425                 if (cf->tid < other->tid)
1426                         p = &(*p)->rb_left;
1427                 else if (cf->tid > other->tid)
1428                         p = &(*p)->rb_right;
1429                 else
1430                         BUG();
1431         }
1432 
1433         rb_link_node(&cf->i_node, parent, p);
1434         rb_insert_color(&cf->i_node, &ci->i_cap_flush_tree);
1435 }
1436 
1437 static void __add_cap_flushing_to_mdsc(struct ceph_mds_client *mdsc,
1438                                        struct ceph_cap_flush *cf)
1439 {
1440         struct rb_node **p = &mdsc->cap_flush_tree.rb_node;
1441         struct rb_node *parent = NULL;
1442         struct ceph_cap_flush *other = NULL;
1443 
1444         while (*p) {
1445                 parent = *p;
1446                 other = rb_entry(parent, struct ceph_cap_flush, g_node);
1447 
1448                 if (cf->tid < other->tid)
1449                         p = &(*p)->rb_left;
1450                 else if (cf->tid > other->tid)
1451                         p = &(*p)->rb_right;
1452                 else
1453                         BUG();
1454         }
1455 
1456         rb_link_node(&cf->g_node, parent, p);
1457         rb_insert_color(&cf->g_node, &mdsc->cap_flush_tree);
1458 }
1459 
1460 struct ceph_cap_flush *ceph_alloc_cap_flush(void)
1461 {
1462         return kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
1463 }
1464 
1465 void ceph_free_cap_flush(struct ceph_cap_flush *cf)
1466 {
1467         if (cf)
1468                 kmem_cache_free(ceph_cap_flush_cachep, cf);
1469 }
1470 
1471 static u64 __get_oldest_flush_tid(struct ceph_mds_client *mdsc)
1472 {
1473         struct rb_node *n = rb_first(&mdsc->cap_flush_tree);
1474         if (n) {
1475                 struct ceph_cap_flush *cf =
1476                         rb_entry(n, struct ceph_cap_flush, g_node);
1477                 return cf->tid;
1478         }
1479         return 0;
1480 }
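
/*
 * Editor's note: hedged illustrative sketch, not in the original file.
 * Both cap-flush rbtrees (the per-inode i_cap_flush_tree and the global
 * mdsc->cap_flush_tree) are keyed by the monotonically increasing flush
 * tid, so rb_first() always yields the oldest pending flush.  This mirrors
 * __get_oldest_flush_tid() above, but for the per-inode tree; the helper
 * name is hypothetical and the caller would hold i_ceph_lock.
 */
static u64 example_oldest_inode_flush_tid(struct ceph_inode_info *ci)
{
        struct rb_node *n = rb_first(&ci->i_cap_flush_tree);

        if (n) {
                struct ceph_cap_flush *cf =
                        rb_entry(n, struct ceph_cap_flush, i_node);
                return cf->tid;
        }
        return 0;       /* no cap flush in progress for this inode */
}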
1481 
1482 /*
1483  * Add the dirty inode to the flushing list.  Assign a seq number so we
1484  * can wait for caps to flush without starving.
1485  *
1486  * Called under i_ceph_lock.
1487  */
1488 static int __mark_caps_flushing(struct inode *inode,
1489                                 struct ceph_mds_session *session,
1490                                 u64 *flush_tid, u64 *oldest_flush_tid)
1491 {
1492         struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
1493         struct ceph_inode_info *ci = ceph_inode(inode);
1494         struct ceph_cap_flush *cf = NULL;
1495         int flushing;
1496 
1497         BUG_ON(ci->i_dirty_caps == 0);
1498         BUG_ON(list_empty(&ci->i_dirty_item));
1499         BUG_ON(!ci->i_prealloc_cap_flush);
1500 
1501         flushing = ci->i_dirty_caps;
1502         dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
1503              ceph_cap_string(flushing),
1504              ceph_cap_string(ci->i_flushing_caps),
1505              ceph_cap_string(ci->i_flushing_caps | flushing));
1506         ci->i_flushing_caps |= flushing;
1507         ci->i_dirty_caps = 0;
1508         dout(" inode %p now !dirty\n", inode);
1509 
1510         swap(cf, ci->i_prealloc_cap_flush);
1511         cf->caps = flushing;
1512 
1513         spin_lock(&mdsc->cap_dirty_lock);
1514         list_del_init(&ci->i_dirty_item);
1515 
1516         cf->tid = ++mdsc->last_cap_flush_tid;
1517         __add_cap_flushing_to_mdsc(mdsc, cf);
1518         *oldest_flush_tid = __get_oldest_flush_tid(mdsc);
1519 
1520         if (list_empty(&ci->i_flushing_item)) {
1521                 list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
1522                 mdsc->num_cap_flushing++;
1523                 dout(" inode %p now flushing tid %llu\n", inode, cf->tid);
1524         } else {
1525                 list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing);
1526                 dout(" inode %p now flushing (more) tid %llu\n",
1527                      inode, cf->tid);
1528         }
1529         spin_unlock(&mdsc->cap_dirty_lock);
1530 
1531         __add_cap_flushing_to_inode(ci, cf);
1532 
1533         *flush_tid = cf->tid;
1534         return flushing;
1535 }
1536 
1537 /*
1538  * try to invalidate mapping pages without blocking.
1539  */
1540 static int try_nonblocking_invalidate(struct inode *inode)
1541 {
1542         struct ceph_inode_info *ci = ceph_inode(inode);
1543         u32 invalidating_gen = ci->i_rdcache_gen;
1544 
1545         spin_unlock(&ci->i_ceph_lock);
1546         invalidate_mapping_pages(&inode->i_data, 0, -1);
1547         spin_lock(&ci->i_ceph_lock);
1548 
1549         if (inode->i_data.nrpages == 0 &&
1550             invalidating_gen == ci->i_rdcache_gen) {
1551                 /* success. */
1552                 dout("try_nonblocking_invalidate %p success\n", inode);
1553                 /* save any racing async invalidate some trouble */
1554                 ci->i_rdcache_revoking = ci->i_rdcache_gen - 1;
1555                 return 0;
1556         }
1557         dout("try_nonblocking_invalidate %p failed\n", inode);
1558         return -1;
1559 }
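
/*
 * Editor's note: hedged sketch, not part of the original file.  It spells
 * out the caller contract of try_nonblocking_invalidate(): i_ceph_lock must
 * be held on entry and is held again on return, and on failure the caller
 * falls back to the asynchronous invalidate work queue, much as
 * ceph_check_caps() below does.  The wrapper name is hypothetical.
 */
static void example_invalidate_cached_pages(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        bool queue = false;

        spin_lock(&ci->i_ceph_lock);
        if (try_nonblocking_invalidate(inode) < 0) {
                /* pages were locked/busy; let the workqueue retry later */
                ci->i_rdcache_revoking = ci->i_rdcache_gen;
                queue = true;
        }
        spin_unlock(&ci->i_ceph_lock);

        if (queue)
                ceph_queue_invalidate(inode);
}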
1560 
1561 /*
1562  * Swiss army knife function to examine currently used and wanted
1563  * versus held caps.  Release, flush, or ack revoked caps to the MDS as
1564  * appropriate.
1565  *
1566  *  CHECK_CAPS_NODELAY - caller is delayed work and we should not delay
1567  *    cap release further.
1568  *  CHECK_CAPS_AUTHONLY - we should only check the auth cap
1569  *  CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
1570  *    further delay.
1571  */
1572 void ceph_check_caps(struct ceph_inode_info *ci, int flags,
1573                      struct ceph_mds_session *session)
1574 {
1575         struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
1576         struct ceph_mds_client *mdsc = fsc->mdsc;
1577         struct inode *inode = &ci->vfs_inode;
1578         struct ceph_cap *cap;
1579         u64 flush_tid, oldest_flush_tid;
1580         int file_wanted, used, cap_used;
1581         int took_snap_rwsem = 0;             /* true if mdsc->snap_rwsem held */
1582         int issued, implemented, want, retain, revoking, flushing = 0;
1583         int mds = -1;   /* keep track of how far we've gone through i_caps list
1584                            to avoid an infinite loop on retry */
1585         struct rb_node *p;
1586         int tried_invalidate = 0;
1587         int delayed = 0, sent = 0, force_requeue = 0, num;
1588         int queue_invalidate = 0;
1589         int is_delayed = flags & CHECK_CAPS_NODELAY;
1590 
1591         /* if we are unmounting, flush any unused caps immediately. */
1592         if (mdsc->stopping)
1593                 is_delayed = 1;
1594 
1595         spin_lock(&ci->i_ceph_lock);
1596 
1597         if (ci->i_ceph_flags & CEPH_I_FLUSH)
1598                 flags |= CHECK_CAPS_FLUSH;
1599 
1600         /* flush snaps first time around only */
1601         if (!list_empty(&ci->i_cap_snaps))
1602                 __ceph_flush_snaps(ci, &session, 0);
1603         goto retry_locked;
1604 retry:
1605         spin_lock(&ci->i_ceph_lock);
1606 retry_locked:
1607         file_wanted = __ceph_caps_file_wanted(ci);
1608         used = __ceph_caps_used(ci);
1609         issued = __ceph_caps_issued(ci, &implemented);
1610         revoking = implemented & ~issued;
1611 
1612         want = file_wanted;
1613         retain = file_wanted | used | CEPH_CAP_PIN;
1614         if (!mdsc->stopping && inode->i_nlink > 0) {
1615                 if (file_wanted) {
1616                         retain |= CEPH_CAP_ANY;       /* be greedy */
1617                 } else if (S_ISDIR(inode->i_mode) &&
1618                            (issued & CEPH_CAP_FILE_SHARED) &&
1619                             __ceph_dir_is_complete(ci)) {
1620                         /*
1621                          * If a directory is complete, we want to keep
1622                          * the exclusive cap, so that the MDS does not end up
1623                          * revoking the shared cap on every create/unlink
1624                          * operation.
1625                          */
1626                         want = CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_EXCL;
1627                         retain |= want;
1628                 } else {
1629 
1630                         retain |= CEPH_CAP_ANY_SHARED;
1631                         /*
1632                          * keep RD only if we didn't have the file open RW,
1633                          * because then the mds would revoke it anyway to
1634                          * journal max_size=0.
1635                          */
1636                         if (ci->i_max_size == 0)
1637                                 retain |= CEPH_CAP_ANY_RD;
1638                 }
1639         }
1640 
1641         dout("check_caps %p file_want %s used %s dirty %s flushing %s"
1642              " issued %s revoking %s retain %s %s%s%s\n", inode,
1643              ceph_cap_string(file_wanted),
1644              ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
1645              ceph_cap_string(ci->i_flushing_caps),
1646              ceph_cap_string(issued), ceph_cap_string(revoking),
1647              ceph_cap_string(retain),
1648              (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
1649              (flags & CHECK_CAPS_NODELAY) ? " NODELAY" : "",
1650              (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "");
1651 
1652         /*
1653          * If we no longer need to hold onto our old caps, and we may
1654          * have cached pages that we no longer want, then try to invalidate.
1655          * If we fail, it's because pages are locked... try again later.
1656          */
1657         if ((!is_delayed || mdsc->stopping) &&
1658             !S_ISDIR(inode->i_mode) &&          /* ignore readdir cache */
1659             !(ci->i_wb_ref || ci->i_wrbuffer_ref) &&   /* no dirty pages... */
1660             inode->i_data.nrpages &&            /* have cached pages */
1661             (revoking & (CEPH_CAP_FILE_CACHE|
1662                          CEPH_CAP_FILE_LAZYIO)) && /*  or revoking cache */
1663             !tried_invalidate) {
1664                 dout("check_caps trying to invalidate on %p\n", inode);
1665                 if (try_nonblocking_invalidate(inode) < 0) {
1666                         if (revoking & (CEPH_CAP_FILE_CACHE|
1667                                         CEPH_CAP_FILE_LAZYIO)) {
1668                                 dout("check_caps queuing invalidate\n");
1669                                 queue_invalidate = 1;
1670                                 ci->i_rdcache_revoking = ci->i_rdcache_gen;
1671                         } else {
1672                                 dout("check_caps failed to invalidate pages\n");
1673                                 /* we failed to invalidate pages.  check these
1674                                    caps again later. */
1675                                 force_requeue = 1;
1676                                 __cap_set_timeouts(mdsc, ci);
1677                         }
1678                 }
1679                 tried_invalidate = 1;
1680                 goto retry_locked;
1681         }
1682 
1683         num = 0;
1684         for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
1685                 cap = rb_entry(p, struct ceph_cap, ci_node);
1686                 num++;
1687 
1688                 /* avoid looping forever */
1689                 if (mds >= cap->mds ||
1690                     ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap))
1691                         continue;
1692 
1693                 /* NOTE: no side-effects allowed, until we take s_mutex */
1694 
1695                 cap_used = used;
1696                 if (ci->i_auth_cap && cap != ci->i_auth_cap)
1697                         cap_used &= ~ci->i_auth_cap->issued;
1698 
1699                 revoking = cap->implemented & ~cap->issued;
1700                 dout(" mds%d cap %p used %s issued %s implemented %s revoking %s\n",
1701                      cap->mds, cap, ceph_cap_string(cap_used),
1702                      ceph_cap_string(cap->issued),
1703                      ceph_cap_string(cap->implemented),
1704                      ceph_cap_string(revoking));
1705 
1706                 if (cap == ci->i_auth_cap &&
1707                     (cap->issued & CEPH_CAP_FILE_WR)) {
1708                         /* request larger max_size from MDS? */
1709                         if (ci->i_wanted_max_size > ci->i_max_size &&
1710                             ci->i_wanted_max_size > ci->i_requested_max_size) {
1711                                 dout("requesting new max_size\n");
1712                                 goto ack;
1713                         }
1714 
1715                         /* approaching file_max? */
1716                         if ((inode->i_size << 1) >= ci->i_max_size &&
1717                             (ci->i_reported_size << 1) < ci->i_max_size) {
1718                                 dout("i_size approaching max_size\n");
1719                                 goto ack;
1720                         }
1721                 }
1722                 /* flush anything dirty? */
1723                 if (cap == ci->i_auth_cap && (flags & CHECK_CAPS_FLUSH) &&
1724                     ci->i_dirty_caps) {
1725                         dout("flushing dirty caps\n");
1726                         goto ack;
1727                 }
1728 
1729                 /* completed revocation? going down and there are no caps? */
1730                 if (revoking && (revoking & cap_used) == 0) {
1731                         dout("completed revocation of %s\n",
1732                              ceph_cap_string(cap->implemented & ~cap->issued));
1733                         goto ack;
1734                 }
1735 
1736                 /* want more caps from mds? */
1737                 if (want & ~(cap->mds_wanted | cap->issued))
1738                         goto ack;
1739 
1740                 /* things we might delay */
1741                 if ((cap->issued & ~retain) == 0 &&
1742                     cap->mds_wanted == want)
1743                         continue;     /* nope, all good */
1744 
1745                 if (is_delayed)
1746                         goto ack;
1747 
1748                 /* delay? */
1749                 if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
1750                     time_before(jiffies, ci->i_hold_caps_max)) {
1751                         dout(" delaying issued %s -> %s, wanted %s -> %s\n",
1752                              ceph_cap_string(cap->issued),
1753                              ceph_cap_string(cap->issued & retain),
1754                              ceph_cap_string(cap->mds_wanted),
1755                              ceph_cap_string(want));
1756                         delayed++;
1757                         continue;
1758                 }
1759 
1760 ack:
1761                 if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
1762                         dout(" skipping %p I_NOFLUSH set\n", inode);
1763                         continue;
1764                 }
1765 
1766                 if (session && session != cap->session) {
1767                         dout("oops, wrong session %p mutex\n", session);
1768                         mutex_unlock(&session->s_mutex);
1769                         session = NULL;
1770                 }
1771                 if (!session) {
1772                         session = cap->session;
1773                         if (mutex_trylock(&session->s_mutex) == 0) {
1774                                 dout("inverting session/ino locks on %p\n",
1775                                      session);
1776                                 spin_unlock(&ci->i_ceph_lock);
1777                                 if (took_snap_rwsem) {
1778                                         up_read(&mdsc->snap_rwsem);
1779                                         took_snap_rwsem = 0;
1780                                 }
1781                                 mutex_lock(&session->s_mutex);
1782                                 goto retry;
1783                         }
1784                 }
1785                 /* take snap_rwsem after session mutex */
1786                 if (!took_snap_rwsem) {
1787                         if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
1788                                 dout("inverting snap/ino locks on %p\n",
1789                                      inode);
1790                                 spin_unlock(&ci->i_ceph_lock);
1791                                 down_read(&mdsc->snap_rwsem);
1792                                 took_snap_rwsem = 1;
1793                                 goto retry;
1794                         }
1795                         took_snap_rwsem = 1;
1796                 }
1797 
1798                 if (cap == ci->i_auth_cap && ci->i_dirty_caps) {
1799                         flushing = __mark_caps_flushing(inode, session,
1800                                                         &flush_tid,
1801                                                         &oldest_flush_tid);
1802                 } else {
1803                         flushing = 0;
1804                         flush_tid = 0;
1805                         spin_lock(&mdsc->cap_dirty_lock);
1806                         oldest_flush_tid = __get_oldest_flush_tid(mdsc);
1807                         spin_unlock(&mdsc->cap_dirty_lock);
1808                 }
1809 
1810                 mds = cap->mds;  /* remember mds, so we don't repeat */
1811                 sent++;
1812 
1813                 /* __send_cap drops i_ceph_lock */
1814                 delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, cap_used,
1815                                       want, retain, flushing,
1816                                       flush_tid, oldest_flush_tid);
1817                 goto retry; /* retake i_ceph_lock and restart our cap scan. */
1818         }
1819 
1820         /*
1821          * Reschedule delayed caps release if we delayed anything,
1822          * otherwise cancel.
1823          */
1824         if (delayed && is_delayed)
1825                 force_requeue = 1;   /* __send_cap delayed release; requeue */
1826         if (!delayed && !is_delayed)
1827                 __cap_delay_cancel(mdsc, ci);
1828         else if (!is_delayed || force_requeue)
1829                 __cap_delay_requeue(mdsc, ci);
1830 
1831         spin_unlock(&ci->i_ceph_lock);
1832 
1833         if (queue_invalidate)
1834                 ceph_queue_invalidate(inode);
1835 
1836         if (session)
1837                 mutex_unlock(&session->s_mutex);
1838         if (took_snap_rwsem)
1839                 up_read(&mdsc->snap_rwsem);
1840 }
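
/*
 * Editor's note: illustrative sketch only, not part of caps.c.  This is
 * roughly how other parts of the client invoke ceph_check_caps(): without
 * i_ceph_lock held (the function takes and drops it internally), passing
 * the flag bits documented above.  The wrapper name is hypothetical.
 */
static void example_flush_and_release_caps(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);

        /* flush dirty caps now and do not delay cap release any further */
        ceph_check_caps(ci, CHECK_CAPS_FLUSH | CHECK_CAPS_NODELAY, NULL);
}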
1841 
1842 /*
1843  * Try to flush dirty caps back to the auth mds.
1844  */
1845 static int try_flush_caps(struct inode *inode, u64 *ptid)
1846 {
1847         struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
1848         struct ceph_inode_info *ci = ceph_inode(inode);
1849         struct ceph_mds_session *session = NULL;
1850         int flushing = 0;
1851         u64 flush_tid = 0, oldest_flush_tid = 0;
1852 
1853 retry:
1854         spin_lock(&ci->i_ceph_lock);
1855         if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
1856                 dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
1857                 goto out;
1858         }
1859         if (ci->i_dirty_caps && ci->i_auth_cap) {
1860                 struct ceph_cap *cap = ci->i_auth_cap;
1861                 int used = __ceph_caps_used(ci);
1862                 int want = __ceph_caps_wanted(ci);
1863                 int delayed;
1864 
1865                 if (!session || session != cap->session) {
1866                         spin_unlock(&ci->i_ceph_lock);
1867                         if (session)
1868                                 mutex_unlock(&session->s_mutex);
1869                         session = cap->session;
1870                         mutex_lock(&session->s_mutex);
1871                         goto retry;
1872                 }
1873                 if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
1874                         goto out;
1875 
1876                 flushing = __mark_caps_flushing(inode, session, &flush_tid,
1877                                                 &oldest_flush_tid);
1878 
1879                 /* __send_cap drops i_ceph_lock */
1880                 delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
1881                                      (cap->issued | cap->implemented),
1882                                      flushing, flush_tid, oldest_flush_tid);
1883 
1884                 if (delayed) {
1885                         spin_lock(&ci->i_ceph_lock);
1886                         __cap_delay_requeue(mdsc, ci);
1887                         spin_unlock(&ci->i_ceph_lock);
1888                 }
1889         } else {
1890                 struct rb_node *n = rb_last(&ci->i_cap_flush_tree);
1891                 if (n) {
1892                         struct ceph_cap_flush *cf =
1893                                 rb_entry(n, struct ceph_cap_flush, i_node);
1894                         flush_tid = cf->tid;
1895                 }
1896                 flushing = ci->i_flushing_caps;
1897                 spin_unlock(&ci->i_ceph_lock);
1898         }
1899 out:
1900         if (session)
1901                 mutex_unlock(&session->s_mutex);
1902 
1903         *ptid = flush_tid;
1904         return flushing;
1905 }
1906 
1907 /*
1908  * Return true if we've flushed caps through the given flush_tid.
1909  */
1910 static int caps_are_flushed(struct inode *inode, u64 flush_tid)
1911 {
1912         struct ceph_inode_info *ci = ceph_inode(inode);
1913         struct ceph_cap_flush *cf;
1914         struct rb_node *n;
1915         int ret = 1;
1916 
1917         spin_lock(&ci->i_ceph_lock);
1918         n = rb_first(&ci->i_cap_flush_tree);
1919         if (n) {
1920                 cf = rb_entry(n, struct ceph_cap_flush, i_node);
1921                 if (cf->tid <= flush_tid)
1922                         ret = 0;
1923         }
1924         spin_unlock(&ci->i_ceph_lock);
1925         return ret;
1926 }
1927 
1928 /*
1929  * Wait on any unsafe replies for the given inode.  First wait on the
1930  * newest request, and make that the upper bound.  Then, if there are
1931  * more requests, keep waiting on the oldest as long as it is still older
1932  * than the original request.
1933  */
1934 static void sync_write_wait(struct inode *inode)
1935 {
1936         struct ceph_inode_info *ci = ceph_inode(inode);
1937         struct list_head *head = &ci->i_unsafe_writes;
1938         struct ceph_osd_request *req;
1939         u64 last_tid;
1940 
1941         if (!S_ISREG(inode->i_mode))
1942                 return;
1943 
1944         spin_lock(&ci->i_unsafe_lock);
1945         if (list_empty(head))
1946                 goto out;
1947 
1948         /* set upper bound as _last_ entry in chain */
1949         req = list_last_entry(head, struct ceph_osd_request,
1950                               r_unsafe_item);
1951         last_tid = req->r_tid;
1952 
1953         do {
1954                 ceph_osdc_get_request(req);
1955                 spin_unlock(&ci->i_unsafe_lock);
1956                 dout("sync_write_wait on tid %llu (until %llu)\n",
1957                      req->r_tid, last_tid);
1958                 wait_for_completion(&req->r_safe_completion);
1959                 spin_lock(&ci->i_unsafe_lock);
1960                 ceph_osdc_put_request(req);
1961 
1962                 /*
1963                  * from here on look at first entry in chain, since we
1964                  * only want to wait for anything older than last_tid
1965                  */
1966                 if (list_empty(head))
1967                         break;
1968                 req = list_first_entry(head, struct ceph_osd_request,
1969                                        r_unsafe_item);
1970         } while (req->r_tid < last_tid);
1971 out:
1972         spin_unlock(&ci->i_unsafe_lock);
1973 }
1974 
1975 /*
1976  * wait for any unsafe requests to complete.
1977  */
1978 static int unsafe_request_wait(struct inode *inode)
1979 {
1980         struct ceph_inode_info *ci = ceph_inode(inode);
1981         struct ceph_mds_request *req1 = NULL, *req2 = NULL;
1982         int ret, err = 0;
1983 
1984         spin_lock(&ci->i_unsafe_lock);
1985         if (S_ISDIR(inode->i_mode) && !list_empty(&ci->i_unsafe_dirops)) {
1986                 req1 = list_last_entry(&ci->i_unsafe_dirops,
1987                                         struct ceph_mds_request,
1988                                         r_unsafe_dir_item);
1989                 ceph_mdsc_get_request(req1);
1990         }
1991         if (!list_empty(&ci->i_unsafe_iops)) {
1992                 req2 = list_last_entry(&ci->i_unsafe_iops,
1993                                         struct ceph_mds_request,
1994                                         r_unsafe_target_item);
1995                 ceph_mdsc_get_request(req2);
1996         }
1997         spin_unlock(&ci->i_unsafe_lock);
1998 
1999         dout("unsafe_request_wait %p wait on tid %llu %llu\n",
2000              inode, req1 ? req1->r_tid : 0ULL, req2 ? req2->r_tid : 0ULL);
2001         if (req1) {
2002                 ret = !wait_for_completion_timeout(&req1->r_safe_completion,
2003                                         ceph_timeout_jiffies(req1->r_timeout));
2004                 if (ret)
2005                         err = -EIO;
2006                 ceph_mdsc_put_request(req1);
2007         }
2008         if (req2) {
2009                 ret = !wait_for_completion_timeout(&req2->r_safe_completion,
2010                                         ceph_timeout_jiffies(req2->r_timeout));
2011                 if (ret)
2012                         err = -EIO;
2013                 ceph_mdsc_put_request(req2);
2014         }
2015         return err;
2016 }
2017 
2018 int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2019 {
2020         struct inode *inode = file->f_mapping->host;
2021         struct ceph_inode_info *ci = ceph_inode(inode);
2022         u64 flush_tid;
2023         int ret;
2024         int dirty;
2025 
2026         dout("fsync %p%s\n", inode, datasync ? " datasync" : "");
2027         sync_write_wait(inode);
2028 
2029         ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
2030         if (ret < 0)
2031                 goto out;
2032 
2033         if (datasync)
2034                 goto out;
2035 
2036         inode_lock(inode);
2037 
2038         dirty = try_flush_caps(inode, &flush_tid);
2039         dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));
2040 
2041         ret = unsafe_request_wait(inode);
2042 
2043         /*
2044          * only wait on non-file metadata writeback (the mds
2045          * can recover size and mtime, so we don't need to
2046          * wait for that)
2047          */
2048         if (!ret && (dirty & ~CEPH_CAP_ANY_FILE_WR)) {
2049                 ret = wait_event_interruptible(ci->i_cap_wq,
2050                                         caps_are_flushed(inode, flush_tid));
2051         }
2052         inode_unlock(inode);
2053 out:
2054         dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret);
2055         return ret;
2056 }
2057 
2058 /*
2059  * Flush any dirty caps back to the mds.  If we aren't asked to wait,
2060  * queue inode for flush but don't do so immediately, because we can
2061  * get by with fewer MDS messages if we wait for data writeback to
2062  * complete first.
2063  */
2064 int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
2065 {
2066         struct ceph_inode_info *ci = ceph_inode(inode);
2067         u64 flush_tid;
2068         int err = 0;
2069         int dirty;
2070         int wait = wbc->sync_mode == WB_SYNC_ALL;
2071 
2072         dout("write_inode %p wait=%d\n", inode, wait);
2073         if (wait) {
2074                 dirty = try_flush_caps(inode, &flush_tid);
2075                 if (dirty)
2076                         err = wait_event_interruptible(ci->i_cap_wq,
2077                                        caps_are_flushed(inode, flush_tid));
2078         } else {
2079                 struct ceph_mds_client *mdsc =
2080                         ceph_sb_to_client(inode->i_sb)->mdsc;
2081 
2082                 spin_lock(&ci->i_ceph_lock);
2083                 if (__ceph_caps_dirty(ci))
2084                         __cap_delay_requeue_front(mdsc, ci);
2085                 spin_unlock(&ci->i_ceph_lock);
2086         }
2087         return err;
2088 }
2089 
2090 /*
2091  * After a recovering MDS goes active, we need to resend any caps
2092  * we were flushing.
2093  *
2094  * Caller holds session->s_mutex.
2095  */
2096 static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
2097                                    struct ceph_mds_session *session)
2098 {
2099         struct ceph_cap_snap *capsnap;
2100 
2101         dout("kick_flushing_capsnaps mds%d\n", session->s_mds);
2102         list_for_each_entry(capsnap, &session->s_cap_snaps_flushing,
2103                             flushing_item) {
2104                 struct ceph_inode_info *ci = capsnap->ci;
2105                 struct inode *inode = &ci->vfs_inode;
2106                 struct ceph_cap *cap;
2107 
2108                 spin_lock(&ci->i_ceph_lock);
2109                 cap = ci->i_auth_cap;
2110                 if (cap && cap->session == session) {
2111                         dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
2112                              cap, capsnap);
2113                         __ceph_flush_snaps(ci, &session, 1);
2114                 } else {
2115                         pr_err("%p auth cap %p not mds%d ???\n", inode,
2116                                cap, session->s_mds);
2117                 }
2118                 spin_unlock(&ci->i_ceph_lock);
2119         }
2120 }
2121 
2122 static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
2123                                 struct ceph_mds_session *session,
2124                                 struct ceph_inode_info *ci)
2125 {
2126         struct inode *inode = &ci->vfs_inode;
2127         struct ceph_cap *cap;
2128         struct ceph_cap_flush *cf;
2129         struct rb_node *n;
2130         int delayed = 0;
2131         u64 first_tid = 0;
2132         u64 oldest_flush_tid;
2133 
2134         spin_lock(&mdsc->cap_dirty_lock);
2135         oldest_flush_tid = __get_oldest_flush_tid(mdsc);
2136         spin_unlock(&mdsc->cap_dirty_lock);
2137 
2138         while (true) {
2139                 spin_lock(&ci->i_ceph_lock);
2140                 cap = ci->i_auth_cap;
2141                 if (!(cap && cap->session == session)) {
2142                         pr_err("%p auth cap %p not mds%d ???\n", inode,
2143                                         cap, session->s_mds);
2144                         spin_unlock(&ci->i_ceph_lock);
2145                         break;
2146                 }
2147 
2148                 for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) {
2149                         cf = rb_entry(n, struct ceph_cap_flush, i_node);
2150                         if (cf->tid >= first_tid)
2151                                 break;
2152                 }
2153                 if (!n) {
2154                         spin_unlock(&ci->i_ceph_lock);
2155                         break;
2156                 }
2157 
2158                 cf = rb_entry(n, struct ceph_cap_flush, i_node);
2159 
2160                 first_tid = cf->tid + 1;
2161 
2162                 dout("kick_flushing_caps %p cap %p tid %llu %s\n", inode,
2163                      cap, cf->tid, ceph_cap_string(cf->caps));
2164                 delayed |= __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
2165                                       __ceph_caps_used(ci),
2166                                       __ceph_caps_wanted(ci),
2167                                       cap->issued | cap->implemented,
2168                                       cf->caps, cf->tid, oldest_flush_tid);
2169         }
2170         return delayed;
2171 }
2172 
2173 void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
2174                                    struct ceph_mds_session *session)
2175 {
2176         struct ceph_inode_info *ci;
2177         struct ceph_cap *cap;
2178 
2179         dout("early_kick_flushing_caps mds%d\n", session->s_mds);
2180         list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
2181                 spin_lock(&ci->i_ceph_lock);
2182                 cap = ci->i_auth_cap;
2183                 if (!(cap && cap->session == session)) {
2184                         pr_err("%p auth cap %p not mds%d ???\n",
2185                                 &ci->vfs_inode, cap, session->s_mds);
2186                         spin_unlock(&ci->i_ceph_lock);
2187                         continue;
2188                 }
2189 
2190 
2191                 /*
2192                  * if flushing caps were revoked, we re-send the cap flush
2193                  * in the client reconnect stage. This guarantees the MDS processes
2194                  * the cap flush message before issuing the flushing caps to
2195                  * other clients.
2196                  */
2197                 if ((cap->issued & ci->i_flushing_caps) !=
2198                     ci->i_flushing_caps) {
2199                         spin_unlock(&ci->i_ceph_lock);
2200                         if (!__kick_flushing_caps(mdsc, session, ci))
2201                                 continue;
2202                         spin_lock(&ci->i_ceph_lock);
2203                 }
2204 
2205                 spin_unlock(&ci->i_ceph_lock);
2206         }
2207 }
2208 
2209 void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
2210                              struct ceph_mds_session *session)
2211 {
2212         struct ceph_inode_info *ci;
2213 
2214         kick_flushing_capsnaps(mdsc, session);
2215 
2216         dout("kick_flushing_caps mds%d\n", session->s_mds);
2217         list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
2218                 int delayed = __kick_flushing_caps(mdsc, session, ci);
2219                 if (delayed) {
2220                         spin_lock(&ci->i_ceph_lock);
2221                         __cap_delay_requeue(mdsc, ci);
2222                         spin_unlock(&ci->i_ceph_lock);
2223                 }
2224         }
2225 }
2226 
2227 static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
2228                                      struct ceph_mds_session *session,
2229                                      struct inode *inode)
2230 {
2231         struct ceph_inode_info *ci = ceph_inode(inode);
2232         struct ceph_cap *cap;
2233 
2234         spin_lock(&ci->i_ceph_lock);
2235         cap = ci->i_auth_cap;
2236         dout("kick_flushing_inode_caps %p flushing %s\n", inode,
2237              ceph_cap_string(ci->i_flushing_caps));
2238 
2239         __ceph_flush_snaps(ci, &session, 1);
2240 
2241         if (ci->i_flushing_caps) {
2242                 int delayed;
2243 
2244                 spin_lock(&mdsc->cap_dirty_lock);
2245                 list_move_tail(&ci->i_flushing_item,
2246                                &cap->session->s_cap_flushing);
2247                 spin_unlock(&mdsc->cap_dirty_lock);
2248 
2249                 spin_unlock(&ci->i_ceph_lock);
2250 
2251                 delayed = __kick_flushing_caps(mdsc, session, ci);
2252                 if (delayed) {
2253                         spin_lock(&ci->i_ceph_lock);
2254                         __cap_delay_requeue(mdsc, ci);
2255                         spin_unlock(&ci->i_ceph_lock);
2256                 }
2257         } else {
2258                 spin_unlock(&ci->i_ceph_lock);
2259         }
2260 }
2261 
2262 
2263 /*
2264  * Take references to capabilities we hold, so that we don't release
2265  * them to the MDS prematurely.
2266  *
2267  * Protected by i_ceph_lock.
2268  */
2269 static void __take_cap_refs(struct ceph_inode_info *ci, int got,
2270                             bool snap_rwsem_locked)
2271 {
2272         if (got & CEPH_CAP_PIN)
2273                 ci->i_pin_ref++;
2274         if (got & CEPH_CAP_FILE_RD)
2275                 ci->i_rd_ref++;
2276         if (got & CEPH_CAP_FILE_CACHE)
2277                 ci->i_rdcache_ref++;
2278         if (got & CEPH_CAP_FILE_WR) {
2279                 if (ci->i_wr_ref == 0 && !ci->i_head_snapc) {
2280                         BUG_ON(!snap_rwsem_locked);
2281                         ci->i_head_snapc = ceph_get_snap_context(
2282                                         ci->i_snap_realm->cached_context);
2283                 }
2284                 ci->i_wr_ref++;
2285         }
2286         if (got & CEPH_CAP_FILE_BUFFER) {
2287                 if (ci->i_wb_ref == 0)
2288                         ihold(&ci->vfs_inode);
2289                 ci->i_wb_ref++;
2290                 dout("__take_cap_refs %p wb %d -> %d (?)\n",
2291                      &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref);
2292         }
2293 }
2294 
2295 /*
2296  * Try to grab cap references.  Specify those refs we @want, and the
2297  * minimal set we @need.  Also include the larger offset we are writing
2298  * to (when applicable), and check against max_size here as well.
2299  * Note that caller is responsible for ensuring max_size increases are
2300  * requested from the MDS.
2301  */
2302 static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
2303                             loff_t endoff, bool nonblock, int *got, int *err)
2304 {
2305         struct inode *inode = &ci->vfs_inode;
2306         struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
2307         int ret = 0;
2308         int have, implemented;
2309         int file_wanted;
2310         bool snap_rwsem_locked = false;
2311 
2312         dout("get_cap_refs %p need %s want %s\n", inode,
2313              ceph_cap_string(need), ceph_cap_string(want));
2314 
2315 again:
2316         spin_lock(&ci->i_ceph_lock);
2317 
2318         /* make sure file is actually open */
2319         file_wanted = __ceph_caps_file_wanted(ci);
2320         if ((file_wanted & need) != need) {
2321                 dout("try_get_cap_refs need %s file_wanted %s, EBADF\n",
2322                      ceph_cap_string(need), ceph_cap_string(file_wanted));
2323                 *err = -EBADF;
2324                 ret = 1;
2325                 goto out_unlock;
2326         }
2327 
2328         /* finish pending truncate */
2329         while (ci->i_truncate_pending) {
2330                 spin_unlock(&ci->i_ceph_lock);
2331                 if (snap_rwsem_locked) {
2332                         up_read(&mdsc->snap_rwsem);
2333                         snap_rwsem_locked = false;
2334                 }
2335                 __ceph_do_pending_vmtruncate(inode);
2336                 spin_lock(&ci->i_ceph_lock);
2337         }
2338 
2339         have = __ceph_caps_issued(ci, &implemented);
2340 
2341         if (have & need & CEPH_CAP_FILE_WR) {
2342                 if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
2343                         dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
2344                              inode, endoff, ci->i_max_size);
2345                         if (endoff > ci->i_requested_max_size) {
2346                                 *err = -EAGAIN;
2347                                 ret = 1;
2348                         }
2349                         goto out_unlock;
2350                 }
2351                 /*
2352                  * If a sync write is in progress, we must wait, so that we
2353                  * can get a final snapshot value for size+mtime.
2354                  */
2355                 if (__ceph_have_pending_cap_snap(ci)) {
2356                         dout("get_cap_refs %p cap_snap_pending\n", inode);
2357                         goto out_unlock;
2358                 }
2359         }
2360 
2361         if ((have & need) == need) {
2362                 /*
2363                  * Look at (implemented & ~have & not) so that we keep waiting
2364                  * on transition from wanted -> needed caps.  This is needed
2365                  * for WRBUFFER|WR -> WR to avoid a new WR sync write from
2366                  * going before a prior buffered writeback happens.
2367                  */
2368                 int not = want & ~(have & need);
2369                 int revoking = implemented & ~have;
2370                 dout("get_cap_refs %p have %s but not %s (revoking %s)\n",
2371                      inode, ceph_cap_string(have), ceph_cap_string(not),
2372                      ceph_cap_string(revoking));
2373                 if ((revoking & not) == 0) {
2374                         if (!snap_rwsem_locked &&
2375                             !ci->i_head_snapc &&
2376                             (need & CEPH_CAP_FILE_WR)) {
2377                                 if (!down_read_trylock(&mdsc->snap_rwsem)) {
2378                                         /*
2379                                          * we cannot call down_read() when the
2380                                          * task isn't in TASK_RUNNING state
2381                                          */
2382                                         if (nonblock) {
2383                                                 *err = -EAGAIN;
2384                                                 ret = 1;
2385                                                 goto out_unlock;
2386                                         }
2387 
2388                                         spin_unlock(&ci->i_ceph_lock);
2389                                         down_read(&mdsc->snap_rwsem);
2390                                         snap_rwsem_locked = true;
2391                                         goto again;
2392                                 }
2393                                 snap_rwsem_locked = true;
2394                         }
2395                         *got = need | (have & want);
2396                         if ((need & CEPH_CAP_FILE_RD) &&
2397                             !(*got & CEPH_CAP_FILE_CACHE))
2398                                 ceph_disable_fscache_readpage(ci);
2399                         __take_cap_refs(ci, *got, true);
2400                         ret = 1;
2401                 }
2402         } else {
2403                 int session_readonly = false;
2404                 if ((need & CEPH_CAP_FILE_WR) && ci->i_auth_cap) {
2405                         struct ceph_mds_session *s = ci->i_auth_cap->session;
2406                         spin_lock(&s->s_cap_lock);
2407                         session_readonly = s->s_readonly;
2408                         spin_unlock(&s->s_cap_lock);
2409                 }
2410                 if (session_readonly) {
2411                         dout("get_cap_refs %p needed %s but mds%d readonly\n",
2412                              inode, ceph_cap_string(need), ci->i_auth_cap->mds);
2413                         *err = -EROFS;
2414                         ret = 1;
2415                         goto out_unlock;
2416                 }
2417 
2418                 if (ci->i_ceph_flags & CEPH_I_CAP_DROPPED) {
2419                         int mds_wanted;
2420                         if (ACCESS_ONCE(mdsc->fsc->mount_state) ==
2421                             CEPH_MOUNT_SHUTDOWN) {
2422                                 dout("get_cap_refs %p forced umount\n", inode);
2423                                 *err = -EIO;
2424                                 ret = 1;
2425                                 goto out_unlock;
2426                         }
2427                         mds_wanted = __ceph_caps_mds_wanted(ci);
2428                         if ((mds_wanted & need) != need) {
2429                                 dout("get_cap_refs %p caps were dropped"
2430                                      " (session killed?)\n", inode);
2431                                 *err = -ESTALE;
2432                                 ret = 1;
2433                                 goto out_unlock;
2434                         }
2435                         if ((mds_wanted & file_wanted) ==
2436                             (file_wanted & (CEPH_CAP_FILE_RD|CEPH_CAP_FILE_WR)))
2437                                 ci->i_ceph_flags &= ~CEPH_I_CAP_DROPPED;
2438                 }
2439 
2440                 dout("get_cap_refs %p have %s needed %s\n", inode,
2441                      ceph_cap_string(have), ceph_cap_string(need));
2442         }
2443 out_unlock:
2444         spin_unlock(&ci->i_ceph_lock);
2445         if (snap_rwsem_locked)
2446                 up_read(&mdsc->snap_rwsem);
2447 
2448         dout("get_cap_refs %p ret %d got %s\n", inode,
2449              ret, ceph_cap_string(*got));
2450         return ret;
2451 }
2452 
2453 /*
2454  * Check the offset we are writing up to against our current
2455  * max_size.  If necessary, tell the MDS we want to write to
2456  * a larger offset.
2457  */
2458 static void check_max_size(struct inode *inode, loff_t endoff)
2459 {
2460         struct ceph_inode_info *ci = ceph_inode(inode);
2461         int check = 0;
2462 
2463         /* do we need to explicitly request a larger max_size? */
2464         spin_lock(&ci->i_ceph_lock);
2465         if (endoff >= ci->i_max_size && endoff > ci->i_wanted_max_size) {
2466                 dout("write %p at large endoff %llu, req max_size\n",
2467                      inode, endoff);
2468                 ci->i_wanted_max_size = endoff;
2469         }
2470         /* duplicate ceph_check_caps()'s logic */
2471         if (ci->i_auth_cap &&
2472             (ci->i_auth_cap->issued & CEPH_CAP_FILE_WR) &&
2473             ci->i_wanted_max_size > ci->i_max_size &&
2474             ci->i_wanted_max_size > ci->i_requested_max_size)
2475                 check = 1;
2476         spin_unlock(&ci->i_ceph_lock);
2477         if (check)
2478                 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2479 }
2480 
2481 /*
2482  * Wait for caps, and take cap references.  If we can't get a WR cap
2483  * due to a small max_size, make sure we check_max_size (and possibly
2484  * ask the mds) so we don't get hung up indefinitely.
2485  */
2486 int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
2487                   loff_t endoff, int *got, struct page **pinned_page)
2488 {
2489         int _got, ret, err = 0;
2490 
2491         ret = ceph_pool_perm_check(ci, need);
2492         if (ret < 0)
2493                 return ret;
2494 
2495         while (true) {
2496                 if (endoff > 0)
2497                         check_max_size(&ci->vfs_inode, endoff);
2498 
2499                 err = 0;
2500                 _got = 0;
2501                 ret = try_get_cap_refs(ci, need, want, endoff,
2502                                        false, &_got, &err);
2503                 if (ret) {
2504                         if (err == -EAGAIN)
2505                                 continue;
2506                         if (err < 0)
2507                                 ret = err;
2508                 } else {
2509                         ret = wait_event_interruptible(ci->i_cap_wq,
2510                                         try_get_cap_refs(ci, need, want, endoff,
2511                                                          true, &_got, &err));
2512                         if (err == -EAGAIN)
2513                                 continue;
2514                         if (err < 0)
2515                                 ret = err;
2516                 }
2517                 if (ret < 0) {
2518                         if (err == -ESTALE) {
2519                                 /* session was killed, try renew caps */
2520                                 ret = ceph_renew_caps(&ci->vfs_inode);
2521                                 if (ret == 0)
2522                                         continue;
2523                         }
2524                         return ret;
2525                 }
2526 
2527                 if (ci->i_inline_version != CEPH_INLINE_NONE &&
2528                     (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
2529                     i_size_read(&ci->vfs_inode) > 0) {
2530                         struct page *page =
2531                                 find_get_page(ci->vfs_inode.i_mapping, 0);
2532                         if (page) {
2533                                 if (PageUptodate(page)) {
2534                                         *pinned_page = page;
2535                                         break;
2536                                 }
2537                                 put_page(page);
2538                         }
2539                         /*
2540                          * drop cap refs first because getattr while
2541                          * holding caps refs can cause deadlock.
2542                          */
2543                         ceph_put_cap_refs(ci, _got);
2544                         _got = 0;
2545 
2546                         /*
2547                          * getattr request will bring inline data into
2548                          * page cache
2549                          */
2550                         ret = __ceph_do_getattr(&ci->vfs_inode, NULL,
2551                                                 CEPH_STAT_CAP_INLINE_DATA,
2552                                                 true);
2553                         if (ret < 0)
2554                                 return ret;
2555                         continue;
2556                 }
2557                 break;
2558         }
2559 
2560         if ((_got & CEPH_CAP_FILE_RD) && (_got & CEPH_CAP_FILE_CACHE))
2561                 ceph_fscache_revalidate_cookie(ci);
2562 
2563         *got = _got;
2564         return 0;
2565 }
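
/*
 * Editor's note: illustrative sketch, not part of the original file.  It
 * shows the typical pairing of ceph_get_caps() and ceph_put_cap_refs() in
 * a read path (loosely modelled on the file read code): require FILE_RD,
 * opportunistically ask for the caching caps, do the I/O while the refs
 * are held, then drop them.  Error handling is simplified and the function
 * name is hypothetical.
 */
static int example_read_under_caps(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct page *pinned_page = NULL;
        int got = 0, ret;

        ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD,
                            CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO,
                            -1, &got, &pinned_page);
        if (ret < 0)
                return ret;

        /* ... perform the read while the cap references are held ... */

        if (pinned_page)
                put_page(pinned_page);
        ceph_put_cap_refs(ci, got);
        return 0;
}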
2566 
2567 /*
2568  * Take cap refs.  Caller must already know we hold at least one ref
2569  * on the caps in question or we don't know this is safe.
2570  */
2571 void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
2572 {
2573         spin_lock(&ci->i_ceph_lock);
2574         __take_cap_refs(ci, caps, false);
2575         spin_unlock(&ci->i_ceph_lock);
2576 }
2577 
2578 
2579 /*
2580  * Drop a cap_snap that is not associated with any snapshot;
2581  * we don't need to send a FLUSHSNAP message for it.
2582  */
2583 static int ceph_try_drop_cap_snap(struct ceph_cap_snap *capsnap)
2584 {
2585         if (!capsnap->need_flush &&
2586             !capsnap->writing && !capsnap->dirty_pages) {
2587 
2588                 dout("dropping cap_snap %p follows %llu\n",
2589                      capsnap, capsnap->follows);
2590                 ceph_put_snap_context(capsnap->context);
2591                 list_del(&capsnap->ci_item);
2592                 list_del(&capsnap->flushing_item);
2593                 ceph_put_cap_snap(capsnap);
2594                 return 1;
2595         }
2596         return 0;
2597 }
2598 
2599 /*
2600  * Release cap refs.
2601  *
2602  * If we released the last ref on any given cap, call ceph_check_caps
2603  * to release (or schedule a release).
2604  *
2605  * If we are releasing a WR cap (from a sync write), finalize any affected
2606  * cap_snap, and wake up any waiters.
2607  */
2608 void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
2609 {
2610         struct inode *inode = &ci->vfs_inode;
2611         int last = 0, put = 0, flushsnaps = 0, wake = 0;
2612 
2613         spin_lock(&ci->i_ceph_lock);
2614         if (had & CEPH_CAP_PIN)
2615                 --ci->i_pin_ref;
2616         if (had & CEPH_CAP_FILE_RD)
2617                 if (--ci->i_rd_ref == 0)
2618                         last++;
2619         if (had & CEPH_CAP_FILE_CACHE)
2620                 if (--ci->i_rdcache_ref == 0)
2621                         last++;
2622         if (had & CEPH_CAP_FILE_BUFFER) {
2623                 if (--ci->i_wb_ref == 0) {
2624                         last++;
2625                         put++;
2626                 }
2627                 dout("put_cap_refs %p wb %d -> %d (?)\n",
2628                      inode, ci->i_wb_ref+1, ci->i_wb_ref);
2629         }
2630         if (had & CEPH_CAP_FILE_WR)
2631                 if (--ci->i_wr_ref == 0) {
2632                         last++;
2633                         if (__ceph_have_pending_cap_snap(ci)) {
2634                                 struct ceph_cap_snap *capsnap =
2635                                         list_last_entry(&ci->i_cap_snaps,
2636                                                         struct ceph_cap_snap,
2637                                                         ci_item);
2638                                 capsnap->writing = 0;
2639                                 if (ceph_try_drop_cap_snap(capsnap))
2640                                         put++;
2641                                 else if (__ceph_finish_cap_snap(ci, capsnap))
2642                                         flushsnaps = 1;
2643                                 wake = 1;
2644                         }
2645                         if (ci->i_wrbuffer_ref_head == 0 &&
2646                             ci->i_dirty_caps == 0 &&
2647                             ci->i_flushing_caps == 0) {
2648                                 BUG_ON(!ci->i_head_snapc);
2649                                 ceph_put_snap_context(ci->i_head_snapc);
2650                                 ci->i_head_snapc = NULL;
2651                         }
2652                         /* see comment in __ceph_remove_cap() */
2653                         if (!__ceph_is_any_caps(ci) && ci->i_snap_realm)
2654                                 drop_inode_snap_realm(ci);
2655                 }
2656         spin_unlock(&ci->i_ceph_lock);
2657 
2658         dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
2659              last ? " last" : "", put ? " put" : "");
2660 
2661         if (last && !flushsnaps)
2662                 ceph_check_caps(ci, 0, NULL);
2663         else if (flushsnaps)
2664                 ceph_flush_snaps(ci);
2665         if (wake)
2666                 wake_up_all(&ci->i_cap_wq);
2667         while (put-- > 0)
2668                 iput(inode);
2669 }
2670 
2671 /*
2672  * Release @nr WRBUFFER refs on dirty pages for the given @snapc snap
2673  * context.  Adjust per-snap dirty page accounting as appropriate.
2674  * Once all dirty data for a cap_snap is flushed, flush snapped file
2675  * metadata back to the MDS.  If we dropped the last ref, call
2676  * ceph_check_caps.
2677  */
2678 void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
2679                                 struct ceph_snap_context *snapc)
2680 {
2681         struct inode *inode = &ci->vfs_inode;
2682         int last = 0;
2683         int complete_capsnap = 0;
2684         int drop_capsnap = 0;
2685         int found = 0;
2686         struct ceph_cap_snap *capsnap = NULL;
2687 
2688         spin_lock(&ci->i_ceph_lock);
2689         ci->i_wrbuffer_ref -= nr;
2690         last = !ci->i_wrbuffer_ref;
2691 
2692         if (ci->i_head_snapc == snapc) {
2693                 ci->i_wrbuffer_ref_head -= nr;
2694                 if (ci->i_wrbuffer_ref_head == 0 &&
2695                     ci->i_wr_ref == 0 &&
2696                     ci->i_dirty_caps == 0 &&
2697                     ci->i_flushing_caps == 0) {
2698                         BUG_ON(!ci->i_head_snapc);
2699                         ceph_put_snap_context(ci->i_head_snapc);
2700                         ci->i_head_snapc = NULL;
2701                 }
2702                 dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n",
2703                      inode,
2704                      ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr,
2705                      ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
2706                      last ? " LAST" : "");
2707         } else {
2708                 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
2709                         if (capsnap->context == snapc) {
2710                                 found = 1;
2711                                 break;
2712                         }
2713                 }
2714                 BUG_ON(!found);
2715                 capsnap->dirty_pages -= nr;
2716                 if (capsnap->dirty_pages == 0) {
2717                         complete_capsnap = 1;
2718                         drop_capsnap = ceph_try_drop_cap_snap(capsnap);
2719                 }
2720                 dout("put_wrbuffer_cap_refs on %p cap_snap %p "
2721                      " snap %lld %d/%d -> %d/%d %s%s\n",
2722                      inode, capsnap, capsnap->context->seq,
2723                      ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
2724                      ci->i_wrbuffer_ref, capsnap->dirty_pages,
2725                      last ? " (wrbuffer last)" : "",
2726                      complete_capsnap ? " (complete capsnap)" : "");
2727         }
2728 
2729         spin_unlock(&ci->i_ceph_lock);
2730 
2731         if (last) {
2732                 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2733                 iput(inode);
2734         } else if (complete_capsnap) {
2735                 ceph_flush_snaps(ci);
2736                 wake_up_all(&ci->i_cap_wq);
2737         }
2738         if (drop_capsnap)
2739                 iput(inode);
2740 }
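
     /*
      * Illustrative sketch (hypothetical helper, not part of caps.c): how a
      * writeback-completion path is expected to hand WRBUFFER refs back via
      * ceph_put_wrbuffer_cap_refs().  The helper name and its arguments are
      * assumptions for illustration; only ceph_put_wrbuffer_cap_refs() is
      * from this file.
      */
     #if 0
     static void example_pages_written(struct ceph_inode_info *ci,
                                       struct ceph_snap_context *snapc,
                                       int nr_pages)
     {
             /* drop one WRBUFFER ref per page written back, against the
              * snap context the pages were dirtied under */
             ceph_put_wrbuffer_cap_refs(ci, nr_pages, snapc);
     }
     #endif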
2741 
2742 /*
2743  * Invalidate an unlinked inode's aliases, so we can drop the inode ASAP.
2744  */
2745 static void invalidate_aliases(struct inode *inode)
2746 {
2747         struct dentry *dn, *prev = NULL;
2748 
2749         dout("invalidate_aliases inode %p\n", inode);
2750         d_prune_aliases(inode);
2751         /*
2752          * For a non-directory inode, d_find_alias() only returns
2753          * hashed dentries. After calling d_invalidate(), the
2754          * dentry becomes unhashed.
2755          *
2756          * For a directory inode, d_find_alias() can return an
2757          * unhashed dentry, but a directory inode should have at
2758          * most one alias.
2759          */
2760         while ((dn = d_find_alias(inode))) {
2761                 if (dn == prev) {
2762                         dput(dn);
2763                         break;
2764                 }
2765                 d_invalidate(dn);
2766                 if (prev)
2767                         dput(prev);
2768                 prev = dn;
2769         }
2770         if (prev)
2771                 dput(prev);
2772 }
2773 
2774 /*
2775  * Handle a cap GRANT message from the MDS.  (Note that a GRANT may
2776  * actually be a revocation if it specifies a smaller cap set.)
2777  *
2778  * caller holds s_mutex and i_ceph_lock; we drop both.
2779  */
2780 static void handle_cap_grant(struct ceph_mds_client *mdsc,
2781                              struct inode *inode, struct ceph_mds_caps *grant,
2782                              u64 inline_version,
2783                              void *inline_data, int inline_len,
2784                              struct ceph_buffer *xattr_buf,
2785                              struct ceph_mds_session *session,
2786                              struct ceph_cap *cap, int issued,
2787                              u32 pool_ns_len)
2788         __releases(ci->i_ceph_lock)
2789         __releases(mdsc->snap_rwsem)
2790 {
2791         struct ceph_inode_info *ci = ceph_inode(inode);
2792         int mds = session->s_mds;
2793         int seq = le32_to_cpu(grant->seq);
2794         int newcaps = le32_to_cpu(grant->caps);
2795         int used, wanted, dirty;
2796         u64 size = le64_to_cpu(grant->size);
2797         u64 max_size = le64_to_cpu(grant->max_size);
2798         struct timespec mtime, atime, ctime;
2799         int check_caps = 0;
2800         bool wake = false;
2801         bool writeback = false;
2802         bool queue_trunc = false;
2803         bool queue_invalidate = false;
2804         bool deleted_inode = false;
2805         bool fill_inline = false;
2806 
2807         dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
2808              inode, cap, mds, seq, ceph_cap_string(newcaps));
2809         dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
2810                 inode->i_size);
2811 
2812 
2813         /*
2814          * The auth mds of the inode changed. We received the cap export
2815          * message, but still haven't received the cap import message.
2816          * handle_cap_export updated the new auth MDS's cap.
2817          *
2818          * "ceph_seq_cmp(seq, cap->seq) <= 0" means we are processing a message
2819          * that was sent before the cap import message. So don't remove caps.
2820          */
2821         if (ceph_seq_cmp(seq, cap->seq) <= 0) {
2822                 WARN_ON(cap != ci->i_auth_cap);
2823                 WARN_ON(cap->cap_id != le64_to_cpu(grant->cap_id));
2824                 seq = cap->seq;
2825                 newcaps |= cap->issued;
2826         }
2827 
2828         /*
2829          * If CACHE is being revoked, and we have no dirty buffers,
2830          * try to invalidate (once).  (If there are dirty buffers, we
2831          * will invalidate _after_ writeback.)
2832          */
2833         if (!S_ISDIR(inode->i_mode) && /* don't invalidate readdir cache */
2834             ((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) &&
2835             (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
2836             !(ci->i_wrbuffer_ref || ci->i_wb_ref)) {
2837                 if (try_nonblocking_invalidate(inode)) {
2838                         /* there were locked pages.. invalidate later
2839                            in a separate thread. */
2840                         if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
2841                                 queue_invalidate = true;
2842                                 ci->i_rdcache_revoking = ci->i_rdcache_gen;
2843                         }
2844                 }
2845         }
2846 
2847         /* side effects now are allowed */
2848         cap->cap_gen = session->s_cap_gen;
2849         cap->seq = seq;
2850 
2851         __check_cap_issue(ci, cap, newcaps);
2852 
2853         if ((newcaps & CEPH_CAP_AUTH_SHARED) &&
2854             (issued & CEPH_CAP_AUTH_EXCL) == 0) {
2855                 inode->i_mode = le32_to_cpu(grant->mode);
2856                 inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(grant->uid));
2857                 inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(grant->gid));
2858                 dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
2859                      from_kuid(&init_user_ns, inode->i_uid),
2860                      from_kgid(&init_user_ns, inode->i_gid));
2861         }
2862 
2863         if ((newcaps & CEPH_CAP_LINK_SHARED) &&
2864             (issued & CEPH_CAP_LINK_EXCL) == 0) {
2865                 set_nlink(inode, le32_to_cpu(grant->nlink));
2866                 if (inode->i_nlink == 0 &&
2867                     (newcaps & (CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL)))
2868                         deleted_inode = true;
2869         }
2870 
2871         if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && grant->xattr_len) {
2872                 int len = le32_to_cpu(grant->xattr_len);
2873                 u64 version = le64_to_cpu(grant->xattr_version);
2874 
2875                 if (version > ci->i_xattrs.version) {
2876                         dout(" got new xattrs v%llu on %p len %d\n",
2877                              version, inode, len);
2878                         if (ci->i_xattrs.blob)
2879                                 ceph_buffer_put(ci->i_xattrs.blob);
2880                         ci->i_xattrs.blob = ceph_buffer_get(xattr_buf);
2881                         ci->i_xattrs.version = version;
2882                         ceph_forget_all_cached_acls(inode);
2883                 }
2884         }
2885 
2886         if (newcaps & CEPH_CAP_ANY_RD) {
2887                 /* ctime/mtime/atime? */
2888                 ceph_decode_timespec(&mtime, &grant->mtime);
2889                 ceph_decode_timespec(&atime, &grant->atime);
2890                 ceph_decode_timespec(&ctime, &grant->ctime);
2891                 ceph_fill_file_time(inode, issued,
2892                                     le32_to_cpu(grant->time_warp_seq),
2893                                     &ctime, &mtime, &atime);
2894         }
2895 
2896         if (newcaps & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR)) {
2897                 /* file layout may have changed */
2898                 ci->i_layout = grant->layout;
2899                 ci->i_pool_ns_len = pool_ns_len;
2900 
2901                 /* size/truncate_seq? */
2902                 queue_trunc = ceph_fill_file_size(inode, issued,
2903                                         le32_to_cpu(grant->truncate_seq),
2904                                         le64_to_cpu(grant->truncate_size),
2905                                         size);
2906                 /* max size increase? */
2907                 if (ci->i_auth_cap == cap && max_size != ci->i_max_size) {
2908                         dout("max_size %lld -> %llu\n",
2909                              ci->i_max_size, max_size);
2910                         ci->i_max_size = max_size;
2911                         if (max_size >= ci->i_wanted_max_size) {
2912                                 ci->i_wanted_max_size = 0;  /* reset */
2913                                 ci->i_requested_max_size = 0;
2914                         }
2915                         wake = true;
2916                 }
2917         }
2918 
2919         /* check cap bits */
2920         wanted = __ceph_caps_wanted(ci);
2921         used = __ceph_caps_used(ci);
2922         dirty = __ceph_caps_dirty(ci);
2923         dout(" my wanted = %s, used = %s, dirty %s\n",
2924              ceph_cap_string(wanted),
2925              ceph_cap_string(used),
2926              ceph_cap_string(dirty));
2927         if (wanted != le32_to_cpu(grant->wanted)) {
2928                 dout("mds wanted %s -> %s\n",
2929                      ceph_cap_string(le32_to_cpu(grant->wanted)),
2930                      ceph_cap_string(wanted));
2931                 /* imported cap may not have correct mds_wanted */
2932                 if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT)
2933                         check_caps = 1;
2934         }
2935 
2936         /* revocation, grant, or no-op? */
2937         if (cap->issued & ~newcaps) {
2938                 int revoking = cap->issued & ~newcaps;
2939 
2940                 dout("revocation: %s -> %s (revoking %s)\n",
2941                      ceph_cap_string(cap->issued),
2942                      ceph_cap_string(newcaps),
2943                      ceph_cap_string(revoking));
2944                 if (revoking & used & CEPH_CAP_FILE_BUFFER)
2945                         writeback = true;  /* initiate writeback; will delay ack */
2946                 else if (revoking == CEPH_CAP_FILE_CACHE &&
2947                          (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
2948                          queue_invalidate)
2949                         ; /* do nothing yet, invalidation will be queued */
2950                 else if (cap == ci->i_auth_cap)
2951                         check_caps = 1; /* check auth cap only */
2952                 else
2953                         check_caps = 2; /* check all caps */
2954                 cap->issued = newcaps;
2955                 cap->implemented |= newcaps;
2956         } else if (cap->issued == newcaps) {
2957                 dout("caps unchanged: %s -> %s\n",
2958                      ceph_cap_string(cap->issued), ceph_cap_string(newcaps));
2959         } else {
2960                 dout("grant: %s -> %s\n", ceph_cap_string(cap->issued),
2961                      ceph_cap_string(newcaps));
2962                 /* is a non-auth MDS revoking the newly granted caps? */
2963                 if (cap == ci->i_auth_cap &&
2964                     __ceph_caps_revoking_other(ci, cap, newcaps))
2965                     check_caps = 2;
2966 
2967                 cap->issued = newcaps;
2968                 cap->implemented |= newcaps; /* add bits only, to
2969                                               * avoid stepping on a
2970                                               * pending revocation */
2971                 wake = true;
2972         }
2973         BUG_ON(cap->issued & ~cap->implemented);
2974 
2975         if (inline_version > 0 && inline_version >= ci->i_inline_version) {
2976                 ci->i_inline_version = inline_version;
2977                 if (ci->i_inline_version != CEPH_INLINE_NONE &&
2978                     (newcaps & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)))
2979                         fill_inline = true;
2980         }
2981 
2982         spin_unlock(&ci->i_ceph_lock);
2983 
2984         if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) {
2985                 kick_flushing_inode_caps(mdsc, session, inode);
2986                 up_read(&mdsc->snap_rwsem);
2987                 if (newcaps & ~issued)
2988                         wake = true;
2989         }
2990 
2991         if (fill_inline)
2992                 ceph_fill_inline_data(inode, NULL, inline_data, inline_len);
2993 
2994         if (queue_trunc)
2995                 ceph_queue_vmtruncate(inode);
2996 
2997         if (writeback)
2998                 /*
2999                  * queue inode for writeback: we can't actually call
3000                  * filemap_write_and_wait, etc. from message handler
3001                  * context.
3002                  */
3003                 ceph_queue_writeback(inode);
3004         if (queue_invalidate)
3005                 ceph_queue_invalidate(inode);
3006         if (deleted_inode)
3007                 invalidate_aliases(inode);
3008         if (wake)
3009                 wake_up_all(&ci->i_cap_wq);
3010 
3011         if (check_caps == 1)
3012                 ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
3013                                 session);
3014         else if (check_caps == 2)
3015                 ceph_check_caps(ci, CHECK_CAPS_NODELAY, session);
3016         else
3017                 mutex_unlock(&session->s_mutex);
3018 }
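
     /*
      * Illustrative sketch (hypothetical values, not part of caps.c): the
      * revocation check in handle_cap_grant() above is plain bit arithmetic
      * on the cap masks.  The CEPH_CAP_FILE_* constants are real; the
      * scenario is made up.
      */
     #if 0
     static void example_revocation_check(struct inode *inode)
     {
             int old_issued = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
                              CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER;
             int newcaps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD;
             int revoking = old_issued & ~newcaps;  /* FILE_WR|FILE_BUFFER */

             /* dirty buffers are among the revoked bits, so writeback must
              * be queued before the revocation can be acked to the MDS */
             if (revoking & CEPH_CAP_FILE_BUFFER)
                     ceph_queue_writeback(inode);
     }
     #endif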
3019 
3020 /*
3021  * Handle FLUSH_ACK from MDS, indicating that metadata we sent to the
3022  * MDS has been safely committed.
3023  */
3024 static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
3025                                  struct ceph_mds_caps *m,
3026                                  struct ceph_mds_session *session,
3027                                  struct ceph_cap *cap)
3028         __releases(ci->i_ceph_lock)
3029 {
3030         struct ceph_inode_info *ci = ceph_inode(inode);
3031         struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
3032         struct ceph_cap_flush *cf;
3033         struct rb_node *n;
3034         LIST_HEAD(to_remove);
3035         unsigned seq = le32_to_cpu(m->seq);
3036         int dirty = le32_to_cpu(m->dirty);
3037         int cleaned = 0;
3038         int drop = 0;
3039 
3040         n = rb_first(&ci->i_cap_flush_tree);
3041         while (n) {
3042                 cf = rb_entry(n, struct ceph_cap_flush, i_node);
3043                 n = rb_next(&cf->i_node);
3044                 if (cf->tid == flush_tid)
3045                         cleaned = cf->caps;
3046                 if (cf->tid <= flush_tid) {
3047                         rb_erase(&cf->i_node, &ci->i_cap_flush_tree);
3048                         list_add_tail(&cf->list, &to_remove);
3049                 } else {
3050                         cleaned &= ~cf->caps;
3051                         if (!cleaned)
3052                                 break;
3053                 }
3054         }
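             /*
              * Worked example (hypothetical tids, not from the source): with
              * pending flushes {tid 5: Fw}, {tid 7: AxFw}, {tid 9: Fw} and a
              * FLUSH_ACK for tid 7, the loop above removes tids 5 and 7 and
              * starts with cleaned = AxFw; the still-pending tid 9 then masks
              * Fw back out, so only Ax is treated as clean below.
              */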
3055 
3056         dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
3057              " flushing %s -> %s\n",
3058              inode, session->s_mds, seq, ceph_cap_string(dirty),
3059              ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps),
3060              ceph_cap_string(ci->i_flushing_caps & ~cleaned));
3061 
3062         if (list_empty(&to_remove) && !cleaned)
3063                 goto out;
3064 
3065         ci->i_flushing_caps &= ~cleaned;
3066 
3067         spin_lock(&mdsc->cap_dirty_lock);
3068 
3069         if (!list_empty(&to_remove)) {
3070                 list_for_each_entry(cf, &to_remove, list)
3071                         rb_erase(&cf->g_node, &mdsc->cap_flush_tree);
3072 
3073                 n = rb_first(&mdsc->cap_flush_tree);
3074                 cf = n ? rb_entry(n, struct ceph_cap_flush, g_node) : NULL;
3075                 if (!cf || cf->tid > flush_tid)
3076                         wake_up_all(&mdsc->cap_flushing_wq);
3077         }
3078 
3079         if (ci->i_flushing_caps == 0) {
3080                 list_del_init(&ci->i_flushing_item);
3081                 if (!list_empty(&session->s_cap_flushing))
3082                         dout(" mds%d still flushing cap on %p\n",
3083                              session->s_mds,
3084                              &list_entry(session->s_cap_flushing.next,
3085                                          struct ceph_inode_info,
3086                                          i_flushing_item)->vfs_inode);
3087                 mdsc->num_cap_flushing--;
3088                 dout(" inode %p now !flushing\n", inode);
3089 
3090                 if (ci->i_dirty_caps == 0) {
3091                         dout(" inode %p now clean\n", inode);
3092                         BUG_ON(!list_empty(&ci->i_dirty_item));
3093                         drop = 1;
3094                         if (ci->i_wr_ref == 0 &&
3095                             ci->i_wrbuffer_ref_head == 0) {
3096                                 BUG_ON(!ci->i_head_snapc);
3097                                 ceph_put_snap_context(ci->i_head_snapc);
3098                                 ci->i_head_snapc = NULL;
3099                         }
3100                 } else {
3101                         BUG_ON(list_empty(&ci->i_dirty_item));
3102                 }
3103         }
3104         spin_unlock(&mdsc->cap_dirty_lock);
3105         wake_up_all(&ci->i_cap_wq);
3106 
3107 out:
3108         spin_unlock(&ci->i_ceph_lock);
3109 
3110         while (!list_empty(&to_remove)) {
3111                 cf = list_first_entry(&to_remove,
3112                                       struct ceph_cap_flush, list);
3113                 list_del(&cf->list);
3114                 ceph_free_cap_flush(cf);
3115         }
3116         if (drop)
3117                 iput(inode);
3118 }
3119 
3120 /*
3121  * Handle FLUSHSNAP_ACK.  MDS has flushed snap data to disk and we can
3122  * throw away our cap_snap.
3123  *
3124  * Caller holds s_mutex.
3125  */
3126 static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
3127                                      struct ceph_mds_caps *m,
3128                                      struct ceph_mds_session *session)
3129 {
3130         struct ceph_inode_info *ci = ceph_inode(inode);
3131         struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
3132         u64 follows = le64_to_cpu(m->snap_follows);
3133         struct ceph_cap_snap *capsnap;
3134         int drop = 0;
3135 
3136         dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
3137              inode, ci, session->s_mds, follows);
3138 
3139         spin_lock(&ci->i_ceph_lock);
3140         list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
3141                 if (capsnap->follows == follows) {
3142                         if (capsnap->flush_tid != flush_tid) {
3143                                 dout(" cap_snap %p follows %lld tid %lld !="
3144                                      " %lld\n", capsnap, follows,
3145                                      flush_tid, capsnap->flush_tid);
3146                                 break;
3147                         }
3148                         WARN_ON(capsnap->dirty_pages || capsnap->writing);
3149                         dout(" removing %p cap_snap %p follows %lld\n",
3150                              inode, capsnap, follows);
3151                         ceph_put_snap_context(capsnap->context);
3152                         list_del(&capsnap->ci_item);
3153                         list_del(&capsnap->flushing_item);
3154                         ceph_put_cap_snap(capsnap);
3155                         wake_up_all(&mdsc->cap_flushing_wq);
3156                         drop = 1;
3157                         break;
3158                 } else {
3159                         dout(" skipping cap_snap %p follows %lld\n",
3160                              capsnap, capsnap->follows);
3161                 }
3162         }
3163         spin_unlock(&ci->i_ceph_lock);
3164         if (drop)
3165                 iput(inode);
3166 }
3167 
3168 /*
3169  * Handle TRUNC from MDS, indicating file truncation.
3170  *
3171  * caller holds s_mutex.
3172  */
3173 static void handle_cap_trunc(struct inode *inode,
3174                              struct ceph_mds_caps *trunc,
3175                              struct ceph_mds_session *session)
3176         __releases(ci->i_ceph_lock)
3177 {
3178         struct ceph_inode_info *ci = ceph_inode(inode);
3179         int mds = session->s_mds;
3180         int seq = le32_to_cpu(trunc->seq);
3181         u32 truncate_seq = le32_to_cpu(trunc->truncate_seq);
3182         u64 truncate_size = le64_to_cpu(trunc->truncate_size);
3183         u64 size = le64_to_cpu(trunc->size);
3184         int implemented = 0;
3185         int dirty = __ceph_caps_dirty(ci);
3186         int issued = __ceph_caps_issued(ceph_inode(inode), &implemented);
3187         int queue_trunc = 0;
3188 
3189         issued |= implemented | dirty;
3190 
3191         dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n",
3192              inode, mds, seq, truncate_size, truncate_seq);
3193         queue_trunc = ceph_fill_file_size(inode, issued,
3194                                           truncate_seq, truncate_size, size);
3195         spin_unlock(&ci->i_ceph_lock);
3196 
3197         if (queue_trunc)
3198                 ceph_queue_vmtruncate(inode);
3199 }
3200 
3201 /*
3202  * Handle EXPORT from MDS.  Cap is being migrated _from_ this mds to a
3203  * different one.  If we are the most recent migration we've seen (as
3204  * indicated by mseq), make note of the migrating cap bits for the
3205  * duration (until we see the corresponding IMPORT).
3206  *
3207  * caller holds s_mutex
3208  */
3209 static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
3210                               struct ceph_mds_cap_peer *ph,
3211                               struct ceph_mds_session *session)
3212 {
3213         struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
3214         struct ceph_mds_session *tsession = NULL;
3215         struct ceph_cap *cap, *tcap, *new_cap = NULL;
3216         struct ceph_inode_info *ci = ceph_inode(inode);
3217         u64 t_cap_id;
3218         unsigned mseq = le32_to_cpu(ex->migrate_seq);
3219         unsigned t_seq, t_mseq;
3220         int target, issued;
3221         int mds = session->s_mds;
3222 
3223         if (ph) {
3224                 t_cap_id = le64_to_cpu(ph->cap_id);
3225                 t_seq = le32_to_cpu(ph->seq);
3226                 t_mseq = le32_to_cpu(ph->mseq);
3227                 target = le32_to_cpu(ph->mds);
3228         } else {
3229                 t_cap_id = t_seq = t_mseq = 0;
3230                 target = -1;
3231         }
3232 
3233         dout("handle_cap_export inode %p ci %p mds%d mseq %d target %d\n",
3234              inode, ci, mds, mseq, target);
3235 retry:
3236         spin_lock(&ci->i_ceph_lock);
3237         cap = __get_cap_for_mds(ci, mds);
3238         if (!cap || cap->cap_id != le64_to_cpu(ex->cap_id))
3239                 goto out_unlock;
3240 
3241         if (target < 0) {
3242                 __ceph_remove_cap(cap, false);
3243                 if (!ci->i_auth_cap)
3244                         ci->i_ceph_flags |= CEPH_I_CAP_DROPPED;
3245                 goto out_unlock;
3246         }
3247 
3248         /*
3249          * now we know we haven't received the cap import message yet
3250          * because the exported cap still exists.
3251          */
3252 
3253         issued = cap->issued;
3254         WARN_ON(issued != cap->implemented);
3255 
3256         tcap = __get_cap_for_mds(ci, target);
3257         if (tcap) {
3258                 /* already have caps from the target */
3259                 if (tcap->cap_id != t_cap_id ||
3260                     ceph_seq_cmp(tcap->seq, t_seq) < 0) {
3261                         dout(" updating import cap %p mds%d\n", tcap, target);
3262                         tcap->cap_id = t_cap_id;
3263                         tcap->seq = t_seq - 1;
3264                         tcap->issue_seq = t_seq - 1;
3265                         tcap->mseq = t_mseq;
3266                         tcap->issued |= issued;
3267                         tcap->implemented |= issued;
3268                         if (cap == ci->i_auth_cap)
3269                                 ci->i_auth_cap = tcap;
3270                         if (ci->i_flushing_caps && ci->i_auth_cap == tcap) {
3271                                 spin_lock(&mdsc->cap_dirty_lock);
3272                                 list_move_tail(&ci->i_flushing_item,
3273                                                &tcap->session->s_cap_flushing);
3274                                 spin_unlock(&mdsc->cap_dirty_lock);
3275                         }
3276                 }
3277                 __ceph_remove_cap(cap, false);
3278                 goto out_unlock;
3279         } else if (tsession) {
3280                 /* add placeholder for the export target */
3281                 int flag = (cap == ci->i_auth_cap) ? CEPH_CAP_FLAG_AUTH : 0;
3282                 ceph_add_cap(inode, tsession, t_cap_id, -1, issued, 0,
3283                              t_seq - 1, t_mseq, (u64)-1, flag, &new_cap);
3284 
3285                 __ceph_remove_cap(cap, false);
3286                 goto out_unlock;
3287         }
3288 
3289         spin_unlock(&ci->i_ceph_lock);
3290         mutex_unlock(&session->s_mutex);
3291 
3292         /* open target session */
3293         tsession = ceph_mdsc_open_export_target_session(mdsc, target);
3294         if (!IS_ERR(tsession)) {
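                     /*
                      * Lock both session mutexes in a fixed order based on mds
                      * rank; the second acquisition is annotated with
                      * SINGLE_DEPTH_NESTING so lockdep allows holding both.
                      */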
3295                 if (mds > target) {
3296                         mutex_lock(&session->s_mutex);
3297                         mutex_lock_nested(&tsession->s_mutex,
3298                                           SINGLE_DEPTH_NESTING);
3299                 } else {
3300                         mutex_lock(&tsession->s_mutex);
3301                         mutex_lock_nested(&session->s_mutex,
3302                                           SINGLE_DEPTH_NESTING);
3303                 }
3304                 new_cap = ceph_get_cap(mdsc, NULL);
3305         } else {
3306                 WARN_ON(1);
3307                 tsession = NULL;
3308                 target = -1;
3309         }
3310         goto retry;
3311 
3312 out_unlock:
3313         spin_unlock(&ci->i_ceph_lock);
3314         mutex_unlock(&session->s_mutex);
3315         if (tsession) {
3316                 mutex_unlock(&tsession->s_mutex);
3317                 ceph_put_mds_session(tsession);
3318         }
3319         if (new_cap)
3320                 ceph_put_cap(mdsc, new_cap);
3321 }
3322 
3323 /*
3324  * Handle cap IMPORT.
3325  *
3326  * caller holds s_mutex; acquires i_ceph_lock
3327  */
3328 static void handle_cap_import(struct ceph_mds_client *mdsc,
3329                               struct inode *inode, struct ceph_mds_caps *im,
3330                               struct ceph_mds_cap_peer *ph,
3331                               struct ceph_mds_session *session,
3332                               struct ceph_cap **target_cap, int *old_issued)
3333         __acquires(ci->i_ceph_lock)
3334 {
3335         struct ceph_inode_info *ci = ceph_inode(inode);
3336         struct ceph_cap *cap, *ocap, *new_cap = NULL;
3337         int mds = session->s_mds;
3338         int issued;
3339         unsigned caps = le32_to_cpu(im->caps);
3340         unsigned wanted = le32_to_cpu(im->wanted);
3341         unsigned seq = le32_to_cpu(im->seq);
3342         unsigned mseq = le32_to_cpu(im->migrate_seq);
3343         u64 realmino = le64_to_cpu(im->realm);
3344         u64 cap_id = le64_to_cpu(im->cap_id);
3345         u64 p_cap_id;
3346         int peer;
3347 
3348         if (ph) {
3349                 p_cap_id = le64_to_cpu(ph->cap_id);
3350                 peer = le32_to_cpu(ph->mds);
3351         } else {
3352                 p_cap_id = 0;
3353                 peer = -1;
3354         }
3355 
3356         dout("handle_cap_import inode %p ci %p mds%d mseq %d peer %d\n",
3357              inode, ci, mds, mseq, peer);
3358 
3359 retry:
3360         spin_lock(&ci->i_ceph_lock);
3361         cap = __get_cap_for_mds(ci, mds);
3362         if (!cap) {
3363                 if (!new_cap) {
3364                         spin_unlock(&ci->i_ceph_lock);
3365                         new_cap = ceph_get_cap(mdsc, NULL);
3366                         goto retry;
3367                 }
3368                 cap = new_cap;
3369         } else {
3370                 if (new_cap) {
3371                         ceph_put_cap(mdsc, new_cap);
3372                         new_cap = NULL;
3373                 }
3374         }
3375 
3376         __ceph_caps_issued(ci, &issued);
3377         issued |= __ceph_caps_dirty(ci);
3378 
3379         ceph_add_cap(inode, session, cap_id, -1, caps, wanted, seq, mseq,
3380                      realmino, CEPH_CAP_FLAG_AUTH, &new_cap);
3381 
3382         ocap = peer >= 0 ? __get_cap_for_mds(ci, peer) : NULL;
3383         if (ocap && ocap->cap_id == p_cap_id) {
3384                 dout(" remove export cap %p mds%d flags %d\n",
3385                      ocap, peer, ph->flags);
3386                 if ((ph->flags & CEPH_CAP_FLAG_AUTH) &&
3387                     (ocap->seq != le32_to_cpu(ph->seq) ||
3388                      ocap->mseq != le32_to_cpu(ph->mseq))) {
3389                         pr_err("handle_cap_import: mismatched seq/mseq: "
3390                                "ino (%llx.%llx) mds%d seq %d mseq %d "
3391                                "importer mds%d has peer seq %d mseq %d\n",
3392                                ceph_vinop(inode), peer, ocap->seq,
3393                                ocap->mseq, mds, le32_to_cpu(ph->seq),
3394                                le32_to_cpu(ph->mseq));
3395                 }
3396                 __ceph_remove_cap(ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE));
3397         }
3398 
3399         /* make sure we re-request max_size, if necessary */
3400         ci->i_wanted_max_size = 0;
3401         ci->i_requested_max_size = 0;
3402 
3403         *old_issued = issued;
3404         *target_cap = cap;
3405 }
3406 
3407 /*
3408  * Handle a caps message from the MDS.
3409  *
3410  * Identify the appropriate session, inode, and call the right handler
3411  * based on the cap op.
3412  */
3413 void ceph_handle_caps(struct ceph_mds_session *session,
3414                       struct ceph_msg *msg)
3415 {
3416         struct ceph_mds_client *mdsc = session->s_mdsc;
3417         struct super_block *sb = mdsc->fsc->sb;
3418         struct inode *inode;
3419         struct ceph_inode_info *ci;
3420         struct ceph_cap *cap;
3421         struct ceph_mds_caps *h;
3422         struct ceph_mds_cap_peer *peer = NULL;
3423         struct ceph_snap_realm *realm;
3424         int mds = session->s_mds;
3425         int op, issued;
3426         u32 seq, mseq;
3427         struct ceph_vino vino;
3428         u64 cap_id;
3429         u64 size, max_size;
3430         u64 tid;
3431         u64 inline_version = 0;
3432         void *inline_data = NULL;
3433         u32  inline_len = 0;
3434         void *snaptrace;
3435         size_t snaptrace_len;
3436         u32 pool_ns_len = 0;
3437         void *p, *end;
3438 
3439         dout("handle_caps from mds%d\n", mds);
3440 
3441         /* decode */
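             /*
              * Message body layout by msg->hdr.version, matching the decode
              * below: the fixed ceph_mds_caps header plus snap trace (v1),
              * then a flock blob (v2+), a peer record for IMPORT/EXPORT (v3+),
              * inline data (v4+), and osd epoch barrier, flush_tid, caller
              * uid/gid and pool namespace length (v5..v8, only read here when
              * version >= 8).
              */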
3442         end = msg->front.iov_base + msg->front.iov_len;
3443         tid = le64_to_cpu(msg->hdr.tid);
3444         if (msg->front.iov_len < sizeof(*h))
3445                 goto bad;
3446         h = msg->front.iov_base;
3447         op = le32_to_cpu(h->op);
3448         vino.ino = le64_to_cpu(h->ino);
3449         vino.snap = CEPH_NOSNAP;
3450         cap_id = le64_to_cpu(h->cap_id);
3451         seq = le32_to_cpu(h->seq);
3452         mseq = le32_to_cpu(h->migrate_seq);
3453         size = le64_to_cpu(h->size);
3454         max_size = le64_to_cpu(h->max_size);
3455 
3456         snaptrace = h + 1;
3457         snaptrace_len = le32_to_cpu(h->snap_trace_len);
3458         p = snaptrace + snaptrace_len;
3459 
3460         if (le16_to_cpu(msg->hdr.version) >= 2) {
3461                 u32 flock_len;
3462                 ceph_decode_32_safe(&p, end, flock_len, bad);
3463                 if (p + flock_len > end)
3464                         goto bad;
3465                 p += flock_len;
3466         }
3467 
3468         if (le16_to_cpu(msg->hdr.version) >= 3) {
3469                 if (op == CEPH_CAP_OP_IMPORT) {
3470                         if (p + sizeof(*peer) > end)
3471                                 goto bad;
3472                         peer = p;
3473                         p += sizeof(*peer);
3474                 } else if (op == CEPH_CAP_OP_EXPORT) {
3475                         /* recorded in unused fields */
3476                         peer = (void *)&h->size;
3477                 }
3478         }
3479 
3480         if (le16_to_cpu(msg->hdr.version) >= 4) {
3481                 ceph_decode_64_safe(&p, end, inline_version, bad);
3482                 ceph_decode_32_safe(&p, end, inline_len, bad);
3483                 if (p + inline_len > end)
3484                         goto bad;
3485                 inline_data = p;
3486                 p += inline_len;
3487         }
3488 
3489         if (le16_to_cpu(msg->hdr.version) >= 8) {
3490                 u64 flush_tid;
3491                 u32 caller_uid, caller_gid;
3492                 u32 osd_epoch_barrier;
3493                 /* version >= 5 */
3494                 ceph_decode_32_safe(&p, end, osd_epoch_barrier, bad);
3495                 /* version >= 6 */
3496                 ceph_decode_64_safe(&p, end, flush_tid, bad);
3497                 /* version >= 7 */
3498                 ceph_decode_32_safe(&p, end, caller_uid, bad);
3499                 ceph_decode_32_safe(&p, end, caller_gid, bad);
3500                 /* version >= 8 */
3501                 ceph_decode_32_safe(&p, end, pool_ns_len, bad);
3502         }
3503 
3504         /* lookup ino */
3505         inode = ceph_find_inode(sb, vino);
3506         ci = ceph_inode(inode);
3507         dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
3508              vino.snap, inode);
3509 
3510         mutex_lock(&session->s_mutex);
3511         session->s_seq++;
3512         dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
3513              (unsigned)seq);
3514 
3515         if (!inode) {
3516                 dout(" i don't have ino %llx\n", vino.ino);
3517 
3518                 if (op == CEPH_CAP_OP_IMPORT) {
3519                         cap = ceph_get_cap(mdsc, NULL);
3520                         cap->cap_ino = vino.ino;
3521                         cap->queue_release = 1;
3522                         cap->cap_id = cap_id;
3523                         cap->mseq = mseq;
3524                         cap->seq = seq;
3525                         spin_lock(&session->s_cap_lock);
3526                         list_add_tail(&cap->session_caps,
3527                                         &session->s_cap_releases);
3528                         session->s_num_cap_releases++;
3529                         spin_unlock(&session->s_cap_lock);
3530                 }
3531                 goto flush_cap_releases;
3532         }
3533 
3534         /* these will work even if we don't have a cap yet */
3535         switch (op) {
3536         case CEPH_CAP_OP_FLUSHSNAP_ACK:
3537                 handle_cap_flushsnap_ack(inode, tid, h, session);
3538                 goto done;
3539 
3540         case CEPH_CAP_OP_EXPORT:
3541                 handle_cap_export(inode, h, peer, session);
3542                 goto done_unlocked;
3543 
3544         case CEPH_CAP_OP_IMPORT:
3545                 realm = NULL;
3546                 if (snaptrace_len) {
3547                         down_write(&mdsc->snap_rwsem);
3548                         ceph_update_snap_trace(mdsc, snaptrace,
3549                                                snaptrace + snaptrace_len,
3550                                                false, &realm);
3551                         downgrade_write(&mdsc->snap_rwsem);
3552                 } else {
3553                         down_read(&mdsc->snap_rwsem);
3554                 }
3555                 handle_cap_import(mdsc, inode, h, peer, session,
3556                                   &cap, &issued);
3557                 handle_cap_grant(mdsc, inode, h,
3558                                  inline_version, inline_data, inline_len,
3559                                  msg->middle, session, cap, issued,
3560                                  pool_ns_len);
3561                 if (realm)
3562                         ceph_put_snap_realm(mdsc, realm);
3563                 goto done_unlocked;
3564         }
3565 
3566         /* the rest require a cap */
3567         spin_lock(&ci->i_ceph_lock);
3568         cap = __get_cap_for_mds(ceph_inode(inode), mds);
3569         if (!cap) {
3570                 dout(" no cap on %p ino %llx.%llx from mds%d\n",
3571                      inode, ceph_ino(inode), ceph_snap(inode), mds);
3572                 spin_unlock(&ci->i_ceph_lock);
3573                 goto flush_cap_releases;
3574         }
3575 
3576         /* note that each of these drops i_ceph_lock for us */
3577         switch (op) {
3578         case CEPH_CAP_OP_REVOKE:
3579         case CEPH_CAP_OP_GRANT:
3580                 __ceph_caps_issued(ci, &issued);
3581                 issued |= __ceph_caps_dirty(ci);
3582                 handle_cap_grant(mdsc, inode, h,
3583                                  inline_version, inline_data, inline_len,
3584                                  msg->middle, session, cap, issued,
3585                                  pool_ns_len);
3586                 goto done_unlocked;
3587 
3588         case CEPH_CAP_OP_FLUSH_ACK:
3589                 handle_cap_flush_ack(inode, tid, h, session, cap);
3590                 break;
3591 
3592         case CEPH_CAP_OP_TRUNC:
3593                 handle_cap_trunc(inode, h, session);
3594                 break;
3595 
3596         default:
3597                 spin_unlock(&ci->i_ceph_lock);
3598                 pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
3599                        ceph_cap_op_name(op));
3600         }
3601 
3602         goto done;
3603 
3604 flush_cap_releases:
3605         /*
3606          * send any cap release message to try to move things
3607          * along for the mds (who clearly thinks we still have this
3608          * cap).
3609          */
3610         ceph_send_cap_releases(mdsc, session);
3611 
3612 done:
3613         mutex_unlock(&session->s_mutex);
3614 done_unlocked:
3615         iput(inode);
3616         return;
3617 
3618 bad:
3619         pr_err("ceph_handle_caps: corrupt message\n");
3620         ceph_msg_dump(msg);
3621         return;
3622 }
3623 
3624 /*
3625  * Delayed work handler to process end of delayed cap release LRU list.
3626  */
3627 void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
3628 {
3629         struct ceph_inode_info *ci;
3630         int flags = CHECK_CAPS_NODELAY;
3631 
3632         dout("check_delayed_caps\n");
3633         while (1) {
3634                 spin_lock(&mdsc->cap_delay_lock);
3635                 if (list_empty(&mdsc->cap_delay_list))
3636                         break;
3637                 ci = list_first_entry(&mdsc->cap_delay_list,
3638                                       struct ceph_inode_info,
3639                                       i_cap_delay_list);
3640                 if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 &&
3641                     time_before(jiffies, ci->i_hold_caps_max))
3642                         break;
3643                 list_del_init(&ci->i_cap_delay_list);
3644                 spin_unlock(&mdsc->cap_delay_lock);
3645                 dout("check_delayed_caps on %p\n", &ci->vfs_inode);
3646                 ceph_check_caps(ci, flags, NULL);
3647         }
3648         spin_unlock(&mdsc->cap_delay_lock);
3649 }
3650 
3651 /*
3652  * Flush all dirty caps to the mds
3653  */
3654 void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
3655 {
3656         struct ceph_inode_info *ci;
3657         struct inode *inode;
3658 
3659         dout("flush_dirty_caps\n");
3660         spin_lock(&mdsc->cap_dirty_lock);
3661         while (!list_empty(&mdsc->cap_dirty)) {
3662                 ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info,
3663                                       i_dirty_item);
3664                 inode = &ci->vfs_inode;
3665                 ihold(inode);
3666                 dout("flush_dirty_caps %p\n", inode);
3667                 spin_unlock(&mdsc->cap_dirty_lock);
3668                 ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, NULL);
3669                 iput(inode);
3670                 spin_lock(&mdsc->cap_dirty_lock);
3671         }
3672         spin_unlock(&mdsc->cap_dirty_lock);
3673         dout("flush_dirty_caps done\n");
3674 }
3675 
3676 /*
3677  * Drop an open file reference.  If this was the last open file,
3678  * we may need to release capabilities to the MDS (or schedule
3679  * their delayed release).
3680  */
3681 void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
3682 {
3683         struct inode *inode = &ci->vfs_inode;
3684         int last = 0;
3685 
3686         spin_lock(&ci->i_ceph_lock);
3687         dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
3688              ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
3689         BUG_ON(ci->i_nr_by_mode[fmode] == 0);
3690         if (--ci->i_nr_by_mode[fmode] == 0)
3691                 last++;
3692         spin_unlock(&ci->i_ceph_lock);
3693 
3694         if (last && ci->i_vino.snap == CEPH_NOSNAP)
3695                 ceph_check_caps(ci, 0, NULL);
3696 }
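
     /*
      * Illustrative pairing (hypothetical call sites, not part of caps.c):
      * each ceph_put_fmode() balances a ceph_get_fmode() taken when a file
      * handle was opened with that mode.
      */
     #if 0
             ceph_get_fmode(ci, fmode);   /* at open: count the open file */
             /* ... file handle in use ... */
             ceph_put_fmode(ci, fmode);   /* at release: dropping the last
                                           * ref may trigger a cap check */
     #endif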
3697 
3698 /*
3699  * Helpers for embedding cap and dentry lease releases into mds
3700  * requests.
3701  *
3702  * @force is used by dentry_release (below) to force inclusion of a
3703  * record for the directory inode, even when there aren't any caps to
3704  * drop.
3705  */
3706 int ceph_encode_inode_release(void **p, struct inode *inode,
3707                               int mds, int drop, int unless, int force)
3708 {
3709         struct ceph_inode_info *ci = ceph_inode(inode);
3710         struct ceph_cap *cap;
3711         struct ceph_mds_request_release *rel = *p;
3712         int used, dirty;
3713         int ret = 0;
3714 
3715         spin_lock(&ci->i_ceph_lock);
3716         used = __ceph_caps_used(ci);
3717         dirty = __ceph_caps_dirty(ci);
3718 
3719         dout("encode_inode_release %p mds%d used|dirty %s drop %s unless %s\n",
3720              inode, mds, ceph_cap_string(used|dirty), ceph_cap_string(drop),
3721              ceph_cap_string(unless));
3722 
3723         /* only drop unused, clean caps */
3724         drop &= ~(used | dirty);
3725 
3726         cap = __get_cap_for_mds(ci, mds);
3727         if (cap && __cap_is_valid(cap)) {
3728                 if (force ||
3729                     ((cap->issued & drop) &&
3730                      (cap->issued & unless) == 0)) {
3731                         if ((cap->issued & drop) &&
3732                             (cap->issued & unless) == 0) {
3733                                 int wanted = __ceph_caps_wanted(ci);
3734                                 if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0)
3735                                         wanted |= cap->mds_wanted;
3736                                 dout("encode_inode_release %p cap %p "
3737                                      "%s -> %s, wanted %s -> %s\n", inode, cap,
3738                                      ceph_cap_string(cap->issued),
3739                                      ceph_cap_string(cap->issued & ~drop),
3740                                      ceph_cap_string(cap->mds_wanted),
3741                                      ceph_cap_string(wanted));
3742 
3743                                 cap->issued &= ~drop;
3744                                 cap->implemented &= ~drop;
3745                                 cap->mds_wanted = wanted;
3746                         } else {
3747                                 dout("encode_inode_release %p cap %p %s"
3748                                      " (force)\n", inode, cap,
3749                                      ceph_cap_string(cap->issued));
3750                         }
3751 
3752                         rel->ino = cpu_to_le64(ceph_ino(inode));
3753                         rel->cap_id = cpu_to_le64(cap->cap_id);
3754                         rel->seq = cpu_to_le32(cap->seq);
3755                         rel->issue_seq = cpu_to_le32(cap->issue_seq);
3756                         rel->mseq = cpu_to_le32(cap->mseq);
3757                         rel->caps = cpu_to_le32(cap->implemented);
3758                         rel->wanted = cpu_to_le32(cap->mds_wanted);
3759                         rel->dname_len = 0;
3760                         rel->dname_seq = 0;
3761                         *p += sizeof(*rel);
3762                         ret = 1;
3763                 } else {
3764                         dout("encode_inode_release %p cap %p %s\n",
3765                              inode, cap, ceph_cap_string(cap->issued));
3766                 }
3767         }
3768         spin_unlock(&ci->i_ceph_lock);
3769         return ret;
3770 }
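
     /*
      * Illustrative sketch (hypothetical caller, not part of caps.c): an MDS
      * request builder advances an encode cursor through a pre-sized buffer
      * and counts the release records emitted.  'p', 'releases', the buffer
      * and the drop/unless masks are made-up example values.
      */
     #if 0
             void *p = release_buf;       /* assumed pre-allocated buffer */
             int releases = 0;

             releases += ceph_encode_inode_release(&p, inode, mds,
                                     CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD,
                                     0 /* unless */, 0 /* force */);
     #endif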
3771 
3772 int ceph_encode_dentry_release(void **p, struct dentry *dentry,
3773                                int mds, int drop, int unless)
3774 {
3775         struct inode *dir = d_inode(dentry->d_parent);
3776         struct ceph_mds_request_release *rel = *p;
3777         struct ceph_dentry_info *di = ceph_dentry(dentry);
3778         int force = 0;
3779         int ret;
3780 
3781         /*
3782          * force a record for the directory caps if we have a dentry lease.
3783          * this is racy (can't take i_ceph_lock and d_lock together), but it
3784          * doesn't have to be perfect; the mds will revoke anything we don't
3785          * release.
3786          */
3787         spin_lock(&dentry->d_lock);
3788         if (di->lease_session && di->lease_session->s_mds == mds)
3789                 force = 1;
3790         spin_unlock(&dentry->d_lock);
3791 
3792         ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);
3793 
3794         spin_lock(&dentry->d_lock);
3795         if (ret && di->lease_session && di->lease_session->s_mds == mds) {
3796                 dout("encode_dentry_release %p mds%d seq %d\n",
3797                      dentry, mds, (int)di->lease_seq);
3798                 rel->dname_len = cpu_to_le32(dentry->d_name.len);
3799                 memcpy(*p, dentry->d_name.name, dentry->d_name.len);
3800                 *p += dentry->d_name.len;
3801                 rel->dname_seq = cpu_to_le32(di->lease_seq);
3802                 __ceph_mdsc_drop_dentry_lease(dentry);
3803         }
3804         spin_unlock(&dentry->d_lock);
3805         return ret;
3806 }
3807 
