TOMOYO Linux Cross Reference
Linux/fs/ceph/inode.c

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/random.h>
#include <linux/sort.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
        ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
        inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
        return 0;
}

struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
        struct inode *inode;
        ino_t t = ceph_vino_to_ino(vino);

        inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
        if (inode == NULL)
                return ERR_PTR(-ENOMEM);
        if (inode->i_state & I_NEW) {
                dout("get_inode created new inode %p %llx.%llx ino %llx\n",
                     inode, ceph_vinop(inode), (u64)inode->i_ino);
                unlock_new_inode(inode);
        }

        dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
             vino.snap, inode);
        return inode;
}
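
/*
 * Illustrative usage (a sketch, not code from this file): a caller
 * holding an ino/snap pair from an MDS reply typically does
 *
 *	struct ceph_vino vino = {
 *		.ino  = le64_to_cpu(ininfo->ino),
 *		.snap = le64_to_cpu(ininfo->snapid),
 *	};
 *	struct inode *in = ceph_get_inode(sb, vino);
 *
 *	if (IS_ERR(in))
 *		return PTR_ERR(in);
 *
 * and drops the reference with iput() when done; ceph_fill_trace()
 * below follows this pattern for the trace's target inode.
 */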

/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
        struct ceph_vino vino = {
                .ino = ceph_ino(parent),
                .snap = CEPH_SNAPDIR,
        };
        struct inode *inode = ceph_get_inode(parent->i_sb, vino);
        struct ceph_inode_info *ci = ceph_inode(inode);

        BUG_ON(!S_ISDIR(parent->i_mode));
        if (IS_ERR(inode))
                return inode;
        inode->i_mode = parent->i_mode;
        inode->i_uid = parent->i_uid;
        inode->i_gid = parent->i_gid;
        inode->i_op = &ceph_snapdir_iops;
        inode->i_fop = &ceph_snapdir_fops;
        ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
        ci->i_rbytes = 0;
        return inode;
}

const struct inode_operations ceph_file_iops = {
        .permission = ceph_permission,
        .setattr = ceph_setattr,
        .getattr = ceph_getattr,
        .listxattr = ceph_listxattr,
        .get_acl = ceph_get_acl,
        .set_acl = ceph_set_acl,
};

/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */
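
/*
 * A quick sketch of the frag encoding this tree operates on (assuming
 * the helpers behave as in include/linux/ceph/ceph_frag.h): a frag
 * packs a (bits, value) pair into a u32, with the split depth in the
 * top 8 bits and a left-justified hash prefix in the low 24 bits, e.g.
 *
 *	ceph_frag_make(0, 0)        == 0x00000000  root, whole hash range
 *	ceph_frag_make(1, 0x000000) == 0x01000000  left half of a 1-bit split
 *	ceph_frag_make(1, 0x800000) == 0x01800000  right half of a 1-bit split
 *
 * ceph_frag_contains_value(f, v) then simply checks that the top
 * ceph_frag_bits(f) bits of v match the frag's value prefix.
 */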

/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
                                                    u32 f)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct ceph_inode_frag *frag;
        int c;

        p = &ci->i_fragtree.rb_node;
        while (*p) {
                parent = *p;
                frag = rb_entry(parent, struct ceph_inode_frag, node);
                c = ceph_frag_compare(f, frag->frag);
                if (c < 0)
                        p = &(*p)->rb_left;
                else if (c > 0)
                        p = &(*p)->rb_right;
                else
                        return frag;
        }

        frag = kmalloc(sizeof(*frag), GFP_NOFS);
        if (!frag) {
                pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
                       "frag %x\n", &ci->vfs_inode,
                       ceph_vinop(&ci->vfs_inode), f);
                return ERR_PTR(-ENOMEM);
        }
        frag->frag = f;
        frag->split_by = 0;
        frag->mds = -1;
        frag->ndist = 0;

        rb_link_node(&frag->node, parent, p);
        rb_insert_color(&frag->node, &ci->i_fragtree);

        dout("get_or_create_frag added %llx.%llx frag %x\n",
             ceph_vinop(&ci->vfs_inode), f);
        return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
        struct rb_node *n = ci->i_fragtree.rb_node;

        while (n) {
                struct ceph_inode_frag *frag =
                        rb_entry(n, struct ceph_inode_frag, node);
                int c = ceph_frag_compare(f, frag->frag);
                if (c < 0)
                        n = n->rb_left;
                else if (c > 0)
                        n = n->rb_right;
                else
                        return frag;
        }
        return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
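/*
 * Worked example on a hypothetical tree: if the root is split by 1 and
 * its right child 0x01800000 is split by 1 again, then for a value v
 * whose top two hash bits are "10" the walk below visits
 * 0x00000000 -> 0x01800000 -> 0x02800000 and returns that leaf.
 */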
static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
                              struct ceph_inode_frag *pfrag, int *found)
{
        u32 t = ceph_frag_make(0, 0);
        struct ceph_inode_frag *frag;
        unsigned nway, i;
        u32 n;

        if (found)
                *found = 0;

        while (1) {
                WARN_ON(!ceph_frag_contains_value(t, v));
                frag = __ceph_find_frag(ci, t);
                if (!frag)
                        break; /* t is a leaf */
                if (frag->split_by == 0) {
                        if (pfrag)
                                memcpy(pfrag, frag, sizeof(*pfrag));
                        if (found)
                                *found = 1;
                        break;
                }

                /* choose child */
                nway = 1 << frag->split_by;
                dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
                     frag->split_by, nway);
                for (i = 0; i < nway; i++) {
                        n = ceph_frag_make_child(t, frag->split_by, i);
                        if (ceph_frag_contains_value(n, v)) {
                                t = n;
                                break;
                        }
                }
                BUG_ON(i == nway);
        }
        dout("choose_frag(%x) = %x\n", v, t);

        return t;
}

u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
                     struct ceph_inode_frag *pfrag, int *found)
{
        u32 ret;
        mutex_lock(&ci->i_fragtree_mutex);
        ret = __ceph_choose_frag(ci, v, pfrag, found);
        mutex_unlock(&ci->i_fragtree_mutex);
        return ret;
}

/*
 * Process dirfrag (delegation) info from the mds.  Include a leaf
 * fragment in the tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
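/*
 * Illustrative (hypothetical values): dirinfo { frag = 0x01800000,
 * auth = 1, ndist = 2, dist = { 1, 3 } } records that this fragment is
 * delegated to mds1 and replicated on mds1 and mds3; that is the
 * delegation info stored in the frag below.
 */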
static int ceph_fill_dirfrag(struct inode *inode,
                             struct ceph_mds_reply_dirfrag *dirinfo)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_inode_frag *frag;
        u32 id = le32_to_cpu(dirinfo->frag);
        int mds = le32_to_cpu(dirinfo->auth);
        int ndist = le32_to_cpu(dirinfo->ndist);
        int diri_auth = -1;
        int i;
        int err = 0;

        spin_lock(&ci->i_ceph_lock);
        if (ci->i_auth_cap)
                diri_auth = ci->i_auth_cap->mds;
        spin_unlock(&ci->i_ceph_lock);

        if (mds == -1) /* CDIR_AUTH_PARENT */
                mds = diri_auth;

        mutex_lock(&ci->i_fragtree_mutex);
        if (ndist == 0 && mds == diri_auth) {
                /* no delegation info needed. */
                frag = __ceph_find_frag(ci, id);
                if (!frag)
                        goto out;
                if (frag->split_by == 0) {
                        /* tree leaf, remove */
                        dout("fill_dirfrag removed %llx.%llx frag %x"
                             " (no ref)\n", ceph_vinop(inode), id);
                        rb_erase(&frag->node, &ci->i_fragtree);
                        kfree(frag);
                } else {
                        /* tree branch, keep and clear */
                        dout("fill_dirfrag cleared %llx.%llx frag %x"
                             " referral\n", ceph_vinop(inode), id);
                        frag->mds = -1;
                        frag->ndist = 0;
                }
                goto out;
        }

        /* find/add this frag to store mds delegation info */
        frag = __get_or_create_frag(ci, id);
        if (IS_ERR(frag)) {
                /* this is not the end of the world; we can continue
                   with bad/inaccurate delegation info */
                pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
                       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
                err = -ENOMEM;
                goto out;
        }

        frag->mds = mds;
        frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
        for (i = 0; i < frag->ndist; i++)
                frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
        dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
             ceph_vinop(inode), frag->frag, frag->ndist);

out:
        mutex_unlock(&ci->i_fragtree_mutex);
        return err;
}

static int frag_tree_split_cmp(const void *l, const void *r)
{
        struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split *)l;
        struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split *)r;
        return ceph_frag_compare(le32_to_cpu(ls->frag),
                                 le32_to_cpu(rs->frag));
}

static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
{
        if (!frag)
                return f == ceph_frag_make(0, 0);
        if (ceph_frag_bits(f) != ceph_frag_bits(frag->frag) + frag->split_by)
                return false;
        return ceph_frag_contains_value(frag->frag, ceph_frag_value(f));
}
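
/*
 * For example (using the encoding sketched earlier): with a prev_frag
 * of 0x01800000 split by 1, is_frag_child() accepts exactly 0x02800000
 * and 0x02c00000; with a NULL prev_frag only the root frag qualifies.
 */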
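/*
 * Rebuild the cached fragtree from an MDS-supplied split list.  A cheap
 * consistency probe (split count, one randomly sampled split, the
 * dirinfo leaf) decides whether an update is needed at all; if so, the
 * splits are sorted and merged against the rbtree in a single pass,
 * pruning any cached node that is neither a listed split nor a child
 * of the split preceding it.
 */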
static int ceph_fill_fragtree(struct inode *inode,
                              struct ceph_frag_tree_head *fragtree,
                              struct ceph_mds_reply_dirfrag *dirinfo)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_inode_frag *frag, *prev_frag = NULL;
        struct rb_node *rb_node;
        unsigned i, split_by, nsplits;
        u32 id;
        bool update = false;

        mutex_lock(&ci->i_fragtree_mutex);
        nsplits = le32_to_cpu(fragtree->nsplits);
        if (nsplits != ci->i_fragtree_nsplits) {
                update = true;
        } else if (nsplits) {
                i = prandom_u32() % nsplits;
                id = le32_to_cpu(fragtree->splits[i].frag);
                if (!__ceph_find_frag(ci, id))
                        update = true;
        } else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
                rb_node = rb_first(&ci->i_fragtree);
                frag = rb_entry(rb_node, struct ceph_inode_frag, node);
                if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
                        update = true;
        }
        if (!update && dirinfo) {
                id = le32_to_cpu(dirinfo->frag);
                if (id != __ceph_choose_frag(ci, id, NULL, NULL))
                        update = true;
        }
        if (!update)
                goto out_unlock;

        if (nsplits > 1) {
                sort(fragtree->splits, nsplits, sizeof(fragtree->splits[0]),
                     frag_tree_split_cmp, NULL);
        }

        dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
        rb_node = rb_first(&ci->i_fragtree);
        for (i = 0; i < nsplits; i++) {
                id = le32_to_cpu(fragtree->splits[i].frag);
                split_by = le32_to_cpu(fragtree->splits[i].by);
                if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) {
                        pr_err("fill_fragtree %llx.%llx invalid split %d/%u, "
                               "frag %x split by %d\n", ceph_vinop(inode),
                               i, nsplits, id, split_by);
                        continue;
                }
                frag = NULL;
                while (rb_node) {
                        frag = rb_entry(rb_node, struct ceph_inode_frag, node);
                        if (ceph_frag_compare(frag->frag, id) >= 0) {
                                if (frag->frag != id)
                                        frag = NULL;
                                else
                                        rb_node = rb_next(rb_node);
                                break;
                        }
                        rb_node = rb_next(rb_node);
                        /* delete stale split/leaf node */
                        if (frag->split_by > 0 ||
                            !is_frag_child(frag->frag, prev_frag)) {
                                rb_erase(&frag->node, &ci->i_fragtree);
                                if (frag->split_by > 0)
                                        ci->i_fragtree_nsplits--;
                                kfree(frag);
                        }
                        frag = NULL;
                }
                if (!frag) {
                        frag = __get_or_create_frag(ci, id);
                        if (IS_ERR(frag))
                                continue;
                }
                if (frag->split_by == 0)
                        ci->i_fragtree_nsplits++;
                frag->split_by = split_by;
                dout(" frag %x split by %d\n", frag->frag, frag->split_by);
                prev_frag = frag;
        }
        while (rb_node) {
                frag = rb_entry(rb_node, struct ceph_inode_frag, node);
                rb_node = rb_next(rb_node);
                /* delete stale split/leaf node */
                if (frag->split_by > 0 ||
                    !is_frag_child(frag->frag, prev_frag)) {
                        rb_erase(&frag->node, &ci->i_fragtree);
                        if (frag->split_by > 0)
                                ci->i_fragtree_nsplits--;
                        kfree(frag);
                }
        }
out_unlock:
        mutex_unlock(&ci->i_fragtree_mutex);
        return 0;
}

/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
        struct ceph_inode_info *ci;
        int i;

        ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
        if (!ci)
                return NULL;

        dout("alloc_inode %p\n", &ci->vfs_inode);

        spin_lock_init(&ci->i_ceph_lock);

        ci->i_version = 0;
        ci->i_inline_version = 0;
        ci->i_time_warp_seq = 0;
        ci->i_ceph_flags = 0;
        atomic64_set(&ci->i_ordered_count, 1);
        atomic64_set(&ci->i_release_count, 1);
        atomic64_set(&ci->i_complete_seq[0], 0);
        atomic64_set(&ci->i_complete_seq[1], 0);
        ci->i_symlink = NULL;

        memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
        RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL);

        ci->i_fragtree = RB_ROOT;
        mutex_init(&ci->i_fragtree_mutex);

        ci->i_xattrs.blob = NULL;
        ci->i_xattrs.prealloc_blob = NULL;
        ci->i_xattrs.dirty = false;
        ci->i_xattrs.index = RB_ROOT;
        ci->i_xattrs.count = 0;
        ci->i_xattrs.names_size = 0;
        ci->i_xattrs.vals_size = 0;
        ci->i_xattrs.version = 0;
        ci->i_xattrs.index_version = 0;

        ci->i_caps = RB_ROOT;
        ci->i_auth_cap = NULL;
        ci->i_dirty_caps = 0;
        ci->i_flushing_caps = 0;
        INIT_LIST_HEAD(&ci->i_dirty_item);
        INIT_LIST_HEAD(&ci->i_flushing_item);
        ci->i_prealloc_cap_flush = NULL;
        INIT_LIST_HEAD(&ci->i_cap_flush_list);
        init_waitqueue_head(&ci->i_cap_wq);
        ci->i_hold_caps_min = 0;
        ci->i_hold_caps_max = 0;
        INIT_LIST_HEAD(&ci->i_cap_delay_list);
        INIT_LIST_HEAD(&ci->i_cap_snaps);
        ci->i_head_snapc = NULL;
        ci->i_snap_caps = 0;

        for (i = 0; i < CEPH_FILE_MODE_BITS; i++)
                ci->i_nr_by_mode[i] = 0;

        mutex_init(&ci->i_truncate_mutex);
        ci->i_truncate_seq = 0;
        ci->i_truncate_size = 0;
        ci->i_truncate_pending = 0;

        ci->i_max_size = 0;
        ci->i_reported_size = 0;
        ci->i_wanted_max_size = 0;
        ci->i_requested_max_size = 0;

        ci->i_pin_ref = 0;
        ci->i_rd_ref = 0;
        ci->i_rdcache_ref = 0;
        ci->i_wr_ref = 0;
        ci->i_wb_ref = 0;
        ci->i_wrbuffer_ref = 0;
        ci->i_wrbuffer_ref_head = 0;
        ci->i_shared_gen = 0;
        ci->i_rdcache_gen = 0;
        ci->i_rdcache_revoking = 0;

        INIT_LIST_HEAD(&ci->i_unsafe_writes);
        INIT_LIST_HEAD(&ci->i_unsafe_dirops);
        INIT_LIST_HEAD(&ci->i_unsafe_iops);
        spin_lock_init(&ci->i_unsafe_lock);

        ci->i_snap_realm = NULL;
        INIT_LIST_HEAD(&ci->i_snap_realm_item);
        INIT_LIST_HEAD(&ci->i_snap_flush_item);

        INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
        INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);

        INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

        ceph_fscache_inode_init(ci);

        return &ci->vfs_inode;
}

static void ceph_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        struct ceph_inode_info *ci = ceph_inode(inode);

        kmem_cache_free(ceph_inode_cachep, ci);
}

void ceph_destroy_inode(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_inode_frag *frag;
        struct rb_node *n;

        dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

        ceph_fscache_unregister_inode_cookie(ci);

        ceph_queue_caps_release(inode);

        /*
         * we may still have a snap_realm reference if there are stray
         * caps in i_snap_caps.
         */
        if (ci->i_snap_realm) {
                struct ceph_mds_client *mdsc =
                        ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
                struct ceph_snap_realm *realm = ci->i_snap_realm;

                dout(" dropping residual ref to snap realm %p\n", realm);
                spin_lock(&realm->inodes_with_caps_lock);
                list_del_init(&ci->i_snap_realm_item);
                spin_unlock(&realm->inodes_with_caps_lock);
                ceph_put_snap_realm(mdsc, realm);
        }

        kfree(ci->i_symlink);
        while ((n = rb_first(&ci->i_fragtree)) != NULL) {
                frag = rb_entry(n, struct ceph_inode_frag, node);
                rb_erase(n, &ci->i_fragtree);
                kfree(frag);
        }
        ci->i_fragtree_nsplits = 0;

        __ceph_destroy_xattrs(ci);
        if (ci->i_xattrs.blob)
                ceph_buffer_put(ci->i_xattrs.blob);
        if (ci->i_xattrs.prealloc_blob)
                ceph_buffer_put(ci->i_xattrs.prealloc_blob);

        ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));

        call_rcu(&inode->i_rcu, ceph_i_callback);
}

int ceph_drop_inode(struct inode *inode)
{
        /*
         * A positive dentry always comes with its corresponding inode
         * in an MDS reply, so there is no need to keep the inode in
         * the cache once all of its aliases have been dropped.
         */
        return 1;
}

void ceph_evict_inode(struct inode *inode)
{
        /* wait unsafe sync writes */
        ceph_sync_write_wait(inode);
        truncate_inode_pages_final(&inode->i_data);
        clear_inode(inode);
}

static inline blkcnt_t calc_inode_blocks(u64 size)
{
        return (size + (1 << 9) - 1) >> 9;
}
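
/* i_blocks is in 512-byte units, so e.g. calc_inode_blocks(1000) == 2 */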

/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
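/*
 * Concrete case (illustrative): if we believe the size is 4096 at
 * truncate_seq 2 and the MDS reports size 0 at truncate_seq 3, the
 * newer seq wins and a local truncation may be queued; a report of
 * size 0 at the *same* seq is ignored, since within one seq the size
 * only grows.
 */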
int ceph_fill_file_size(struct inode *inode, int issued,
                        u32 truncate_seq, u64 truncate_size, u64 size)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int queue_trunc = 0;

        if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
            (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
                dout("size %lld -> %llu\n", inode->i_size, size);
                if (size > 0 && S_ISDIR(inode->i_mode)) {
                        pr_err("fill_file_size non-zero size for directory\n");
                        size = 0;
                }
                i_size_write(inode, size);
                inode->i_blocks = calc_inode_blocks(size);
                ci->i_reported_size = size;
                if (truncate_seq != ci->i_truncate_seq) {
                        dout("truncate_seq %u -> %u\n",
                             ci->i_truncate_seq, truncate_seq);
                        ci->i_truncate_seq = truncate_seq;

                        /* the MDS should have revoked these caps */
                        WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
                                               CEPH_CAP_FILE_RD |
                                               CEPH_CAP_FILE_WR |
                                               CEPH_CAP_FILE_LAZYIO));
                        /*
                         * If we hold relevant caps, or in the case where we're
                         * not the only client referencing this file and we
                         * don't hold those caps, then we need to check whether
                         * the file is either opened or mmaped
                         */
                        if ((issued & (CEPH_CAP_FILE_CACHE|
                                       CEPH_CAP_FILE_BUFFER)) ||
                            mapping_mapped(inode->i_mapping) ||
                            __ceph_caps_file_wanted(ci)) {
                                ci->i_truncate_pending++;
                                queue_trunc = 1;
                        }
                }
        }
        if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
            ci->i_truncate_size != truncate_size) {
                dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
                     truncate_size);
                ci->i_truncate_size = truncate_size;
        }

        if (queue_trunc)
                ceph_fscache_invalidate(inode);

        return queue_trunc;
}

void ceph_fill_file_time(struct inode *inode, int issued,
                         u64 time_warp_seq, struct timespec *ctime,
                         struct timespec *mtime, struct timespec *atime)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int warn = 0;

        if (issued & (CEPH_CAP_FILE_EXCL|
                      CEPH_CAP_FILE_WR|
                      CEPH_CAP_FILE_BUFFER|
                      CEPH_CAP_AUTH_EXCL|
                      CEPH_CAP_XATTR_EXCL)) {
                if (timespec_compare(ctime, &inode->i_ctime) > 0) {
                        dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
                             inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
                             ctime->tv_sec, ctime->tv_nsec);
                        inode->i_ctime = *ctime;
                }
                if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
                        /* the MDS did a utimes() */
                        dout("mtime %ld.%09ld -> %ld.%09ld "
                             "tw %d -> %d\n",
                             inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
                             mtime->tv_sec, mtime->tv_nsec,
                             ci->i_time_warp_seq, (int)time_warp_seq);

                        inode->i_mtime = *mtime;
                        inode->i_atime = *atime;
                        ci->i_time_warp_seq = time_warp_seq;
                } else if (time_warp_seq == ci->i_time_warp_seq) {
                        /* nobody did utimes(); take the max */
                        if (timespec_compare(mtime, &inode->i_mtime) > 0) {
                                dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
                                     inode->i_mtime.tv_sec,
                                     inode->i_mtime.tv_nsec,
                                     mtime->tv_sec, mtime->tv_nsec);
                                inode->i_mtime = *mtime;
                        }
                        if (timespec_compare(atime, &inode->i_atime) > 0) {
                                dout("atime %ld.%09ld -> %ld.%09ld inc\n",
                                     inode->i_atime.tv_sec,
                                     inode->i_atime.tv_nsec,
                                     atime->tv_sec, atime->tv_nsec);
                                inode->i_atime = *atime;
                        }
                } else if (issued & CEPH_CAP_FILE_EXCL) {
                        /* we did a utimes(); ignore mds values */
                } else {
                        warn = 1;
                }
        } else {
                /* we have no write|excl caps; whatever the MDS says is true */
                if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
                        inode->i_ctime = *ctime;
                        inode->i_mtime = *mtime;
                        inode->i_atime = *atime;
                        ci->i_time_warp_seq = time_warp_seq;
                } else {
                        warn = 1;
                }
        }
        if (warn) /* time_warp_seq shouldn't go backwards */
                dout("%p mds time_warp_seq %llu < %u\n",
                     inode, time_warp_seq, ci->i_time_warp_seq);
}

/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode, struct page *locked_page,
                      struct ceph_mds_reply_info_in *iinfo,
                      struct ceph_mds_reply_dirfrag *dirinfo,
                      struct ceph_mds_session *session,
                      unsigned long ttl_from, int cap_fmode,
                      struct ceph_cap_reservation *caps_reservation)
{
        struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
        struct ceph_mds_reply_inode *info = iinfo->in;
        struct ceph_inode_info *ci = ceph_inode(inode);
        int issued = 0, implemented, new_issued;
        struct timespec mtime, atime, ctime;
        struct ceph_buffer *xattr_blob = NULL;
        struct ceph_string *pool_ns = NULL;
        struct ceph_cap *new_cap = NULL;
        int err = 0;
        bool wake = false;
        bool queue_trunc = false;
        bool new_version = false;
        bool fill_inline = false;

        dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
             inode, ceph_vinop(inode), le64_to_cpu(info->version),
             ci->i_version);

        /* prealloc new cap struct */
        if (info->cap.caps && ceph_snap(inode) == CEPH_NOSNAP)
                new_cap = ceph_get_cap(mdsc, caps_reservation);

        /*
         * prealloc xattr data, if it looks like we'll need it.  only
         * if len > 4 (meaning there are actually xattrs; the first 4
         * bytes are the xattr count).
         */
        if (iinfo->xattr_len > 4) {
                xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
                if (!xattr_blob)
                        pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
                               iinfo->xattr_len);
        }

        if (iinfo->pool_ns_len > 0)
                pool_ns = ceph_find_or_create_string(iinfo->pool_ns_data,
                                                     iinfo->pool_ns_len);

        spin_lock(&ci->i_ceph_lock);

        /*
         * The provided version will be odd if the inode value is
         * projected, and even if it is stable.  Skip the update if we
         * have newer stable info (ours>=theirs, e.g. due to racing mds
         * replies), unless we are getting projected (unstable) info
         * (in which case the version is odd, and we want ours>theirs).
         *   us   them
         *   2    2     skip
         *   3    2     skip
         *   3    3     update
         */
        if (ci->i_version == 0 ||
            ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
             le64_to_cpu(info->version) > (ci->i_version & ~1)))
                new_version = true;

        issued = __ceph_caps_issued(ci, &implemented);
        issued |= implemented | __ceph_caps_dirty(ci);
        new_issued = ~issued & le32_to_cpu(info->cap.caps);

        /* update inode */
        ci->i_version = le64_to_cpu(info->version);
        inode->i_version++;
        inode->i_rdev = le32_to_cpu(info->rdev);
        inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;

        if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
            (issued & CEPH_CAP_AUTH_EXCL) == 0) {
                inode->i_mode = le32_to_cpu(info->mode);
                inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
                inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
                dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
                     from_kuid(&init_user_ns, inode->i_uid),
                     from_kgid(&init_user_ns, inode->i_gid));
        }

        if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
            (issued & CEPH_CAP_LINK_EXCL) == 0)
                set_nlink(inode, le32_to_cpu(info->nlink));

        if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
                /* be careful with mtime, atime, size */
                ceph_decode_timespec(&atime, &info->atime);
                ceph_decode_timespec(&mtime, &info->mtime);
                ceph_decode_timespec(&ctime, &info->ctime);
                ceph_fill_file_time(inode, issued,
                                le32_to_cpu(info->time_warp_seq),
                                &ctime, &mtime, &atime);
        }

        if (new_version ||
            (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
                s64 old_pool = ci->i_layout.pool_id;
                struct ceph_string *old_ns;

                ceph_file_layout_from_legacy(&ci->i_layout, &info->layout);
                old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
                                        lockdep_is_held(&ci->i_ceph_lock));
                rcu_assign_pointer(ci->i_layout.pool_ns, pool_ns);

                if (ci->i_layout.pool_id != old_pool || pool_ns != old_ns)
                        ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;

                pool_ns = old_ns;

                queue_trunc = ceph_fill_file_size(inode, issued,
                                        le32_to_cpu(info->truncate_seq),
                                        le64_to_cpu(info->truncate_size),
                                        le64_to_cpu(info->size));
                /* only update max_size on auth cap */
                if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
                    ci->i_max_size != le64_to_cpu(info->max_size)) {
                        dout("max_size %lld -> %llu\n", ci->i_max_size,
                                        le64_to_cpu(info->max_size));
                        ci->i_max_size = le64_to_cpu(info->max_size);
                }
        }

        /* xattrs */
        /* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
        if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
            le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
                if (ci->i_xattrs.blob)
                        ceph_buffer_put(ci->i_xattrs.blob);
                ci->i_xattrs.blob = xattr_blob;
                if (xattr_blob)
                        memcpy(ci->i_xattrs.blob->vec.iov_base,
                               iinfo->xattr_data, iinfo->xattr_len);
                ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
                ceph_forget_all_cached_acls(inode);
                xattr_blob = NULL;
        }

        inode->i_mapping->a_ops = &ceph_aops;

        switch (inode->i_mode & S_IFMT) {
        case S_IFIFO:
        case S_IFBLK:
        case S_IFCHR:
        case S_IFSOCK:
                init_special_inode(inode, inode->i_mode, inode->i_rdev);
                inode->i_op = &ceph_file_iops;
                break;
        case S_IFREG:
                inode->i_op = &ceph_file_iops;
                inode->i_fop = &ceph_file_fops;
                break;
        case S_IFLNK:
                inode->i_op = &ceph_symlink_iops;
                if (!ci->i_symlink) {
                        u32 symlen = iinfo->symlink_len;
                        char *sym;

                        spin_unlock(&ci->i_ceph_lock);

                        if (symlen != i_size_read(inode)) {
                                pr_err("fill_inode %llx.%llx BAD symlink "
                                        "size %lld\n", ceph_vinop(inode),
                                        i_size_read(inode));
                                i_size_write(inode, symlen);
                                inode->i_blocks = calc_inode_blocks(symlen);
                        }

                        err = -ENOMEM;
                        sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
                        if (!sym)
                                goto out;

                        spin_lock(&ci->i_ceph_lock);
                        if (!ci->i_symlink)
                                ci->i_symlink = sym;
                        else
                                kfree(sym); /* lost a race */
                }
                inode->i_link = ci->i_symlink;
                break;
        case S_IFDIR:
                inode->i_op = &ceph_dir_iops;
                inode->i_fop = &ceph_dir_fops;

                ci->i_dir_layout = iinfo->dir_layout;

                ci->i_files = le64_to_cpu(info->files);
                ci->i_subdirs = le64_to_cpu(info->subdirs);
                ci->i_rbytes = le64_to_cpu(info->rbytes);
                ci->i_rfiles = le64_to_cpu(info->rfiles);
                ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
                ceph_decode_timespec(&ci->i_rctime, &info->rctime);
                break;
        default:
                pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
                       ceph_vinop(inode), inode->i_mode);
        }

        /* were we issued a capability? */
        if (info->cap.caps) {
                if (ceph_snap(inode) == CEPH_NOSNAP) {
                        unsigned caps = le32_to_cpu(info->cap.caps);
                        ceph_add_cap(inode, session,
                                     le64_to_cpu(info->cap.cap_id),
                                     cap_fmode, caps,
                                     le32_to_cpu(info->cap.wanted),
                                     le32_to_cpu(info->cap.seq),
                                     le32_to_cpu(info->cap.mseq),
                                     le64_to_cpu(info->cap.realm),
                                     info->cap.flags, &new_cap);

                        /* set dir completion flag? */
                        if (S_ISDIR(inode->i_mode) &&
                            ci->i_files == 0 && ci->i_subdirs == 0 &&
                            (caps & CEPH_CAP_FILE_SHARED) &&
                            (issued & CEPH_CAP_FILE_EXCL) == 0 &&
                            !__ceph_dir_is_complete(ci)) {
                                dout(" marking %p complete (empty)\n", inode);
                                i_size_write(inode, 0);
                                __ceph_dir_set_complete(ci,
                                        atomic64_read(&ci->i_release_count),
                                        atomic64_read(&ci->i_ordered_count));
                        }

                        wake = true;
                } else {
                        dout(" %p got snap_caps %s\n", inode,
                             ceph_cap_string(le32_to_cpu(info->cap.caps)));
                        ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
                        if (cap_fmode >= 0)
                                __ceph_get_fmode(ci, cap_fmode);
                }
        } else if (cap_fmode >= 0) {
                pr_warn("mds issued no caps on %llx.%llx\n",
                        ceph_vinop(inode));
                __ceph_get_fmode(ci, cap_fmode);
        }

        if (iinfo->inline_version > 0 &&
            iinfo->inline_version >= ci->i_inline_version) {
                int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
                ci->i_inline_version = iinfo->inline_version;
                if (ci->i_inline_version != CEPH_INLINE_NONE &&
                    (locked_page ||
                     (le32_to_cpu(info->cap.caps) & cache_caps)))
                        fill_inline = true;
        }

        spin_unlock(&ci->i_ceph_lock);

        if (fill_inline)
                ceph_fill_inline_data(inode, locked_page,
                                      iinfo->inline_data, iinfo->inline_len);

        if (wake)
                wake_up_all(&ci->i_cap_wq);

        /* queue truncate if we saw i_size decrease */
        if (queue_trunc)
                ceph_queue_vmtruncate(inode);

        /* populate frag tree */
        if (S_ISDIR(inode->i_mode))
                ceph_fill_fragtree(inode, &info->fragtree, dirinfo);

        /* update delegation info? */
        if (dirinfo)
                ceph_fill_dirfrag(inode, dirinfo);

        err = 0;
out:
        if (new_cap)
                ceph_put_cap(mdsc, new_cap);
        if (xattr_blob)
                ceph_buffer_put(xattr_blob);
        ceph_put_string(pool_ns);
        return err;
}

/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
                                struct ceph_mds_reply_lease *lease,
                                struct ceph_mds_session *session,
                                unsigned long from_time)
{
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        unsigned long duration = le32_to_cpu(lease->duration_ms);
        unsigned long ttl = from_time + (duration * HZ) / 1000;
        unsigned long half_ttl = from_time + (duration * HZ / 2) / 1000;
        struct inode *dir;

        spin_lock(&dentry->d_lock);
        dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
             dentry, duration, ttl);

        /* make lease_rdcache_gen match directory */
        dir = d_inode(dentry->d_parent);

        /* only track leases on regular dentries */
        if (ceph_snap(dir) != CEPH_NOSNAP)
                goto out_unlock;

        di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

        if (duration == 0)
                goto out_unlock;

        if (di->lease_gen == session->s_cap_gen &&
            time_before(ttl, di->time))
                goto out_unlock;  /* we already have a newer lease. */

        if (di->lease_session && di->lease_session != session)
                goto out_unlock;

        ceph_dentry_lru_touch(dentry);

        if (!di->lease_session)
                di->lease_session = ceph_get_mds_session(session);
        di->lease_gen = session->s_cap_gen;
        di->lease_seq = le32_to_cpu(lease->seq);
        di->lease_renew_after = half_ttl;
        di->lease_renew_from = 0;
        di->time = ttl;
out_unlock:
        spin_unlock(&dentry->d_lock);
        return;
}
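
/*
 * Timing example: with HZ == 1000 and a 30000 ms lease granted at
 * jiffies J, the code above yields ttl == J + 30000 and
 * lease_renew_after == J + 15000, i.e. renewal is attempted halfway
 * through the lease.
 */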

/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in)
{
        struct dentry *realdn;

        BUG_ON(d_inode(dn));

        /* dn must be unhashed */
        if (!d_unhashed(dn))
                d_drop(dn);
        realdn = d_splice_alias(in, dn);
        if (IS_ERR(realdn)) {
                pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
                       PTR_ERR(realdn), dn, in, ceph_vinop(in));
                dn = realdn; /* note realdn contains the error */
                goto out;
        } else if (realdn) {
                dout("dn %p (%d) spliced with %p (%d) "
                     "inode %p ino %llx.%llx\n",
                     dn, d_count(dn),
                     realdn, d_count(realdn),
                     d_inode(realdn), ceph_vinop(d_inode(realdn)));
                dput(dn);
                dn = realdn;
        } else {
                BUG_ON(!ceph_dentry(dn));
                dout("dn %p attached to %p ino %llx.%llx\n",
                     dn, d_inode(dn), ceph_vinop(d_inode(dn)));
        }
out:
        return dn;
}
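
/*
 * Note on the d_splice_alias() contract assumed above: it returns NULL
 * when dn itself was bound to the inode, a different dentry when an
 * existing alias was reused (dn is then dropped in favor of realdn),
 * or an ERR_PTR on failure.
 */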

/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry.
 *  and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
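/*
 * For a successful lookup of "dir/name", for instance, the trace
 * carries inode info for "dir", a dentry plus lease for "name", and
 * inode info for the target itself; they are processed below in that
 * order.
 */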
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
                    struct ceph_mds_session *session)
{
        struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
        struct inode *in = NULL;
        struct ceph_vino vino;
        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
        int err = 0;

        dout("fill_trace %p is_dentry %d is_target %d\n", req,
             rinfo->head->is_dentry, rinfo->head->is_target);

#if 0
        /*
         * Debugging hook:
         *
         * If we resend completed ops to a recovering mds, we get no
         * trace.  Since that is very rare, pretend this is the case
         * to ensure the 'no trace' handlers in the callers behave.
         *
         * Fill in inodes unconditionally to avoid breaking cap
         * invariants.
         */
        if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
                pr_info("fill_trace faking empty trace on %lld %s\n",
                        req->r_tid, ceph_mds_op_name(rinfo->head->op));
                if (rinfo->head->is_dentry) {
                        rinfo->head->is_dentry = 0;
                        err = fill_inode(req->r_locked_dir,
                                         &rinfo->diri, rinfo->dirfrag,
                                         session, req->r_request_started, -1);
                }
                if (rinfo->head->is_target) {
                        rinfo->head->is_target = 0;
                        ininfo = rinfo->targeti.in;
                        vino.ino = le64_to_cpu(ininfo->ino);
                        vino.snap = le64_to_cpu(ininfo->snapid);
                        in = ceph_get_inode(sb, vino);
                        err = fill_inode(in, &rinfo->targeti, NULL,
                                         session, req->r_request_started,
                                         req->r_fmode);
                        iput(in);
                }
        }
#endif

        if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
                dout("fill_trace reply is empty!\n");
                if (rinfo->head->result == 0 && req->r_locked_dir)
                        ceph_invalidate_dir_request(req);
                return 0;
        }

        if (rinfo->head->is_dentry) {
                struct inode *dir = req->r_locked_dir;

                if (dir) {
                        err = fill_inode(dir, NULL,
                                         &rinfo->diri, rinfo->dirfrag,
                                         session, req->r_request_started, -1,
                                         &req->r_caps_reservation);
                        if (err < 0)
                                goto done;
                } else {
                        WARN_ON_ONCE(1);
                }

                if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME) {
                        struct qstr dname;
                        struct dentry *dn, *parent;

                        BUG_ON(!rinfo->head->is_target);
                        BUG_ON(req->r_dentry);

                        parent = d_find_any_alias(dir);
                        BUG_ON(!parent);

                        dname.name = rinfo->dname;
                        dname.len = rinfo->dname_len;
                        dname.hash = full_name_hash(parent, dname.name, dname.len);
                        vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
                        vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
retry_lookup:
                        dn = d_lookup(parent, &dname);
                        dout("d_lookup on parent=%p name=%.*s got %p\n",
                             parent, dname.len, dname.name, dn);

                        if (!dn) {
                                dn = d_alloc(parent, &dname);
                                dout("d_alloc %p '%.*s' = %p\n", parent,
                                     dname.len, dname.name, dn);
                                if (dn == NULL) {
                                        dput(parent);
                                        err = -ENOMEM;
                                        goto done;
                                }
                                err = 0;
                        } else if (d_really_is_positive(dn) &&
                                   (ceph_ino(d_inode(dn)) != vino.ino ||
                                    ceph_snap(d_inode(dn)) != vino.snap)) {
                                dout(" dn %p points to wrong inode %p\n",
                                     dn, d_inode(dn));
                                d_delete(dn);
                                dput(dn);
                                goto retry_lookup;
                        }

                        req->r_dentry = dn;
                        dput(parent);
                }
        }

        if (rinfo->head->is_target) {
                vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
                vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

                in = ceph_get_inode(sb, vino);
                if (IS_ERR(in)) {
                        err = PTR_ERR(in);
                        goto done;
                }
                req->r_target_inode = in;

                err = fill_inode(in, req->r_locked_page, &rinfo->targeti, NULL,
                                session, req->r_request_started,
                                (!req->r_aborted && rinfo->head->result == 0) ?
                                req->r_fmode : -1,
                                &req->r_caps_reservation);
                if (err < 0) {
                        pr_err("fill_inode badness %p %llx.%llx\n",
                                in, ceph_vinop(in));
                        goto done;
                }
        }

        /*
         * ignore null lease/binding on snapdir ENOENT, or else we
         * will have trouble splicing in the virtual snapdir later
         */
        if (rinfo->head->is_dentry && !req->r_aborted &&
            req->r_locked_dir &&
            (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
                                               fsc->mount_options->snapdir_name,
                                               req->r_dentry->d_name.len))) {
                /*
                 * lookup link rename   : null -> possibly existing inode
                 * mknod symlink mkdir  : null -> new inode
                 * unlink               : linked -> null
                 */
                struct inode *dir = req->r_locked_dir;
                struct dentry *dn = req->r_dentry;
                bool have_dir_cap, have_lease;

                BUG_ON(!dn);
                BUG_ON(!dir);
                BUG_ON(d_inode(dn->d_parent) != dir);
                BUG_ON(ceph_ino(dir) !=
                       le64_to_cpu(rinfo->diri.in->ino));
                BUG_ON(ceph_snap(dir) !=
                       le64_to_cpu(rinfo->diri.in->snapid));

                /* do we have a lease on the whole dir? */
                have_dir_cap =
                        (le32_to_cpu(rinfo->diri.in->cap.caps) &
                         CEPH_CAP_FILE_SHARED);

                /* do we have a dn lease? */
                have_lease = have_dir_cap ||
                        le32_to_cpu(rinfo->dlease->duration_ms);
                if (!have_lease)
                        dout("fill_trace  no dentry lease or dir cap\n");

                /* rename? */
                if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
                        struct inode *olddir = req->r_old_dentry_dir;
                        BUG_ON(!olddir);

                        dout(" src %p '%pd' dst %p '%pd'\n",
                             req->r_old_dentry,
                             req->r_old_dentry,
                             dn, dn);
                        dout("fill_trace doing d_move %p -> %p\n",
                             req->r_old_dentry, dn);

                        /* d_move screws up sibling dentries' offsets */
                        ceph_dir_clear_ordered(dir);
                        ceph_dir_clear_ordered(olddir);

                        d_move(req->r_old_dentry, dn);
                        dout(" src %p '%pd' dst %p '%pd'\n",
                             req->r_old_dentry,
                             req->r_old_dentry,
                             dn, dn);

                        /* ensure target dentry is invalidated, despite
                           rehashing bug in vfs_rename_dir */
                        ceph_invalidate_dentry_lease(dn);

                        dout("dn %p gets new offset %lld\n", req->r_old_dentry,
                             ceph_dentry(req->r_old_dentry)->offset);

                        dn = req->r_old_dentry;  /* use old_dentry */
                }

                /* null dentry? */
                if (!rinfo->head->is_target) {
                        dout("fill_trace null dentry\n");
                        if (d_really_is_positive(dn)) {
                                ceph_dir_clear_ordered(dir);
                                dout("d_delete %p\n", dn);
                                d_delete(dn);
                        } else {
                                if (have_lease && d_unhashed(dn))
                                        d_add(dn, NULL);
                                update_dentry_lease(dn, rinfo->dlease,
                                                    session,
                                                    req->r_request_started);
                        }
                        goto done;
                }

                /* attach proper inode */
                if (d_really_is_negative(dn)) {
                        ceph_dir_clear_ordered(dir);
                        ihold(in);
                        dn = splice_dentry(dn, in);
                        if (IS_ERR(dn)) {
                                err = PTR_ERR(dn);
                                goto done;
                        }
                        req->r_dentry = dn;  /* may have spliced */
                } else if (d_really_is_positive(dn) && d_inode(dn) != in) {
                        dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
                             dn, d_inode(dn), ceph_vinop(d_inode(dn)),
                             ceph_vinop(in));
                        d_invalidate(dn);
                        have_lease = false;
                }

                if (have_lease)
                        update_dentry_lease(dn, rinfo->dlease, session,
                                            req->r_request_started);
                dout(" final dn %p\n", dn);
        } else if (!req->r_aborted &&
                   (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
                    req->r_op == CEPH_MDS_OP_MKSNAP)) {
                struct dentry *dn = req->r_dentry;
                struct inode *dir = req->r_locked_dir;

                /* fill out a snapdir LOOKUPSNAP dentry */
                BUG_ON(!dn);
                BUG_ON(!dir);
                BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
                dout(" linking snapped dir %p to dn %p\n", in, dn);
                ceph_dir_clear_ordered(dir);
                ihold(in);
                dn = splice_dentry(dn, in);
                if (IS_ERR(dn)) {
                        err = PTR_ERR(dn);
                        goto done;
                }
                req->r_dentry = dn;  /* may have spliced */
        }
done:
        dout("fill_trace done err=%d\n", err);
        return err;
}
1378 
1379 /*
1380  * Prepopulate our cache with readdir results, leases, etc.
1381  */
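     /*
      * (Inodes-only variant, used when the request was aborted: the
      * returned inode metadata is still applied, but no dentries are
      * created or updated.)
      */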
1382 static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
1383                                            struct ceph_mds_session *session)
1384 {
1385         struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1386         int i, err = 0;
1387 
1388         for (i = 0; i < rinfo->dir_nr; i++) {
1389                 struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
1390                 struct ceph_vino vino;
1391                 struct inode *in;
1392                 int rc;
1393 
1394                 vino.ino = le64_to_cpu(rde->inode.in->ino);
1395                 vino.snap = le64_to_cpu(rde->inode.in->snapid);
1396 
1397                 in = ceph_get_inode(req->r_dentry->d_sb, vino);
1398                 if (IS_ERR(in)) {
1399                         err = PTR_ERR(in);
1400                         dout("new_inode badness got %d\n", err);
1401                         continue;
1402                 }
1403                 rc = fill_inode(in, NULL, &rde->inode, NULL, session,
1404                                 req->r_request_started, -1,
1405                                 &req->r_caps_reservation);
1406                 if (rc < 0) {
1407                         pr_err("fill_inode badness on %p got %d\n", in, rc);
1408                         err = rc;
1409                 }
1410                 iput(in);
1411         }
1412 
1413         return err;
1414 }
1415 
1416 void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
1417 {
1418         if (ctl->page) {
1419                 kunmap(ctl->page);
1420                 put_page(ctl->page);
1421                 ctl->page = NULL;
1422         }
1423 }
1424 
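     /*
      * The readdir cache keeps dentry pointers in page-sized slabs in
      * the directory's own page cache.  ctl->index is a linear slot
      * number: slot (index % nsize) of page (index / nsize), where
      * nsize is how many dentry pointers fit in one page (with 4K
      * pages and 8-byte pointers, 512 per page).
      */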
1425 static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
1426                               struct ceph_readdir_cache_control *ctl,
1427                               struct ceph_mds_request *req)
1428 {
1429         struct ceph_inode_info *ci = ceph_inode(dir);
1430         unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
1431         unsigned idx = ctl->index % nsize;
1432         pgoff_t pgoff = ctl->index / nsize;
1433 
1434         if (!ctl->page || pgoff != page_index(ctl->page)) {
1435                 ceph_readdir_cache_release(ctl);
1436                 if (idx == 0)
1437                         ctl->page = grab_cache_page(&dir->i_data, pgoff);
1438                 else
1439                         ctl->page = find_lock_page(&dir->i_data, pgoff);
1440                 if (!ctl->page) {
1441                         ctl->index = -1;
1442                         return idx == 0 ? -ENOMEM : 0;
1443                 }
1444                 /* reading and filling the cache are serialized by
1445                  * i_mutex, so there is no need to hold the page lock */
1446                 unlock_page(ctl->page);
1447                 ctl->dentries = kmap(ctl->page);
1448                 if (idx == 0)
1449                         memset(ctl->dentries, 0, PAGE_SIZE);
1450         }
1451 
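             /* only extend the cache if no dentries have been dropped or
              * reordered since this readdir started; the counts were
              * sampled into the request when the readdir began */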
1452         if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
1453             req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
1454                 dout("readdir cache dn %p idx %d\n", dn, ctl->index);
1455                 ctl->dentries[idx] = dn;
1456                 ctl->index++;
1457         } else {
1458                 dout("disable readdir cache\n");
1459                 ctl->index = -1;
1460         }
1461         return 0;
1462 }
1463 
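     /*
      * Populate the dcache and icache from a readdir reply: for each
      * entry, find or allocate a dentry, fill the inode from the MDS
      * metadata, update the dentry lease, and record the dentry in the
      * readdir cache unless an entry was skipped or a race was seen.
      */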
1464 int ceph_readdir_prepopulate(struct ceph_mds_request *req,
1465                              struct ceph_mds_session *session)
1466 {
1467         struct dentry *parent = req->r_dentry;
1468         struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
1469         struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1470         struct qstr dname;
1471         struct dentry *dn;
1472         struct inode *in;
1473         int err = 0, skipped = 0, ret, i;
1474         struct inode *snapdir = NULL;
1475         struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
1476         u32 frag = le32_to_cpu(rhead->args.readdir.frag);
1477         u32 last_hash = 0;
1478         u32 fpos_offset;
1479         struct ceph_readdir_cache_control cache_ctl = {};
1480 
1481         if (req->r_aborted)
1482                 return readdir_prepopulate_inodes_only(req, session);
1483 
1484         if (rinfo->hash_order && req->r_path2) {
1485                 last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
1486                                           req->r_path2, strlen(req->r_path2));
1487                 last_hash = ceph_frag_value(last_hash);
1488         }
1489 
1490         if (rinfo->dir_dir &&
1491             le32_to_cpu(rinfo->dir_dir->frag) != frag) {
1492                 dout("readdir_prepopulate got new frag %x -> %x\n",
1493                      frag, le32_to_cpu(rinfo->dir_dir->frag));
1494                 frag = le32_to_cpu(rinfo->dir_dir->frag);
1495                 if (!rinfo->hash_order)
1496                         req->r_readdir_offset = 2;
1497         }
1498 
1499         if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
1500                 snapdir = ceph_get_snapdir(d_inode(parent));
1501                 parent = d_find_alias(snapdir);
1502                 dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
1503                      rinfo->dir_nr, parent);
1504         } else {
1505                 dout("readdir_prepopulate %d items under dn %p\n",
1506                      rinfo->dir_nr, parent);
1507                 if (rinfo->dir_dir)
1508                         ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
1509         }
1510 
1511         if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2 &&
1512             !(rinfo->hash_order && req->r_path2)) {
1513                 /* note dir version at start of readdir so we can tell
1514                  * if any dentries get dropped */
1515                 req->r_dir_release_cnt = atomic64_read(&ci->i_release_count);
1516                 req->r_dir_ordered_cnt = atomic64_read(&ci->i_ordered_count);
1517                 req->r_readdir_cache_idx = 0;
1518         }
1519 
1520         cache_ctl.index = req->r_readdir_cache_idx;
1521         fpos_offset = req->r_readdir_offset;
1522 
1523         /* FIXME: release caps/leases if error occurs */
1524         for (i = 0; i < rinfo->dir_nr; i++) {
1525                 struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
1526                 struct ceph_vino vino;
1527 
1528                 dname.name = rde->name;
1529                 dname.len = rde->name_len;
1530                 dname.hash = full_name_hash(parent, dname.name, dname.len);
1531 
1532                 vino.ino = le64_to_cpu(rde->inode.in->ino);
1533                 vino.snap = le64_to_cpu(rde->inode.in->snapid);
1534 
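                 /* compute the dentry's readdir position: the name hash plus
                  * a per-hash slot in hash order, else the frag plus a
                  * per-frag slot (see ceph_make_fpos) */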
1535                 if (rinfo->hash_order) {
1536                         u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
1537                                                  rde->name, rde->name_len);
1538                         hash = ceph_frag_value(hash);
1539                         if (hash != last_hash)
1540                                 fpos_offset = 2;
1541                         last_hash = hash;
1542                         rde->offset = ceph_make_fpos(hash, fpos_offset++, true);
1543                 } else {
1544                         rde->offset = ceph_make_fpos(frag, fpos_offset++, false);
1545                 }
1546 
1547 retry_lookup:
1548                 dn = d_lookup(parent, &dname);
1549                 dout("d_lookup on parent=%p name=%.*s got %p\n",
1550                      parent, dname.len, dname.name, dn);
1551 
1552                 if (!dn) {
1553                         dn = d_alloc(parent, &dname);
1554                         dout("d_alloc %p '%.*s' = %p\n", parent,
1555                              dname.len, dname.name, dn);
1556                         if (dn == NULL) {
1557                                 dout("d_alloc badness\n");
1558                                 err = -ENOMEM;
1559                                 goto out;
1560                         }
1561                 } else if (d_really_is_positive(dn) &&
1562                            (ceph_ino(d_inode(dn)) != vino.ino ||
1563                             ceph_snap(d_inode(dn)) != vino.snap)) {
1564                         dout(" dn %p points to wrong inode %p\n",
1565                              dn, d_inode(dn));
1566                         d_delete(dn);
1567                         dput(dn);
1568                         goto retry_lookup;
1569                 }
1570 
1571                 /* inode */
1572                 if (d_really_is_positive(dn)) {
1573                         in = d_inode(dn);
1574                 } else {
1575                         in = ceph_get_inode(parent->d_sb, vino);
1576                         if (IS_ERR(in)) {
1577                                 dout("new_inode badness\n");
1578                                 d_drop(dn);
1579                                 dput(dn);
1580                                 err = PTR_ERR(in);
1581                                 goto out;
1582                         }
1583                 }
1584 
1585                 ret = fill_inode(in, NULL, &rde->inode, NULL, session,
1586                                  req->r_request_started, -1,
1587                                  &req->r_caps_reservation);
1588                 if (ret < 0) {
1589                         pr_err("fill_inode badness on %p\n", in);
1590                         if (d_really_is_negative(dn))
1591                                 iput(in);
1592                         d_drop(dn);
1593                         err = ret;
1594                         goto next_item;
1595                 }
1596 
1597                 if (d_really_is_negative(dn)) {
1598                         struct dentry *realdn;
1599 
1600                         if (ceph_security_xattr_deadlock(in)) {
1601                                 dout(" skip splicing dn %p to inode %p"
1602                                      " (security xattr deadlock)\n", dn, in);
1603                                 iput(in);
1604                                 skipped++;
1605                                 goto next_item;
1606                         }
1607 
1608                         realdn = splice_dentry(dn, in);
1609                         if (IS_ERR(realdn)) {
1610                                 err = PTR_ERR(realdn);
1611                                 d_drop(dn);
1612                                 dn = NULL;
1613                                 goto next_item;
1614                         }
1615                         dn = realdn;
1616                 }
1617 
1618                 ceph_dentry(dn)->offset = rde->offset;
1619 
1620                 update_dentry_lease(dn, rde->lease, req->r_session,
1621                                     req->r_request_started);
1622 
1623                 if (err == 0 && skipped == 0 && cache_ctl.index >= 0) {
1624                         ret = fill_readdir_cache(d_inode(parent), dn,
1625                                                  &cache_ctl, req);
1626                         if (ret < 0)
1627                                 err = ret;
1628                 }
1629 next_item:
1630                 if (dn)
1631                         dput(dn);
1632         }
1633 out:
1634         if (err == 0 && skipped == 0) {
1635                 req->r_did_prepopulate = true;
1636                 req->r_readdir_cache_idx = cache_ctl.index;
1637         }
1638         ceph_readdir_cache_release(&cache_ctl);
1639         if (snapdir) {
1640                 iput(snapdir);
1641                 dput(parent);
1642         }
1643         dout("readdir_prepopulate done\n");
1644         return err;
1645 }
1646 
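     /*
      * Set the local inode size.  Returns 1 when the caller should ask
      * the MDS for more space: i.e. size has reached half of i_max_size
      * but the size we last reported was still below half.  E.g. with
      * max_size 1MB, growing to 512KB while i_reported_size is 256KB
      * returns 1.
      */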
1647 int ceph_inode_set_size(struct inode *inode, loff_t size)
1648 {
1649         struct ceph_inode_info *ci = ceph_inode(inode);
1650         int ret = 0;
1651 
1652         spin_lock(&ci->i_ceph_lock);
1653         dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
1654         i_size_write(inode, size);
1655         inode->i_blocks = calc_inode_blocks(size);
1656 
1657         /* tell the MDS if we are approaching max_size */
1658         if ((size << 1) >= ci->i_max_size &&
1659             (ci->i_reported_size << 1) < ci->i_max_size)
1660                 ret = 1;
1661 
1662         spin_unlock(&ci->i_ceph_lock);
1663         return ret;
1664 }
1665 
1666 /*
1667  * Write back inode data in a worker thread.  (This can't be done
1668  * in the message handler context.)
1669  */
1670 void ceph_queue_writeback(struct inode *inode)
1671 {
1672         ihold(inode);
1673         if (queue_work(ceph_inode_to_client(inode)->wb_wq,
1674                        &ceph_inode(inode)->i_wb_work)) {
1675                 dout("ceph_queue_writeback %p\n", inode);
1676         } else {
1677                 dout("ceph_queue_writeback %p failed\n", inode);
1678                 iput(inode);
1679         }
1680 }
1681 
1682 static void ceph_writeback_work(struct work_struct *work)
1683 {
1684         struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1685                                                   i_wb_work);
1686         struct inode *inode = &ci->vfs_inode;
1687 
1688         dout("writeback %p\n", inode);
1689         filemap_fdatawrite(&inode->i_data);
1690         iput(inode);
1691 }
1692 
1693 /*
1694  * queue an async invalidation
1695  */
1696 void ceph_queue_invalidate(struct inode *inode)
1697 {
1698         ihold(inode);
1699         if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
1700                        &ceph_inode(inode)->i_pg_inv_work)) {
1701                 dout("ceph_queue_invalidate %p\n", inode);
1702         } else {
1703                 dout("ceph_queue_invalidate %p failed\n", inode);
1704                 iput(inode);
1705         }
1706 }
1707 
1708 /*
1709  * Invalidate inode pages in a worker thread.  (This can't be done
1710  * in the message handler context.)
1711  */
1712 static void ceph_invalidate_work(struct work_struct *work)
1713 {
1714         struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1715                                                   i_pg_inv_work);
1716         struct inode *inode = &ci->vfs_inode;
1717         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1718         u32 orig_gen;
1719         int check = 0;
1720 
1721         mutex_lock(&ci->i_truncate_mutex);
1722 
1723         if (ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
1724                 pr_warn_ratelimited("invalidate_pages %p %lld forced umount\n",
1725                                     inode, ceph_ino(inode));
1726                 mapping_set_error(inode->i_mapping, -EIO);
1727                 truncate_pagecache(inode, 0);
1728                 mutex_unlock(&ci->i_truncate_mutex);
1729                 goto out;
1730         }
1731 
1732         spin_lock(&ci->i_ceph_lock);
1733         dout("invalidate_pages %p gen %d revoking %d\n", inode,
1734              ci->i_rdcache_gen, ci->i_rdcache_revoking);
1735         if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
1736                 if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
1737                         check = 1;
1738                 spin_unlock(&ci->i_ceph_lock);
1739                 mutex_unlock(&ci->i_truncate_mutex);
1740                 goto out;
1741         }
1742         orig_gen = ci->i_rdcache_gen;
1743         spin_unlock(&ci->i_ceph_lock);
1744 
1745         if (invalidate_inode_pages2(inode->i_mapping) < 0) {
1746                 pr_err("invalidate_pages %p fails\n", inode);
1747         }
1748 
1749         spin_lock(&ci->i_ceph_lock);
1750         if (orig_gen == ci->i_rdcache_gen &&
1751             orig_gen == ci->i_rdcache_revoking) {
1752                 dout("invalidate_pages %p gen %d successful\n", inode,
1753                      ci->i_rdcache_gen);
1754                 ci->i_rdcache_revoking--;
1755                 check = 1;
1756         } else {
1757                 dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
1758                      inode, orig_gen, ci->i_rdcache_gen,
1759                      ci->i_rdcache_revoking);
1760                 if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
1761                         check = 1;
1762         }
1763         spin_unlock(&ci->i_ceph_lock);
1764         mutex_unlock(&ci->i_truncate_mutex);
1765 out:
1766         if (check)
1767                 ceph_check_caps(ci, 0, NULL);
1768         iput(inode);
1769 }
1770 
1771 
1772 /*
1773  * called by trunc_wq;
1774  *
1775  * We also truncate in a separate thread.
1776  */
1777 static void ceph_vmtruncate_work(struct work_struct *work)
1778 {
1779         struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1780                                                   i_vmtruncate_work);
1781         struct inode *inode = &ci->vfs_inode;
1782 
1783         dout("vmtruncate_work %p\n", inode);
1784         __ceph_do_pending_vmtruncate(inode);
1785         iput(inode);
1786 }
1787 
1788 /*
1789  * Queue an async vmtruncate.  If we fail to queue work, we will handle
1790  * the truncation the next time we call __ceph_do_pending_vmtruncate.
1791  */
1792 void ceph_queue_vmtruncate(struct inode *inode)
1793 {
1794         struct ceph_inode_info *ci = ceph_inode(inode);
1795 
1796         ihold(inode);
1797 
1798         if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
1799                        &ci->i_vmtruncate_work)) {
1800                 dout("ceph_queue_vmtruncate %p\n", inode);
1801         } else {
1802                 dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
1803                      inode, ci->i_truncate_pending);
1804                 iput(inode);
1805         }
1806 }
1807 
1808 /*
1809  * Make sure any pending truncation is applied before doing anything
1810  * that may depend on it.
1811  */
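     /*
      * (i_ceph_lock is dropped around truncate_pagecache() below, so if
      * i_truncate_size changes in the meantime we loop and truncate
      * again until the size is stable.)
      */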
1812 void __ceph_do_pending_vmtruncate(struct inode *inode)
1813 {
1814         struct ceph_inode_info *ci = ceph_inode(inode);
1815         u64 to;
1816         int wrbuffer_refs, finish = 0;
1817 
1818         mutex_lock(&ci->i_truncate_mutex);
1819 retry:
1820         spin_lock(&ci->i_ceph_lock);
1821         if (ci->i_truncate_pending == 0) {
1822                 dout("__do_pending_vmtruncate %p none pending\n", inode);
1823                 spin_unlock(&ci->i_ceph_lock);
1824                 mutex_unlock(&ci->i_truncate_mutex);
1825                 return;
1826         }
1827 
1828         /*
1829          * make sure any dirty snapped pages are flushed before we
1830          * possibly truncate them.. so write AND block!
1831          */
1832         if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
1833                 dout("__do_pending_vmtruncate %p flushing snaps first\n",
1834                      inode);
1835                 spin_unlock(&ci->i_ceph_lock);
1836                 filemap_write_and_wait_range(&inode->i_data, 0,
1837                                              inode->i_sb->s_maxbytes);
1838                 goto retry;
1839         }
1840 
1841         /* there should be no reader or writer */
1842         WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);
1843 
1844         to = ci->i_truncate_size;
1845         wrbuffer_refs = ci->i_wrbuffer_ref;
1846         dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
1847              ci->i_truncate_pending, to);
1848         spin_unlock(&ci->i_ceph_lock);
1849 
1850         truncate_pagecache(inode, to);
1851 
1852         spin_lock(&ci->i_ceph_lock);
1853         if (to == ci->i_truncate_size) {
1854                 ci->i_truncate_pending = 0;
1855                 finish = 1;
1856         }
1857         spin_unlock(&ci->i_ceph_lock);
1858         if (!finish)
1859                 goto retry;
1860 
1861         mutex_unlock(&ci->i_truncate_mutex);
1862 
1863         if (wrbuffer_refs == 0)
1864                 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
1865 
1866         wake_up_all(&ci->i_cap_wq);
1867 }
1868 
1869 /*
1870  * symlinks
1871  */
1872 static const struct inode_operations ceph_symlink_iops = {
1873         .get_link = simple_get_link,
1874         .setattr = ceph_setattr,
1875         .getattr = ceph_getattr,
1876         .listxattr = ceph_listxattr,
1877 };
1878 
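     /*
      * Apply attribute changes.  For each field: if we hold the
      * relevant EXCL cap, apply the change locally and mark the cap
      * dirty; otherwise add it to a SETATTR request for the MDS and
      * release the SHARED caps the MDS will need to revoke.
      */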
1879 int __ceph_setattr(struct inode *inode, struct iattr *attr)
1880 {
1881         struct ceph_inode_info *ci = ceph_inode(inode);
1882         const unsigned int ia_valid = attr->ia_valid;
1883         struct ceph_mds_request *req;
1884         struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
1885         struct ceph_cap_flush *prealloc_cf;
1886         int issued;
1887         int release = 0, dirtied = 0;
1888         int mask = 0;
1889         int err = 0;
1890         int inode_dirty_flags = 0;
1891         bool lock_snap_rwsem = false;
1892 
1893         prealloc_cf = ceph_alloc_cap_flush();
1894         if (!prealloc_cf)
1895                 return -ENOMEM;
1896 
1897         req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
1898                                        USE_AUTH_MDS);
1899         if (IS_ERR(req)) {
1900                 ceph_free_cap_flush(prealloc_cf);
1901                 return PTR_ERR(req);
1902         }
1903 
1904         spin_lock(&ci->i_ceph_lock);
1905         issued = __ceph_caps_issued(ci, NULL);
1906 
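             /* down_read() may sleep, which is not allowed under the
              * i_ceph_lock spinlock: drop it, take snap_rwsem, then
              * re-check the issued caps, which may have changed */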
1907         if (!ci->i_head_snapc &&
1908             (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
1909                 lock_snap_rwsem = true;
1910                 if (!down_read_trylock(&mdsc->snap_rwsem)) {
1911                         spin_unlock(&ci->i_ceph_lock);
1912                         down_read(&mdsc->snap_rwsem);
1913                         spin_lock(&ci->i_ceph_lock);
1914                         issued = __ceph_caps_issued(ci, NULL);
1915                 }
1916         }
1917 
1918         dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
1919 
1920         if (ia_valid & ATTR_UID) {
1921                 dout("setattr %p uid %d -> %d\n", inode,
1922                      from_kuid(&init_user_ns, inode->i_uid),
1923                      from_kuid(&init_user_ns, attr->ia_uid));
1924                 if (issued & CEPH_CAP_AUTH_EXCL) {
1925                         inode->i_uid = attr->ia_uid;
1926                         dirtied |= CEPH_CAP_AUTH_EXCL;
1927                 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
1928                            !uid_eq(attr->ia_uid, inode->i_uid)) {
1929                         req->r_args.setattr.uid = cpu_to_le32(
1930                                 from_kuid(&init_user_ns, attr->ia_uid));
1931                         mask |= CEPH_SETATTR_UID;
1932                         release |= CEPH_CAP_AUTH_SHARED;
1933                 }
1934         }
1935         if (ia_valid & ATTR_GID) {
1936                 dout("setattr %p gid %d -> %d\n", inode,
1937                      from_kgid(&init_user_ns, inode->i_gid),
1938                      from_kgid(&init_user_ns, attr->ia_gid));
1939                 if (issued & CEPH_CAP_AUTH_EXCL) {
1940                         inode->i_gid = attr->ia_gid;
1941                         dirtied |= CEPH_CAP_AUTH_EXCL;
1942                 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
1943                            !gid_eq(attr->ia_gid, inode->i_gid)) {
1944                         req->r_args.setattr.gid = cpu_to_le32(
1945                                 from_kgid(&init_user_ns, attr->ia_gid));
1946                         mask |= CEPH_SETATTR_GID;
1947                         release |= CEPH_CAP_AUTH_SHARED;
1948                 }
1949         }
1950         if (ia_valid & ATTR_MODE) {
1951                 dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
1952                      attr->ia_mode);
1953                 if (issued & CEPH_CAP_AUTH_EXCL) {
1954                         inode->i_mode = attr->ia_mode;
1955                         dirtied |= CEPH_CAP_AUTH_EXCL;
1956                 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
1957                            attr->ia_mode != inode->i_mode) {
1958                         inode->i_mode = attr->ia_mode;
1959                         req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
1960                         mask |= CEPH_SETATTR_MODE;
1961                         release |= CEPH_CAP_AUTH_SHARED;
1962                 }
1963         }
1964 
1965         if (ia_valid & ATTR_ATIME) {
1966                 dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
1967                      inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
1968                      attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
1969                 if (issued & CEPH_CAP_FILE_EXCL) {
1970                         ci->i_time_warp_seq++;
1971                         inode->i_atime = attr->ia_atime;
1972                         dirtied |= CEPH_CAP_FILE_EXCL;
1973                 } else if ((issued & CEPH_CAP_FILE_WR) &&
1974                            timespec_compare(&inode->i_atime,
1975                                             &attr->ia_atime) < 0) {
1976                         inode->i_atime = attr->ia_atime;
1977                         dirtied |= CEPH_CAP_FILE_WR;
1978                 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
1979                            !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
1980                         ceph_encode_timespec(&req->r_args.setattr.atime,
1981                                              &attr->ia_atime);
1982                         mask |= CEPH_SETATTR_ATIME;
1983                         release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
1984                                 CEPH_CAP_FILE_WR;
1985                 }
1986         }
1987         if (ia_valid & ATTR_MTIME) {
1988                 dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
1989                      inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
1990                      attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
1991                 if (issued & CEPH_CAP_FILE_EXCL) {
1992                         ci->i_time_warp_seq++;
1993                         inode->i_mtime = attr->ia_mtime;
1994                         dirtied |= CEPH_CAP_FILE_EXCL;
1995                 } else if ((issued & CEPH_CAP_FILE_WR) &&
1996                            timespec_compare(&inode->i_mtime,
1997                                             &attr->ia_mtime) < 0) {
1998                         inode->i_mtime = attr->ia_mtime;
1999                         dirtied |= CEPH_CAP_FILE_WR;
2000                 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2001                            !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
2002                         ceph_encode_timespec(&req->r_args.setattr.mtime,
2003                                              &attr->ia_mtime);
2004                         mask |= CEPH_SETATTR_MTIME;
2005                         release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
2006                                 CEPH_CAP_FILE_WR;
2007                 }
2008         }
2009         if (ia_valid & ATTR_SIZE) {
2010                 dout("setattr %p size %lld -> %lld\n", inode,
2011                      inode->i_size, attr->ia_size);
2012                 if ((issued & CEPH_CAP_FILE_EXCL) &&
2013                     attr->ia_size > inode->i_size) {
2014                         i_size_write(inode, attr->ia_size);
2015                         inode->i_blocks = calc_inode_blocks(attr->ia_size);
2016                         inode->i_ctime = attr->ia_ctime;
2017                         ci->i_reported_size = attr->ia_size;
2018                         dirtied |= CEPH_CAP_FILE_EXCL;
2019                 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2020                            attr->ia_size != inode->i_size) {
2021                         req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
2022                         req->r_args.setattr.old_size =
2023                                 cpu_to_le64(inode->i_size);
2024                         mask |= CEPH_SETATTR_SIZE;
2025                         release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
2026                                 CEPH_CAP_FILE_WR;
2027                 }
2028         }
2029 
2030         /* these do nothing */
2031         if (ia_valid & ATTR_CTIME) {
2032                 bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
2033                                          ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
2034                 dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
2035                      inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
2036                      attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
2037                      only ? "ctime only" : "ignored");
2038                 inode->i_ctime = attr->ia_ctime;
2039                 if (only) {
2040                         /*
2041                          * if the kernel wants to dirty ctime but nothing else,
2042                          * we need to choose a cap to dirty under, or do
2043                          * an almost-no-op setattr
2044                          */
2045                         if (issued & CEPH_CAP_AUTH_EXCL)
2046                                 dirtied |= CEPH_CAP_AUTH_EXCL;
2047                         else if (issued & CEPH_CAP_FILE_EXCL)
2048                                 dirtied |= CEPH_CAP_FILE_EXCL;
2049                         else if (issued & CEPH_CAP_XATTR_EXCL)
2050                                 dirtied |= CEPH_CAP_XATTR_EXCL;
2051                         else
2052                                 mask |= CEPH_SETATTR_CTIME;
2053                 }
2054         }
2055         if (ia_valid & ATTR_FILE)
2056                 dout("setattr %p ATTR_FILE ... hrm!\n", inode);
2057 
2058         if (dirtied) {
2059                 inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
2060                                                            &prealloc_cf);
2061                 inode->i_ctime = current_time(inode);
2062         }
2063 
2064         release &= issued;
2065         spin_unlock(&ci->i_ceph_lock);
2066         if (lock_snap_rwsem)
2067                 up_read(&mdsc->snap_rwsem);
2068 
2069         if (inode_dirty_flags)
2070                 __mark_inode_dirty(inode, inode_dirty_flags);
2071 
2072 
2073         if (mask) {
2074                 req->r_inode = inode;
2075                 ihold(inode);
2076                 req->r_inode_drop = release;
2077                 req->r_args.setattr.mask = cpu_to_le32(mask);
2078                 req->r_num_caps = 1;
2079                 err = ceph_mdsc_do_request(mdsc, NULL, req);
2080         }
2081         dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
2082              ceph_cap_string(dirtied), mask);
2083 
2084         ceph_mdsc_put_request(req);
2085         ceph_free_cap_flush(prealloc_cf);
2086 
2087         if (err >= 0 && (mask & CEPH_SETATTR_SIZE))
2088                 __ceph_do_pending_vmtruncate(inode);
2089 
2090         return err;
2091 }
2092 
2093 /*
2094  * setattr
2095  */
2096 int ceph_setattr(struct dentry *dentry, struct iattr *attr)
2097 {
2098         struct inode *inode = d_inode(dentry);
2099         int err;
2100 
2101         if (ceph_snap(inode) != CEPH_NOSNAP)
2102                 return -EROFS;
2103 
2104         err = setattr_prepare(dentry, attr);
2105         if (err != 0)
2106                 return err;
2107 
2108         err = __ceph_setattr(inode, attr);
2109 
2110         if (err >= 0 && (attr->ia_valid & ATTR_MODE))
2111                 err = posix_acl_chmod(inode, attr->ia_mode);
2112 
2113         return err;
2114 }
2115 
2116 /*
2117  * Verify that we have a lease on the given mask.  If not,
2118  * do a getattr against an MDS.
2119  */
2120 int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
2121                       int mask, bool force)
2122 {
2123         struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
2124         struct ceph_mds_client *mdsc = fsc->mdsc;
2125         struct ceph_mds_request *req;
2126         int err;
2127 
2128         if (ceph_snap(inode) == CEPH_SNAPDIR) {
2129                 dout("do_getattr inode %p SNAPDIR\n", inode);
2130                 return 0;
2131         }
2132 
2133         dout("do_getattr inode %p mask %s mode 0%o\n",
2134              inode, ceph_cap_string(mask), inode->i_mode);
2135         if (!force && ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
2136                 return 0;
2137 
2138         req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
2139         if (IS_ERR(req))
2140                 return PTR_ERR(req);
2141         req->r_inode = inode;
2142         ihold(inode);
2143         req->r_num_caps = 1;
2144         req->r_args.getattr.mask = cpu_to_le32(mask);
2145         req->r_locked_page = locked_page;
2146         err = ceph_mdsc_do_request(mdsc, NULL, req);
2147         if (locked_page && err == 0) {
2148                 u64 inline_version = req->r_reply_info.targeti.inline_version;
2149                 if (inline_version == 0) {
2150                         /* the reply is supposed to contain inline data */
2151                         err = -EINVAL;
2152                 } else if (inline_version == CEPH_INLINE_NONE) {
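                             /* inode has no inline data */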
2153                         err = -ENODATA;
2154                 } else {
2155                         err = req->r_reply_info.targeti.inline_len;
2156                 }
2157         }
2158         ceph_mdsc_put_request(req);
2159         dout("do_getattr result=%d\n", err);
2160         return err;
2161 }
2162 
2163 
2164 /*
2165  * Check inode permissions.  We verify we have a valid value for
2166  * the AUTH cap, then call the generic handler.
2167  */
2168 int ceph_permission(struct inode *inode, int mask)
2169 {
2170         int err;
2171 
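             /* no RCU-walk support: the getattr below may block, so have
              * the VFS fall back to ref-walk */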
2172         if (mask & MAY_NOT_BLOCK)
2173                 return -ECHILD;
2174 
2175         err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);
2176 
2177         if (!err)
2178                 err = generic_permission(inode, mask);
2179         return err;
2180 }
2181 
2182 /*
2183  * Get all attributes.  Hopefully someday we'll have a statlite()
2184  * and can limit the fields we require to be accurate.
2185  */
2186 int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
2187                  struct kstat *stat)
2188 {
2189         struct inode *inode = d_inode(dentry);
2190         struct ceph_inode_info *ci = ceph_inode(inode);
2191         int err;
2192 
2193         err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL, false);
2194         if (!err) {
2195                 generic_fillattr(inode, stat);
2196                 stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
2197                 if (ceph_snap(inode) != CEPH_NOSNAP)
2198                         stat->dev = ceph_snap(inode);
2199                 else
2200                         stat->dev = 0;
2201                 if (S_ISDIR(inode->i_mode)) {
2202                         if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
2203                                                 RBYTES))
2204                                 stat->size = ci->i_rbytes;
2205                         else
2206                                 stat->size = ci->i_files + ci->i_subdirs;
2207                         stat->blocks = 0;
2208                         stat->blksize = 65536;
2209                 }
2210         }
2211         return err;
2212 }
2213 
