TOMOYO Linux Cross Reference
Linux/fs/nfs/nfs4state.c

  1 /*
  2  *  fs/nfs/nfs4state.c
  3  *
  4  *  Client-side support for the NFSv4 state model.
  5  *
  6  *  Copyright (c) 2002 The Regents of the University of Michigan.
  7  *  All rights reserved.
  8  *
  9  *  Kendrick Smith <kmsmith@umich.edu>
 10  *
 11  *  Redistribution and use in source and binary forms, with or without
 12  *  modification, are permitted provided that the following conditions
 13  *  are met:
 14  *
 15  *  1. Redistributions of source code must retain the above copyright
 16  *     notice, this list of conditions and the following disclaimer.
 17  *  2. Redistributions in binary form must reproduce the above copyright
 18  *     notice, this list of conditions and the following disclaimer in the
 19  *     documentation and/or other materials provided with the distribution.
 20  *  3. Neither the name of the University nor the names of its
 21  *     contributors may be used to endorse or promote products derived
 22  *     from this software without specific prior written permission.
 23  *
 24  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 25  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 26  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 27  *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 28  *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 29  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 30  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 31  *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 32  *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 33  *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 34  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 35  *
 36  * Implementation of the NFSv4 client state model: establishing and
 37  * renewing the lease, managing state owners and open/lock state, and
 38  * driving state recovery after server reboot or lease expiry.
 39  */
 40 
 41 #include <linux/kernel.h>
 42 #include <linux/slab.h>
 43 #include <linux/fs.h>
 44 #include <linux/nfs_fs.h>
 45 #include <linux/kthread.h>
 46 #include <linux/module.h>
 47 #include <linux/random.h>
 48 #include <linux/ratelimit.h>
 49 #include <linux/workqueue.h>
 50 #include <linux/bitops.h>
 51 #include <linux/jiffies.h>
 52 
 53 #include <linux/sunrpc/clnt.h>
 54 
 55 #include "nfs4_fs.h"
 56 #include "callback.h"
 57 #include "delegation.h"
 58 #include "internal.h"
 59 #include "nfs4idmap.h"
 60 #include "nfs4session.h"
 61 #include "pnfs.h"
 62 #include "netns.h"
 63 
 64 #define NFSDBG_FACILITY         NFSDBG_STATE
 65 
 66 #define OPENOWNER_POOL_SIZE     8
 67 
 68 const nfs4_stateid zero_stateid = {
 69         { .data = { 0 } },
 70         .type = NFS4_SPECIAL_STATEID_TYPE,
 71 };
 72 const nfs4_stateid invalid_stateid = {
 73         {
 74                 /* Funky initialiser keeps older gcc versions happy */
 75                 .data = { 0xff, 0xff, 0xff, 0xff, 0 },
 76         },
 77         .type = NFS4_INVALID_STATEID_TYPE,
 78 };
 79 
 80 static DEFINE_MUTEX(nfs_clid_init_mutex);
 81 
 82 int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
 83 {
 84         struct nfs4_setclientid_res clid = {
 85                 .clientid = clp->cl_clientid,
 86                 .confirm = clp->cl_confirm,
 87         };
 88         unsigned short port;
 89         int status;
 90         struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
 91 
 92         if (test_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state))
 93                 goto do_confirm;
 94         port = nn->nfs_callback_tcpport;
 95         if (clp->cl_addr.ss_family == AF_INET6)
 96                 port = nn->nfs_callback_tcpport6;
 97 
 98         status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid);
 99         if (status != 0)
100                 goto out;
101         clp->cl_clientid = clid.clientid;
102         clp->cl_confirm = clid.confirm;
103         set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
104 do_confirm:
105         status = nfs4_proc_setclientid_confirm(clp, &clid, cred);
106         if (status != 0)
107                 goto out;
108         clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
109         nfs4_schedule_state_renewal(clp);
110 out:
111         return status;
112 }
113 
114 /**
115  * nfs40_discover_server_trunking - Detect server IP address trunking (mv0)
116  *
117  * @clp: nfs_client under test
118  * @result: OUT: found nfs_client, or clp
119  * @cred: credential to use for trunking test
120  *
121  * Returns zero, a negative errno, or a negative NFS4ERR status.
122  * If zero is returned, an nfs_client pointer is planted in
123  * "result".
124  *
125  * Note: The returned client may not yet be marked ready.
126  */
127 int nfs40_discover_server_trunking(struct nfs_client *clp,
128                                    struct nfs_client **result,
129                                    struct rpc_cred *cred)
130 {
131         struct nfs4_setclientid_res clid = {
132                 .clientid = clp->cl_clientid,
133                 .confirm = clp->cl_confirm,
134         };
135         struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
136         unsigned short port;
137         int status;
138 
139         port = nn->nfs_callback_tcpport;
140         if (clp->cl_addr.ss_family == AF_INET6)
141                 port = nn->nfs_callback_tcpport6;
142 
143         status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid);
144         if (status != 0)
145                 goto out;
146         clp->cl_clientid = clid.clientid;
147         clp->cl_confirm = clid.confirm;
148 
149         status = nfs40_walk_client_list(clp, result, cred);
150         if (status == 0) {
151                 /* Sustain the lease, even if it's empty.  If the clientid4
152                  * goes stale it's of no use for trunking discovery. */
153                 nfs4_schedule_state_renewal(*result);
154         }
155 out:
156         return status;
157 }
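/*
 * Illustrative sketch (hypothetical caller, not taken from this file) of
 * consuming the trunking-detection result, for this mv0 helper and the
 * mv1 variant below alike.  "found" is an invented variable name.
 *
 *	struct nfs_client *found = clp;
 *	int status = nfs40_discover_server_trunking(clp, &found, cred);
 *
 *	if (status == 0 && found != clp)
 *		(the lease already belongs to "found": switch to that
 *		 nfs_client and release the redundant clp)
 *	else if (status == 0)
 *		(clp holds the lease; it may still need to be marked ready)
 */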
158 
159 struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
160 {
161         struct rpc_cred *cred = NULL;
162 
163         if (clp->cl_machine_cred != NULL)
164                 cred = get_rpccred(clp->cl_machine_cred);
165         return cred;
166 }
167 
168 static void nfs4_root_machine_cred(struct nfs_client *clp)
169 {
170         struct rpc_cred *cred, *new;
171 
172         new = rpc_lookup_machine_cred(NULL);
173         spin_lock(&clp->cl_lock);
174         cred = clp->cl_machine_cred;
175         clp->cl_machine_cred = new;
176         spin_unlock(&clp->cl_lock);
177         if (cred != NULL)
178                 put_rpccred(cred);
179 }
180 
181 static struct rpc_cred *
182 nfs4_get_renew_cred_server_locked(struct nfs_server *server)
183 {
184         struct rpc_cred *cred = NULL;
185         struct nfs4_state_owner *sp;
186         struct rb_node *pos;
187 
188         for (pos = rb_first(&server->state_owners);
189              pos != NULL;
190              pos = rb_next(pos)) {
191                 sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
192                 if (list_empty(&sp->so_states))
193                         continue;
194                 cred = get_rpccred(sp->so_cred);
195                 break;
196         }
197         return cred;
198 }
199 
200 /**
201  * nfs4_get_renew_cred_locked - Acquire credential for a renew operation
202  * @clp: client state handle
203  *
204  * Returns an rpc_cred with reference count bumped, or NULL.
205  * Caller must hold clp->cl_lock.
206  */
207 struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
208 {
209         struct rpc_cred *cred = NULL;
210         struct nfs_server *server;
211 
212         /* Use machine credentials if available */
213         cred = nfs4_get_machine_cred_locked(clp);
214         if (cred != NULL)
215                 goto out;
216 
217         rcu_read_lock();
218         list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
219                 cred = nfs4_get_renew_cred_server_locked(server);
220                 if (cred != NULL)
221                         break;
222         }
223         rcu_read_unlock();
224 
225 out:
226         return cred;
227 }
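/*
 * Illustrative sketch (hypothetical caller, not taken from this file)
 * honouring the locking and reference-counting contract documented above;
 * nfs4_example_renew_once() and the error value are invented.
 *
 *	static int nfs4_example_renew_once(struct nfs_client *clp)
 *	{
 *		struct rpc_cred *cred;
 *
 *		spin_lock(&clp->cl_lock);
 *		cred = nfs4_get_renew_cred_locked(clp);
 *		spin_unlock(&clp->cl_lock);
 *		if (cred == NULL)
 *			return -EACCES;
 *		(issue the RENEW or SEQUENCE call using cred here)
 *		put_rpccred(cred);
 *		return 0;
 *	}
 */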
228 
229 static void nfs4_end_drain_slot_table(struct nfs4_slot_table *tbl)
230 {
231         if (test_and_clear_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
232                 spin_lock(&tbl->slot_tbl_lock);
233                 nfs41_wake_slot_table(tbl);
234                 spin_unlock(&tbl->slot_tbl_lock);
235         }
236 }
237 
238 static void nfs4_end_drain_session(struct nfs_client *clp)
239 {
240         struct nfs4_session *ses = clp->cl_session;
241 
242         if (clp->cl_slot_tbl) {
243                 nfs4_end_drain_slot_table(clp->cl_slot_tbl);
244                 return;
245         }
246 
247         if (ses != NULL) {
248                 nfs4_end_drain_slot_table(&ses->bc_slot_table);
249                 nfs4_end_drain_slot_table(&ses->fc_slot_table);
250         }
251 }
252 
253 static int nfs4_drain_slot_tbl(struct nfs4_slot_table *tbl)
254 {
255         set_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state);
256         spin_lock(&tbl->slot_tbl_lock);
257         if (tbl->highest_used_slotid != NFS4_NO_SLOT) {
258                 reinit_completion(&tbl->complete);
259                 spin_unlock(&tbl->slot_tbl_lock);
260                 return wait_for_completion_interruptible(&tbl->complete);
261         }
262         spin_unlock(&tbl->slot_tbl_lock);
263         return 0;
264 }
265 
266 static int nfs4_begin_drain_session(struct nfs_client *clp)
267 {
268         struct nfs4_session *ses = clp->cl_session;
269         int ret = 0;
270 
271         if (clp->cl_slot_tbl)
272                 return nfs4_drain_slot_tbl(clp->cl_slot_tbl);
273 
274         /* back channel */
275         ret = nfs4_drain_slot_tbl(&ses->bc_slot_table);
276         if (ret)
277                 return ret;
278         /* fore channel */
279         return nfs4_drain_slot_tbl(&ses->fc_slot_table);
280 }
281 
282 #if defined(CONFIG_NFS_V4_1)
283 
284 static int nfs41_setup_state_renewal(struct nfs_client *clp)
285 {
286         int status;
287         struct nfs_fsinfo fsinfo;
288         unsigned long now;
289 
290         if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) {
291                 nfs4_schedule_state_renewal(clp);
292                 return 0;
293         }
294 
295         now = jiffies;
296         status = nfs4_proc_get_lease_time(clp, &fsinfo);
297         if (status == 0) {
298                 nfs4_set_lease_period(clp, fsinfo.lease_time * HZ, now);
299                 nfs4_schedule_state_renewal(clp);
300         }
301 
302         return status;
303 }
304 
305 static void nfs41_finish_session_reset(struct nfs_client *clp)
306 {
307         clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
308         clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
309         /* create_session negotiated new slot table */
310         clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
311         nfs41_setup_state_renewal(clp);
312 }
313 
314 int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
315 {
316         int status;
317 
318         if (test_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state))
319                 goto do_confirm;
320         status = nfs4_proc_exchange_id(clp, cred);
321         if (status != 0)
322                 goto out;
323         set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
324 do_confirm:
325         status = nfs4_proc_create_session(clp, cred);
326         if (status != 0)
327                 goto out;
328         nfs41_finish_session_reset(clp);
329         nfs_mark_client_ready(clp, NFS_CS_READY);
330 out:
331         return status;
332 }
333 
334 /**
335  * nfs41_discover_server_trunking - Detect server IP address trunking (mv1)
336  *
337  * @clp: nfs_client under test
338  * @result: OUT: found nfs_client, or clp
339  * @cred: credential to use for trunking test
340  *
341  * Returns NFS4_OK, a negative errno, or a negative NFS4ERR status.
342  * If NFS4_OK is returned, an nfs_client pointer is planted in
343  * "result".
344  *
345  * Note: The returned client may not yet be marked ready.
346  */
347 int nfs41_discover_server_trunking(struct nfs_client *clp,
348                                    struct nfs_client **result,
349                                    struct rpc_cred *cred)
350 {
351         int status;
352 
353         status = nfs4_proc_exchange_id(clp, cred);
354         if (status != NFS4_OK)
355                 return status;
356 
357         status = nfs41_walk_client_list(clp, result, cred);
358         if (status < 0)
359                 return status;
360         if (clp != *result)
361                 return 0;
362 
363         /*
364          * Purge state if the client id was established in a prior
365          * instance and the client id could not have arrived on the
366          * server via Transparent State Migration.
367          */
368         if (clp->cl_exchange_flags & EXCHGID4_FLAG_CONFIRMED_R) {
369                 if (!test_bit(NFS_CS_TSM_POSSIBLE, &clp->cl_flags))
370                         set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
371                 else
372                         set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
373         }
374         nfs4_schedule_state_manager(clp);
375         status = nfs_wait_client_init_complete(clp);
376         if (status < 0)
377                 nfs_put_client(clp);
378         return status;
379 }
380 
381 #endif /* CONFIG_NFS_V4_1 */
382 
383 /**
384  * nfs4_get_clid_cred - Acquire credential for a setclientid operation
385  * @clp: client state handle
386  *
387  * Returns an rpc_cred with reference count bumped, or NULL.
388  */
389 struct rpc_cred *nfs4_get_clid_cred(struct nfs_client *clp)
390 {
391         struct rpc_cred *cred;
392 
393         spin_lock(&clp->cl_lock);
394         cred = nfs4_get_machine_cred_locked(clp);
395         spin_unlock(&clp->cl_lock);
396         return cred;
397 }
398 
399 static struct nfs4_state_owner *
400 nfs4_find_state_owner_locked(struct nfs_server *server, struct rpc_cred *cred)
401 {
402         struct rb_node **p = &server->state_owners.rb_node,
403                        *parent = NULL;
404         struct nfs4_state_owner *sp;
405 
406         while (*p != NULL) {
407                 parent = *p;
408                 sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
409 
410                 if (cred < sp->so_cred)
411                         p = &parent->rb_left;
412                 else if (cred > sp->so_cred)
413                         p = &parent->rb_right;
414                 else {
415                         if (!list_empty(&sp->so_lru))
416                                 list_del_init(&sp->so_lru);
417                         atomic_inc(&sp->so_count);
418                         return sp;
419                 }
420         }
421         return NULL;
422 }
423 
424 static struct nfs4_state_owner *
425 nfs4_insert_state_owner_locked(struct nfs4_state_owner *new)
426 {
427         struct nfs_server *server = new->so_server;
428         struct rb_node **p = &server->state_owners.rb_node,
429                        *parent = NULL;
430         struct nfs4_state_owner *sp;
431         int err;
432 
433         while (*p != NULL) {
434                 parent = *p;
435                 sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
436 
437                 if (new->so_cred < sp->so_cred)
438                         p = &parent->rb_left;
439                 else if (new->so_cred > sp->so_cred)
440                         p = &parent->rb_right;
441                 else {
442                         if (!list_empty(&sp->so_lru))
443                                 list_del_init(&sp->so_lru);
444                         atomic_inc(&sp->so_count);
445                         return sp;
446                 }
447         }
448         err = ida_get_new(&server->openowner_id, &new->so_seqid.owner_id);
449         if (err)
450                 return ERR_PTR(err);
451         rb_link_node(&new->so_server_node, parent, p);
452         rb_insert_color(&new->so_server_node, &server->state_owners);
453         return new;
454 }
455 
456 static void
457 nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp)
458 {
459         struct nfs_server *server = sp->so_server;
460 
461         if (!RB_EMPTY_NODE(&sp->so_server_node))
462                 rb_erase(&sp->so_server_node, &server->state_owners);
463         ida_remove(&server->openowner_id, sp->so_seqid.owner_id);
464 }
465 
466 static void
467 nfs4_init_seqid_counter(struct nfs_seqid_counter *sc)
468 {
469         sc->create_time = ktime_get();
470         sc->flags = 0;
471         sc->counter = 0;
472         spin_lock_init(&sc->lock);
473         INIT_LIST_HEAD(&sc->list);
474         rpc_init_wait_queue(&sc->wait, "Seqid_waitqueue");
475 }
476 
477 static void
478 nfs4_destroy_seqid_counter(struct nfs_seqid_counter *sc)
479 {
480         rpc_destroy_wait_queue(&sc->wait);
481 }
482 
483 /*
484  * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
485  * create a new state_owner.  The caller is responsible for inserting it
486  * into the server's state_owners tree (see nfs4_get_state_owner()).
487  */
488 static struct nfs4_state_owner *
489 nfs4_alloc_state_owner(struct nfs_server *server,
490                 struct rpc_cred *cred,
491                 gfp_t gfp_flags)
492 {
493         struct nfs4_state_owner *sp;
494 
495         sp = kzalloc(sizeof(*sp), gfp_flags);
496         if (!sp)
497                 return NULL;
498         sp->so_server = server;
499         sp->so_cred = get_rpccred(cred);
500         spin_lock_init(&sp->so_lock);
501         INIT_LIST_HEAD(&sp->so_states);
502         nfs4_init_seqid_counter(&sp->so_seqid);
503         atomic_set(&sp->so_count, 1);
504         INIT_LIST_HEAD(&sp->so_lru);
505         seqcount_init(&sp->so_reclaim_seqcount);
506         mutex_init(&sp->so_delegreturn_mutex);
507         return sp;
508 }
509 
510 static void
511 nfs4_reset_state_owner(struct nfs4_state_owner *sp)
512 {
513         /* This state_owner is no longer usable, but must
514          * remain in place so that state recovery can find it
515          * and the opens associated with it.
516          * It may also be used for new 'open' request to
517          * return a delegation to the server.
518          * So update the 'create_time' so that it looks like
519          * a new state_owner.  This will cause the server to
520          * request an OPEN_CONFIRM to start a new sequence.
521          */
522         sp->so_seqid.create_time = ktime_get();
523 }
524 
525 static void nfs4_free_state_owner(struct nfs4_state_owner *sp)
526 {
527         nfs4_destroy_seqid_counter(&sp->so_seqid);
528         put_rpccred(sp->so_cred);
529         kfree(sp);
530 }
531 
532 static void nfs4_gc_state_owners(struct nfs_server *server)
533 {
534         struct nfs_client *clp = server->nfs_client;
535         struct nfs4_state_owner *sp, *tmp;
536         unsigned long time_min, time_max;
537         LIST_HEAD(doomed);
538 
539         spin_lock(&clp->cl_lock);
540         time_max = jiffies;
541         time_min = (long)time_max - (long)clp->cl_lease_time;
542         list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
543                 /* NB: LRU is sorted so that oldest is at the head */
544                 if (time_in_range(sp->so_expires, time_min, time_max))
545                         break;
546                 list_move(&sp->so_lru, &doomed);
547                 nfs4_remove_state_owner_locked(sp);
548         }
549         spin_unlock(&clp->cl_lock);
550 
551         list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
552                 list_del(&sp->so_lru);
553                 nfs4_free_state_owner(sp);
554         }
555 }
556 
557 /**
558  * nfs4_get_state_owner - Look up a state owner given a credential
559  * @server: nfs_server to search
560  * @cred: RPC credential to match
561  *
562  * Returns a pointer to an instantiated nfs4_state_owner struct, or NULL.
563  */
564 struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server,
565                                               struct rpc_cred *cred,
566                                               gfp_t gfp_flags)
567 {
568         struct nfs_client *clp = server->nfs_client;
569         struct nfs4_state_owner *sp, *new;
570 
571         spin_lock(&clp->cl_lock);
572         sp = nfs4_find_state_owner_locked(server, cred);
573         spin_unlock(&clp->cl_lock);
574         if (sp != NULL)
575                 goto out;
576         new = nfs4_alloc_state_owner(server, cred, gfp_flags);
577         if (new == NULL)
578                 goto out;
579         do {
580                 if (ida_pre_get(&server->openowner_id, gfp_flags) == 0)
581                         break;
582                 spin_lock(&clp->cl_lock);
583                 sp = nfs4_insert_state_owner_locked(new);
584                 spin_unlock(&clp->cl_lock);
585         } while (sp == ERR_PTR(-EAGAIN));
586         if (sp != new)
587                 nfs4_free_state_owner(new);
588 out:
589         nfs4_gc_state_owners(server);
590         return sp;
591 }
592 
593 /**
594  * nfs4_put_state_owner - Release a nfs4_state_owner
595  * @sp: state owner data to release
596  *
597  * Note that we keep released state owners on an LRU
598  * list.
599  * This caches valid state owners so that they can be
600  * reused, to avoid the OPEN_CONFIRM on minor version 0.
601  * It also pins the uniquifier of dropped state owners for
602  * a while, to ensure that those state owner names are
603  * never reused.
604  */
605 void nfs4_put_state_owner(struct nfs4_state_owner *sp)
606 {
607         struct nfs_server *server = sp->so_server;
608         struct nfs_client *clp = server->nfs_client;
609 
610         if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
611                 return;
612 
613         sp->so_expires = jiffies;
614         list_add_tail(&sp->so_lru, &server->state_owners_lru);
615         spin_unlock(&clp->cl_lock);
616 }
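/*
 * Illustrative sketch (hypothetical caller, not taken from this file)
 * pairing nfs4_get_state_owner() with nfs4_put_state_owner(), so that the
 * released owner lands on the per-server LRU described above;
 * nfs4_example_open_prepare() is an invented name.
 *
 *	static int nfs4_example_open_prepare(struct nfs_server *server,
 *					     struct rpc_cred *cred)
 *	{
 *		struct nfs4_state_owner *sp;
 *
 *		sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
 *		if (sp == NULL)
 *			return -ENOMEM;
 *		(send the OPEN compound on behalf of sp here)
 *		nfs4_put_state_owner(sp);
 *		return 0;
 *	}
 */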
617 
618 /**
619  * nfs4_purge_state_owners - Release all cached state owners
620  * @server: nfs_server with cached state owners to release
621  *
622  * Called at umount time.  Remaining state owners will be on
623  * the LRU with ref count of zero.
624  */
625 void nfs4_purge_state_owners(struct nfs_server *server)
626 {
627         struct nfs_client *clp = server->nfs_client;
628         struct nfs4_state_owner *sp, *tmp;
629         LIST_HEAD(doomed);
630 
631         spin_lock(&clp->cl_lock);
632         list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
633                 list_move(&sp->so_lru, &doomed);
634                 nfs4_remove_state_owner_locked(sp);
635         }
636         spin_unlock(&clp->cl_lock);
637 
638         list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
639                 list_del(&sp->so_lru);
640                 nfs4_free_state_owner(sp);
641         }
642 }
643 
644 static struct nfs4_state *
645 nfs4_alloc_open_state(void)
646 {
647         struct nfs4_state *state;
648 
649         state = kzalloc(sizeof(*state), GFP_NOFS);
650         if (!state)
651                 return NULL;
652         atomic_set(&state->count, 1);
653         INIT_LIST_HEAD(&state->lock_states);
654         spin_lock_init(&state->state_lock);
655         seqlock_init(&state->seqlock);
656         init_waitqueue_head(&state->waitq);
657         return state;
658 }
659 
660 void
661 nfs4_state_set_mode_locked(struct nfs4_state *state, fmode_t fmode)
662 {
663         if (state->state == fmode)
664                 return;
665         /* NB! List reordering - see the reclaim code for why.  */
666         if ((fmode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
667                 if (fmode & FMODE_WRITE)
668                         list_move(&state->open_states, &state->owner->so_states);
669                 else
670                         list_move_tail(&state->open_states, &state->owner->so_states);
671         }
672         state->state = fmode;
673 }
674 
675 static struct nfs4_state *
676 __nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
677 {
678         struct nfs_inode *nfsi = NFS_I(inode);
679         struct nfs4_state *state;
680 
681         list_for_each_entry(state, &nfsi->open_states, inode_states) {
682                 if (state->owner != owner)
683                         continue;
684                 if (!nfs4_valid_open_stateid(state))
685                         continue;
686                 if (atomic_inc_not_zero(&state->count))
687                         return state;
688         }
689         return NULL;
690 }
691 
692 static void
693 nfs4_free_open_state(struct nfs4_state *state)
694 {
695         kfree(state);
696 }
697 
698 struct nfs4_state *
699 nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
700 {
701         struct nfs4_state *state, *new;
702         struct nfs_inode *nfsi = NFS_I(inode);
703 
704         spin_lock(&inode->i_lock);
705         state = __nfs4_find_state_byowner(inode, owner);
706         spin_unlock(&inode->i_lock);
707         if (state)
708                 goto out;
709         new = nfs4_alloc_open_state();
710         spin_lock(&owner->so_lock);
711         spin_lock(&inode->i_lock);
712         state = __nfs4_find_state_byowner(inode, owner);
713         if (state == NULL && new != NULL) {
714                 state = new;
715                 state->owner = owner;
716                 atomic_inc(&owner->so_count);
717                 list_add(&state->inode_states, &nfsi->open_states);
718                 ihold(inode);
719                 state->inode = inode;
720                 spin_unlock(&inode->i_lock);
721                 /* Note: The reclaim code dictates that we add stateless
722                  * and read-only stateids to the end of the list */
723                 list_add_tail(&state->open_states, &owner->so_states);
724                 spin_unlock(&owner->so_lock);
725         } else {
726                 spin_unlock(&inode->i_lock);
727                 spin_unlock(&owner->so_lock);
728                 if (new)
729                         nfs4_free_open_state(new);
730         }
731 out:
732         return state;
733 }
734 
735 void nfs4_put_open_state(struct nfs4_state *state)
736 {
737         struct inode *inode = state->inode;
738         struct nfs4_state_owner *owner = state->owner;
739 
740         if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
741                 return;
742         spin_lock(&inode->i_lock);
743         list_del(&state->inode_states);
744         list_del(&state->open_states);
745         spin_unlock(&inode->i_lock);
746         spin_unlock(&owner->so_lock);
747         iput(inode);
748         nfs4_free_open_state(state);
749         nfs4_put_state_owner(owner);
750 }
751 
752 /*
753  * Close the current file.
754  */
755 static void __nfs4_close(struct nfs4_state *state,
756                 fmode_t fmode, gfp_t gfp_mask, int wait)
757 {
758         struct nfs4_state_owner *owner = state->owner;
759         int call_close = 0;
760         fmode_t newstate;
761 
762         atomic_inc(&owner->so_count);
763         /* Protect against nfs4_find_state() */
764         spin_lock(&owner->so_lock);
765         switch (fmode & (FMODE_READ | FMODE_WRITE)) {
766                 case FMODE_READ:
767                         state->n_rdonly--;
768                         break;
769                 case FMODE_WRITE:
770                         state->n_wronly--;
771                         break;
772                 case FMODE_READ|FMODE_WRITE:
773                         state->n_rdwr--;
774         }
775         newstate = FMODE_READ|FMODE_WRITE;
776         if (state->n_rdwr == 0) {
777                 if (state->n_rdonly == 0) {
778                         newstate &= ~FMODE_READ;
779                         call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
780                         call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
781                 }
782                 if (state->n_wronly == 0) {
783                         newstate &= ~FMODE_WRITE;
784                         call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
785                         call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
786                 }
787                 if (newstate == 0)
788                         clear_bit(NFS_DELEGATED_STATE, &state->flags);
789         }
790         nfs4_state_set_mode_locked(state, newstate);
791         spin_unlock(&owner->so_lock);
792 
793         if (!call_close) {
794                 nfs4_put_open_state(state);
795                 nfs4_put_state_owner(owner);
796         } else
797                 nfs4_do_close(state, gfp_mask, wait);
798 }
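/*
 * Worked example (illustrative) for the share-mode accounting above:
 * with one O_RDWR open and one O_RDONLY open under the same state_owner,
 * n_rdwr == 1 and n_rdonly == 1.  Closing the O_RDWR open leaves
 * n_rdwr == 0, n_rdonly == 1: FMODE_WRITE is cleared from newstate,
 * FMODE_READ is kept, and call_close is set if the write or read/write
 * stateid bits were in use, so the close path can downgrade the open
 * stateid to read-only rather than send a full CLOSE.  Only when all
 * three counters reach zero does newstate become 0 and a CLOSE go out.
 */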
799 
800 void nfs4_close_state(struct nfs4_state *state, fmode_t fmode)
801 {
802         __nfs4_close(state, fmode, GFP_NOFS, 0);
803 }
804 
805 void nfs4_close_sync(struct nfs4_state *state, fmode_t fmode)
806 {
807         __nfs4_close(state, fmode, GFP_KERNEL, 1);
808 }
809 
810 /*
811  * Search the state->lock_states for an existing lock_owner
812  * that is compatible with either of the given owners.
813  * If the second is non-NULL, then the first refers to a Posix-lock
814  * owner (current->files) and the second refers to a flock/OFD
815  * owner (struct file*).  In that case, prefer a match for the first
816  * owner.
817  * If both sorts of locks are held on the one file we cannot know
818  * which stateid was intended to be used, so a "correct" choice cannot
819  * be made.  Failing that, a "consistent" choice is preferable.  The
820  * consistent choice we make is to prefer the first owner, that of a
821  * Posix lock.
822  */
823 static struct nfs4_lock_state *
824 __nfs4_find_lock_state(struct nfs4_state *state,
825                        fl_owner_t fl_owner, fl_owner_t fl_owner2)
826 {
827         struct nfs4_lock_state *pos, *ret = NULL;
828         list_for_each_entry(pos, &state->lock_states, ls_locks) {
829                 if (pos->ls_owner == fl_owner) {
830                         ret = pos;
831                         break;
832                 }
833                 if (pos->ls_owner == fl_owner2)
834                         ret = pos;
835         }
836         if (ret)
837                 refcount_inc(&ret->ls_count);
838         return ret;
839 }
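/*
 * Example of the preference rule above (illustrative): if the same open
 * state carries both a Posix lock_state (owner == current->files) and a
 * flock/OFD lock_state (owner == the struct file), a lookup that passes
 * both owners returns the Posix lock_state; the flock/OFD lock_state is
 * returned only when no Posix match exists.
 */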
840 
841 /*
842  * Allocate and initialise a new lock_state for the given open state and
843  * lock owner, reserving a lock-owner id for it.  Returns NULL on failure.
844  * The caller links the new lock_state into state->lock_states.
845  */
846 static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
847 {
848         struct nfs4_lock_state *lsp;
849         struct nfs_server *server = state->owner->so_server;
850 
851         lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
852         if (lsp == NULL)
853                 return NULL;
854         nfs4_init_seqid_counter(&lsp->ls_seqid);
855         refcount_set(&lsp->ls_count, 1);
856         lsp->ls_state = state;
857         lsp->ls_owner = fl_owner;
858         lsp->ls_seqid.owner_id = ida_simple_get(&server->lockowner_id, 0, 0, GFP_NOFS);
859         if (lsp->ls_seqid.owner_id < 0)
860                 goto out_free;
861         INIT_LIST_HEAD(&lsp->ls_locks);
862         return lsp;
863 out_free:
864         kfree(lsp);
865         return NULL;
866 }
867 
868 void nfs4_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
869 {
870         ida_simple_remove(&server->lockowner_id, lsp->ls_seqid.owner_id);
871         nfs4_destroy_seqid_counter(&lsp->ls_seqid);
872         kfree(lsp);
873 }
874 
875 /*
876  * Return a referenced lock_state matching the given lock owner.  If none
877  * exists yet, allocate a new one, link it into state->lock_states, and
878  * return it.  Returns NULL on allocation failure.
879  */
880 static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
881 {
882         struct nfs4_lock_state *lsp, *new = NULL;
883 
884         for (;;) {
885                 spin_lock(&state->state_lock);
886                 lsp = __nfs4_find_lock_state(state, owner, NULL);
887                 if (lsp != NULL)
888                         break;
889                 if (new != NULL) {
890                         list_add(&new->ls_locks, &state->lock_states);
891                         set_bit(LK_STATE_IN_USE, &state->flags);
892                         lsp = new;
893                         new = NULL;
894                         break;
895                 }
896                 spin_unlock(&state->state_lock);
897                 new = nfs4_alloc_lock_state(state, owner);
898                 if (new == NULL)
899                         return NULL;
900         }
901         spin_unlock(&state->state_lock);
902         if (new != NULL)
903                 nfs4_free_lock_state(state->owner->so_server, new);
904         return lsp;
905 }
906 
907 /*
908  * Release reference to lock_state, and free it if we see that
909  * it is no longer in use
910  */
911 void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
912 {
913         struct nfs_server *server;
914         struct nfs4_state *state;
915 
916         if (lsp == NULL)
917                 return;
918         state = lsp->ls_state;
919         if (!refcount_dec_and_lock(&lsp->ls_count, &state->state_lock))
920                 return;
921         list_del(&lsp->ls_locks);
922         if (list_empty(&state->lock_states))
923                 clear_bit(LK_STATE_IN_USE, &state->flags);
924         spin_unlock(&state->state_lock);
925         server = state->owner->so_server;
926         if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
927                 struct nfs_client *clp = server->nfs_client;
928 
929                 clp->cl_mvops->free_lock_state(server, lsp);
930         } else
931                 nfs4_free_lock_state(server, lsp);
932 }
933 
934 static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
935 {
936         struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;
937 
938         dst->fl_u.nfs4_fl.owner = lsp;
939         refcount_inc(&lsp->ls_count);
940 }
941 
942 static void nfs4_fl_release_lock(struct file_lock *fl)
943 {
944         nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
945 }
946 
947 static const struct file_lock_operations nfs4_fl_lock_ops = {
948         .fl_copy_lock = nfs4_fl_copy_lock,
949         .fl_release_private = nfs4_fl_release_lock,
950 };
951 
952 int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
953 {
954         struct nfs4_lock_state *lsp;
955 
956         if (fl->fl_ops != NULL)
957                 return 0;
958         lsp = nfs4_get_lock_state(state, fl->fl_owner);
959         if (lsp == NULL)
960                 return -ENOMEM;
961         fl->fl_u.nfs4_fl.owner = lsp;
962         fl->fl_ops = &nfs4_fl_lock_ops;
963         return 0;
964 }
965 
966 static int nfs4_copy_lock_stateid(nfs4_stateid *dst,
967                 struct nfs4_state *state,
968                 const struct nfs_lock_context *l_ctx)
969 {
970         struct nfs4_lock_state *lsp;
971         fl_owner_t fl_owner, fl_flock_owner;
972         int ret = -ENOENT;
973 
974         if (l_ctx == NULL)
975                 goto out;
976 
977         if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
978                 goto out;
979 
980         fl_owner = l_ctx->lockowner;
981         fl_flock_owner = l_ctx->open_context->flock_owner;
982 
983         spin_lock(&state->state_lock);
984         lsp = __nfs4_find_lock_state(state, fl_owner, fl_flock_owner);
985         if (lsp && test_bit(NFS_LOCK_LOST, &lsp->ls_flags))
986                 ret = -EIO;
987         else if (lsp != NULL && test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) {
988                 nfs4_stateid_copy(dst, &lsp->ls_stateid);
989                 ret = 0;
990         }
991         spin_unlock(&state->state_lock);
992         nfs4_put_lock_state(lsp);
993 out:
994         return ret;
995 }
996 
997 bool nfs4_refresh_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
998 {
999         bool ret;
1000         int seq;
1001 
1002         do {
1003                 ret = false;
1004                 seq = read_seqbegin(&state->seqlock);
1005                 if (nfs4_state_match_open_stateid_other(state, dst)) {
1006                         dst->seqid = state->open_stateid.seqid;
1007                         ret = true;
1008                 }
1009         } while (read_seqretry(&state->seqlock, seq));
1010         return ret;
1011 }
1012 
1013 bool nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
1014 {
1015         bool ret;
1016         const nfs4_stateid *src;
1017         int seq;
1018 
1019         do {
1020                 ret = false;
1021                 src = &zero_stateid;
1022                 seq = read_seqbegin(&state->seqlock);
1023                 if (test_bit(NFS_OPEN_STATE, &state->flags)) {
1024                         src = &state->open_stateid;
1025                         ret = true;
1026                 }
1027                 nfs4_stateid_copy(dst, src);
1028         } while (read_seqretry(&state->seqlock, seq));
1029         return ret;
1030 }
1031 
1032 /*
1033  * Byte-range lock aware utility to initialize the stateid of read/write
1034  * requests.
1035  */
1036 int nfs4_select_rw_stateid(struct nfs4_state *state,
1037                 fmode_t fmode, const struct nfs_lock_context *l_ctx,
1038                 nfs4_stateid *dst, struct rpc_cred **cred)
1039 {
1040         int ret;
1041 
1042         if (!nfs4_valid_open_stateid(state))
1043                 return -EIO;
1044         if (cred != NULL)
1045                 *cred = NULL;
1046         ret = nfs4_copy_lock_stateid(dst, state, l_ctx);
1047         if (ret == -EIO)
1048                 /* A lost lock - don't even consider delegations */
1049                 goto out;
1050         /* returns true if delegation stateid found and copied */
1051         if (nfs4_copy_delegation_stateid(state->inode, fmode, dst, cred)) {
1052                 ret = 0;
1053                 goto out;
1054         }
1055         if (ret != -ENOENT)
1056                 /* nfs4_copy_delegation_stateid() didn't over-write
1057                  * dst, so it still has the lock stateid which we now
1058                  * choose to use.
1059                  */
1060                 goto out;
1061         nfs4_copy_open_stateid(dst, state);
1062         ret = 0;
1063 out:
1064         if (nfs_server_capable(state->inode, NFS_CAP_STATEID_NFSV41))
1065                 dst->seqid = 0;
1066         return ret;
1067 }
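/*
 * Illustrative sketch (hypothetical helper, not taken from this file) of
 * choosing the stateid for a READ, assuming the usual nfs_pgio_args
 * layout.  Per the logic above, a delegation stateid is preferred when
 * one covers the I/O mode, then any byte-range lock stateid, then the
 * open stateid.
 *
 *	static int nfs4_example_read_stateid(struct nfs_pgio_header *hdr)
 *	{
 *		struct nfs4_state *state = hdr->args.context->state;
 *
 *		return nfs4_select_rw_stateid(state, FMODE_READ,
 *					      hdr->args.lock_context,
 *					      &hdr->args.stateid, NULL);
 *	}
 */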
1068 
1069 struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask)
1070 {
1071         struct nfs_seqid *new;
1072 
1073         new = kmalloc(sizeof(*new), gfp_mask);
1074         if (new == NULL)
1075                 return ERR_PTR(-ENOMEM);
1076         new->sequence = counter;
1077         INIT_LIST_HEAD(&new->list);
1078         new->task = NULL;
1079         return new;
1080 }
1081 
1082 void nfs_release_seqid(struct nfs_seqid *seqid)
1083 {
1084         struct nfs_seqid_counter *sequence;
1085 
1086         if (seqid == NULL || list_empty(&seqid->list))
1087                 return;
1088         sequence = seqid->sequence;
1089         spin_lock(&sequence->lock);
1090         list_del_init(&seqid->list);
1091         if (!list_empty(&sequence->list)) {
1092                 struct nfs_seqid *next;
1093 
1094                 next = list_first_entry(&sequence->list,
1095                                 struct nfs_seqid, list);
1096                 rpc_wake_up_queued_task(&sequence->wait, next->task);
1097         }
1098         spin_unlock(&sequence->lock);
1099 }
1100 
1101 void nfs_free_seqid(struct nfs_seqid *seqid)
1102 {
1103         nfs_release_seqid(seqid);
1104         kfree(seqid);
1105 }
1106 
1107 /*
1108  * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
1109  * failed with a seqid incrementing error -
1110  * see comments nfs4.h:seqid_mutating_error()
1111  */
1112 static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
1113 {
1114         switch (status) {
1115                 case 0:
1116                         break;
1117                 case -NFS4ERR_BAD_SEQID:
1118                         if (seqid->sequence->flags & NFS_SEQID_CONFIRMED)
1119                                 return;
1120                         pr_warn_ratelimited("NFS: v4 server returned a bad"
1121                                         " sequence-id error on an"
1122                                         " unconfirmed sequence %p!\n",
1123                                         seqid->sequence);
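                             /* Fall through */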
1124                 case -NFS4ERR_STALE_CLIENTID:
1125                 case -NFS4ERR_STALE_STATEID:
1126                 case -NFS4ERR_BAD_STATEID:
1127                 case -NFS4ERR_BADXDR:
1128                 case -NFS4ERR_RESOURCE:
1129                 case -NFS4ERR_NOFILEHANDLE:
1130                 case -NFS4ERR_MOVED:
1131                         /* Non-seqid mutating errors */
1132                         return;
1133         }
1134         /*
1135          * Note: no locking needed as we are guaranteed to be first
1136          * on the sequence list
1137          */
1138         seqid->sequence->counter++;
1139 }
1140 
1141 void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
1142 {
1143         struct nfs4_state_owner *sp;
1144 
1145         if (seqid == NULL)
1146                 return;
1147 
1148         sp = container_of(seqid->sequence, struct nfs4_state_owner, so_seqid);
1149         if (status == -NFS4ERR_BAD_SEQID)
1150                 nfs4_reset_state_owner(sp);
1151         if (!nfs4_has_session(sp->so_server->nfs_client))
1152                 nfs_increment_seqid(status, seqid);
1153 }
1154 
1155 /*
1156  * Increment the seqid if the LOCK/LOCKU succeeded, or
1157  * failed with a seqid incrementing error -
1158  * see comments nfs4.h:seqid_mutating_error()
1159  */
1160 void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
1161 {
1162         if (seqid != NULL)
1163                 nfs_increment_seqid(status, seqid);
1164 }
1165 
1166 int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
1167 {
1168         struct nfs_seqid_counter *sequence;
1169         int status = 0;
1170 
1171         if (seqid == NULL)
1172                 goto out;
1173         sequence = seqid->sequence;
1174         spin_lock(&sequence->lock);
1175         seqid->task = task;
1176         if (list_empty(&seqid->list))
1177                 list_add_tail(&seqid->list, &sequence->list);
1178         if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid)
1179                 goto unlock;
1180         rpc_sleep_on(&sequence->wait, task, NULL);
1181         status = -EAGAIN;
1182 unlock:
1183         spin_unlock(&sequence->lock);
1184 out:
1185         return status;
1186 }
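/*
 * Illustrative lifecycle sketch (not taken from this file) for the seqid
 * machinery above, as used around a seqid-mutating operation against a
 * minor-version-0 server:
 *
 *	seqid = nfs_alloc_seqid(&sp->so_seqid, GFP_KERNEL);
 *	in ->rpc_call_prepare:  nfs_wait_on_sequence(seqid, task);
 *	after the RPC reply:    nfs_increment_open_seqid(status, seqid);
 *	when finished:          nfs_free_seqid(seqid);
 *
 * nfs_wait_on_sequence() returns -EAGAIN and leaves the task queued unless
 * the seqid is first in line, which is how concurrent OPEN/CLOSE/LOCK
 * users of a single counter are serialised.
 */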
1187 
1188 static int nfs4_run_state_manager(void *);
1189 
1190 static void nfs4_clear_state_manager_bit(struct nfs_client *clp)
1191 {
1192         smp_mb__before_atomic();
1193         clear_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
1194         smp_mb__after_atomic();
1195         wake_up_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING);
1196         rpc_wake_up(&clp->cl_rpcwaitq);
1197 }
1198 
1199 /*
1200  * Schedule the nfs_client asynchronous state management routine
1201  */
1202 void nfs4_schedule_state_manager(struct nfs_client *clp)
1203 {
1204         struct task_struct *task;
1205         char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1];
1206 
1207         if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
1208                 return;
1209         __module_get(THIS_MODULE);
1210         refcount_inc(&clp->cl_count);
1211 
1212         /* The rcu_read_lock() is not strictly necessary, as the state
1213          * manager is the only thread that ever changes the rpc_xprt
1214          * after it's initialized.  At this point, we're single threaded. */
1215         rcu_read_lock();
1216         snprintf(buf, sizeof(buf), "%s-manager",
1217                         rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
1218         rcu_read_unlock();
1219         task = kthread_run(nfs4_run_state_manager, clp, "%s", buf);
1220         if (IS_ERR(task)) {
1221                 printk(KERN_ERR "%s: kthread_run: %ld\n",
1222                         __func__, PTR_ERR(task));
1223                 nfs4_clear_state_manager_bit(clp);
1224                 nfs_put_client(clp);
1225                 module_put(THIS_MODULE);
1226         }
1227 }
1228 
1229 /*
1230  * Schedule a lease recovery attempt
1231  */
1232 void nfs4_schedule_lease_recovery(struct nfs_client *clp)
1233 {
1234         if (!clp)
1235                 return;
1236         if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
1237                 set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
1238         dprintk("%s: scheduling lease recovery for server %s\n", __func__,
1239                         clp->cl_hostname);
1240         nfs4_schedule_state_manager(clp);
1241 }
1242 EXPORT_SYMBOL_GPL(nfs4_schedule_lease_recovery);
1243 
1244 /**
1245  * nfs4_schedule_migration_recovery - trigger migration recovery
1246  *
1247  * @server: FSID that is migrating
1248  *
1249  * Returns zero if recovery has started, otherwise a negative NFS4ERR
1250  * value is returned.
1251  */
1252 int nfs4_schedule_migration_recovery(const struct nfs_server *server)
1253 {
1254         struct nfs_client *clp = server->nfs_client;
1255 
1256         if (server->fh_expire_type != NFS4_FH_PERSISTENT) {
1257                 pr_err("NFS: volatile file handles not supported (server %s)\n",
1258                                 clp->cl_hostname);
1259                 return -NFS4ERR_IO;
1260         }
1261 
1262         if (test_bit(NFS_MIG_FAILED, &server->mig_status))
1263                 return -NFS4ERR_IO;
1264 
1265         dprintk("%s: scheduling migration recovery for (%llx:%llx) on %s\n",
1266                         __func__,
1267                         (unsigned long long)server->fsid.major,
1268                         (unsigned long long)server->fsid.minor,
1269                         clp->cl_hostname);
1270 
1271         set_bit(NFS_MIG_IN_TRANSITION,
1272                         &((struct nfs_server *)server)->mig_status);
1273         set_bit(NFS4CLNT_MOVED, &clp->cl_state);
1274 
1275         nfs4_schedule_state_manager(clp);
1276         return 0;
1277 }
1278 EXPORT_SYMBOL_GPL(nfs4_schedule_migration_recovery);
1279 
1280 /**
1281  * nfs4_schedule_lease_moved_recovery - start lease-moved recovery
1282  *
1283  * @clp: nfs_client whose lease may have been moved
1284  *
1285  */
1286 void nfs4_schedule_lease_moved_recovery(struct nfs_client *clp)
1287 {
1288         dprintk("%s: scheduling lease-moved recovery for client ID %llx on %s\n",
1289                 __func__, clp->cl_clientid, clp->cl_hostname);
1290 
1291         set_bit(NFS4CLNT_LEASE_MOVED, &clp->cl_state);
1292         nfs4_schedule_state_manager(clp);
1293 }
1294 EXPORT_SYMBOL_GPL(nfs4_schedule_lease_moved_recovery);
1295 
1296 int nfs4_wait_clnt_recover(struct nfs_client *clp)
1297 {
1298         int res;
1299 
1300         might_sleep();
1301 
1302         refcount_inc(&clp->cl_count);
1303         res = wait_on_bit_action(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
1304                                  nfs_wait_bit_killable, TASK_KILLABLE);
1305         if (res)
1306                 goto out;
1307         if (clp->cl_cons_state < 0)
1308                 res = clp->cl_cons_state;
1309 out:
1310         nfs_put_client(clp);
1311         return res;
1312 }
1313 
1314 int nfs4_client_recover_expired_lease(struct nfs_client *clp)
1315 {
1316         unsigned int loop;
1317         int ret;
1318 
1319         for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
1320                 ret = nfs4_wait_clnt_recover(clp);
1321                 if (ret != 0)
1322                         break;
1323                 if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
1324                     !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state))
1325                         break;
1326                 nfs4_schedule_state_manager(clp);
1327                 ret = -EIO;
1328         }
1329         return ret;
1330 }
1331 
1332 /*
1333  * nfs40_handle_cb_pathdown - return all delegations after NFS4ERR_CB_PATH_DOWN
1334  * @clp: client to process
1335  *
1336  * Set the NFS4CLNT_LEASE_EXPIRED state in order to force a
1337  * resend of the SETCLIENTID and hence re-establish the
1338  * callback channel. Then return all existing delegations.
1339  */
1340 static void nfs40_handle_cb_pathdown(struct nfs_client *clp)
1341 {
1342         set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
1343         nfs_expire_all_delegations(clp);
1344         dprintk("%s: handling CB_PATHDOWN recovery for server %s\n", __func__,
1345                         clp->cl_hostname);
1346 }
1347 
1348 void nfs4_schedule_path_down_recovery(struct nfs_client *clp)
1349 {
1350         nfs40_handle_cb_pathdown(clp);
1351         nfs4_schedule_state_manager(clp);
1352 }
1353 
1354 static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
1355 {
1356 
1357         if (!nfs4_valid_open_stateid(state))
1358                 return 0;
1359         set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
1360         /* Don't recover state that expired before the reboot */
1361         if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) {
1362                 clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
1363                 return 0;
1364         }
1365         set_bit(NFS_OWNER_RECLAIM_REBOOT, &state->owner->so_flags);
1366         set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
1367         return 1;
1368 }
1369 
1370 int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
1371 {
1372         if (!nfs4_valid_open_stateid(state))
1373                 return 0;
1374         set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
1375         clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
1376         set_bit(NFS_OWNER_RECLAIM_NOGRACE, &state->owner->so_flags);
1377         set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
1378         return 1;
1379 }
1380 
1381 int nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_state *state)
1382 {
1383         struct nfs_client *clp = server->nfs_client;
1384 
1385         if (!nfs4_state_mark_reclaim_nograce(clp, state))
1386                 return -EBADF;
1387         dprintk("%s: scheduling stateid recovery for server %s\n", __func__,
1388                         clp->cl_hostname);
1389         nfs4_schedule_state_manager(clp);
1390         return 0;
1391 }
1392 EXPORT_SYMBOL_GPL(nfs4_schedule_stateid_recovery);
1393 
1394 static struct nfs4_lock_state *
1395 nfs_state_find_lock_state_by_stateid(struct nfs4_state *state,
1396                 const nfs4_stateid *stateid)
1397 {
1398         struct nfs4_lock_state *pos;
1399 
1400         list_for_each_entry(pos, &state->lock_states, ls_locks) {
1401                 if (!test_bit(NFS_LOCK_INITIALIZED, &pos->ls_flags))
1402                         continue;
1403                 if (nfs4_stateid_match_other(&pos->ls_stateid, stateid))
1404                         return pos;
1405         }
1406         return NULL;
1407 }
1408 
1409 static bool nfs_state_lock_state_matches_stateid(struct nfs4_state *state,
1410                 const nfs4_stateid *stateid)
1411 {
1412         bool found = false;
1413 
1414         if (test_bit(LK_STATE_IN_USE, &state->flags)) {
1415                 spin_lock(&state->state_lock);
1416                 if (nfs_state_find_lock_state_by_stateid(state, stateid))
1417                         found = true;
1418                 spin_unlock(&state->state_lock);
1419         }
1420         return found;
1421 }
1422 
1423 void nfs_inode_find_state_and_recover(struct inode *inode,
1424                 const nfs4_stateid *stateid)
1425 {
1426         struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
1427         struct nfs_inode *nfsi = NFS_I(inode);
1428         struct nfs_open_context *ctx;
1429         struct nfs4_state *state;
1430         bool found = false;
1431 
1432         spin_lock(&inode->i_lock);
1433         list_for_each_entry(ctx, &nfsi->open_files, list) {
1434                 state = ctx->state;
1435                 if (state == NULL)
1436                         continue;
1437                 if (nfs4_stateid_match_other(&state->stateid, stateid) &&
1438                     nfs4_state_mark_reclaim_nograce(clp, state)) {
1439                         found = true;
1440                         continue;
1441                 }
1442                 if (nfs4_stateid_match_other(&state->open_stateid, stateid) &&
1443                     nfs4_state_mark_reclaim_nograce(clp, state)) {
1444                         found = true;
1445                         continue;
1446                 }
1447                 if (nfs_state_lock_state_matches_stateid(state, stateid) &&
1448                     nfs4_state_mark_reclaim_nograce(clp, state))
1449                         found = true;
1450         }
1451         spin_unlock(&inode->i_lock);
1452 
1453         nfs_inode_find_delegation_state_and_recover(inode, stateid);
1454         if (found)
1455                 nfs4_schedule_state_manager(clp);
1456 }
1457 
1458 static void nfs4_state_mark_open_context_bad(struct nfs4_state *state)
1459 {
1460         struct inode *inode = state->inode;
1461         struct nfs_inode *nfsi = NFS_I(inode);
1462         struct nfs_open_context *ctx;
1463 
1464         spin_lock(&inode->i_lock);
1465         list_for_each_entry(ctx, &nfsi->open_files, list) {
1466                 if (ctx->state != state)
1467                         continue;
1468                 set_bit(NFS_CONTEXT_BAD, &ctx->flags);
1469         }
1470         spin_unlock(&inode->i_lock);
1471 }
1472 
1473 static void nfs4_state_mark_recovery_failed(struct nfs4_state *state, int error)
1474 {
1475         set_bit(NFS_STATE_RECOVERY_FAILED, &state->flags);
1476         nfs4_state_mark_open_context_bad(state);
1477 }
1478 
1479 
1480 static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
1481 {
1482         struct inode *inode = state->inode;
1483         struct nfs_inode *nfsi = NFS_I(inode);
1484         struct file_lock *fl;
1485         struct nfs4_lock_state *lsp;
1486         int status = 0;
1487         struct file_lock_context *flctx = inode->i_flctx;
1488         struct list_head *list;
1489 
1490         if (flctx == NULL)
1491                 return 0;
1492 
1493         list = &flctx->flc_posix;
1494 
1495         /* Guard against delegation returns and new lock/unlock calls */
1496         down_write(&nfsi->rwsem);
1497         spin_lock(&flctx->flc_lock);
1498 restart:
1499         list_for_each_entry(fl, list, fl_list) {
1500                 if (nfs_file_open_context(fl->fl_file)->state != state)
1501                         continue;
1502                 spin_unlock(&flctx->flc_lock);
1503                 status = ops->recover_lock(state, fl);
1504                 switch (status) {
1505                 case 0:
1506                         break;
1507                 case -ESTALE:
1508                 case -NFS4ERR_ADMIN_REVOKED:
1509                 case -NFS4ERR_STALE_STATEID:
1510                 case -NFS4ERR_BAD_STATEID:
1511                 case -NFS4ERR_EXPIRED:
1512                 case -NFS4ERR_NO_GRACE:
1513                 case -NFS4ERR_STALE_CLIENTID:
1514                 case -NFS4ERR_BADSESSION:
1515                 case -NFS4ERR_BADSLOT:
1516                 case -NFS4ERR_BAD_HIGH_SLOT:
1517                 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1518                         goto out;
1519                 default:
1520                         pr_err("NFS: %s: unhandled error %d\n",
1521                                         __func__, status);
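                             /* Fall through */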
1522                 case -ENOMEM:
1523                 case -NFS4ERR_DENIED:
1524                 case -NFS4ERR_RECLAIM_BAD:
1525                 case -NFS4ERR_RECLAIM_CONFLICT:
1526                         lsp = fl->fl_u.nfs4_fl.owner;
1527                         if (lsp)
1528                                 set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
1529                         status = 0;
1530                 }
1531                 spin_lock(&flctx->flc_lock);
1532         }
1533         if (list == &flctx->flc_posix) {
1534                 list = &flctx->flc_flock;
1535                 goto restart;
1536         }
1537         spin_unlock(&flctx->flc_lock);
1538 out:
1539         up_write(&nfsi->rwsem);
1540         return status;
1541 }
1542 
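     /*
      * Reclaim all open state (and any associated locks) owned by "sp",
      * restarting the scan after each state is processed.
      */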
1543 static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops)
1544 {
1545         struct nfs4_state *state;
1546         struct nfs4_lock_state *lock;
1547         int status = 0;
1548 
1549         /* Note: we rely on the sp->so_states list being ordered
1550          * so that we always reclaim open(O_RDWR) and/or open(O_WRONLY)
1551          * states first.
1552          * This is needed to ensure that the server won't give us any
1553          * read delegations that we have to return if, say, we are
1554          * recovering after a network partition or a reboot from a
1555          * server that doesn't support a grace period.
1556          */
1557         spin_lock(&sp->so_lock);
1558         raw_write_seqcount_begin(&sp->so_reclaim_seqcount);
1559 restart:
1560         list_for_each_entry(state, &sp->so_states, open_states) {
1561                 if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
1562                         continue;
1563                 if (!nfs4_valid_open_stateid(state))
1564                         continue;
1565                 if (state->state == 0)
1566                         continue;
1567                 atomic_inc(&state->count);
1568                 spin_unlock(&sp->so_lock);
1569                 status = ops->recover_open(sp, state);
1570                 if (status >= 0) {
1571                         status = nfs4_reclaim_locks(state, ops);
1572                         if (status >= 0) {
1573                                 if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) {
1574                                         spin_lock(&state->state_lock);
1575                                         list_for_each_entry(lock, &state->lock_states, ls_locks) {
1576                                                 if (!test_bit(NFS_LOCK_INITIALIZED, &lock->ls_flags))
1577                                                         pr_warn_ratelimited("NFS: "
1578                                                                             "%s: Lock reclaim "
1579                                                                             "failed!\n", __func__);
1580                                         }
1581                                         spin_unlock(&state->state_lock);
1582                                 }
1583                                 clear_bit(NFS_STATE_RECLAIM_NOGRACE,
1584                                         &state->flags);
1585                                 nfs4_put_open_state(state);
1586                                 spin_lock(&sp->so_lock);
1587                                 goto restart;
1588                         }
1589                 }
1590                 switch (status) {
1591                         default:
1592                                 printk(KERN_ERR "NFS: %s: unhandled error %d\n",
1593                                         __func__, status);
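                                     /* Fall through */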
1594                         case -ENOENT:
1595                         case -ENOMEM:
1596                         case -EACCES:
1597                         case -EROFS:
1598                         case -EIO:
1599                         case -ESTALE:
1600                                 /* Open state on this file cannot be recovered */
1601                                 nfs4_state_mark_recovery_failed(state, status);
1602                                 break;
1603                         case -EAGAIN:
1604                                 ssleep(1);
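                                     /* Fall through */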
1605                         case -NFS4ERR_ADMIN_REVOKED:
1606                         case -NFS4ERR_STALE_STATEID:
1607                         case -NFS4ERR_OLD_STATEID:
1608                         case -NFS4ERR_BAD_STATEID:
1609                         case -NFS4ERR_RECLAIM_BAD:
1610                         case -NFS4ERR_RECLAIM_CONFLICT:
1611                                 nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
1612                                 break;
1613                         case -NFS4ERR_EXPIRED:
1614                         case -NFS4ERR_NO_GRACE:
1615                                 nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
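                                     /* Fall through */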
1616                         case -NFS4ERR_STALE_CLIENTID:
1617                         case -NFS4ERR_BADSESSION:
1618                         case -NFS4ERR_BADSLOT:
1619                         case -NFS4ERR_BAD_HIGH_SLOT:
1620                         case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1621                                 goto out_err;
1622                 }
1623                 nfs4_put_open_state(state);
1624                 spin_lock(&sp->so_lock);
1625                 goto restart;
1626         }
1627         raw_write_seqcount_end(&sp->so_reclaim_seqcount);
1628         spin_unlock(&sp->so_lock);
1629         return 0;
1630 out_err:
1631         nfs4_put_open_state(state);
1632         spin_lock(&sp->so_lock);
1633         raw_write_seqcount_end(&sp->so_reclaim_seqcount);
1634         spin_unlock(&sp->so_lock);
1635         return status;
1636 }
1637 
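     /*
      * Clear the delegation and open mode bits on "state" and reset the
      * seqid flags on each of its lock states so they can be re-established.
      */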
1638 static void nfs4_clear_open_state(struct nfs4_state *state)
1639 {
1640         struct nfs4_lock_state *lock;
1641 
1642         clear_bit(NFS_DELEGATED_STATE, &state->flags);
1643         clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1644         clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1645         clear_bit(NFS_O_RDWR_STATE, &state->flags);
1646         spin_lock(&state->state_lock);
1647         list_for_each_entry(lock, &state->lock_states, ls_locks) {
1648                 lock->ls_seqid.flags = 0;
1649                 clear_bit(NFS_LOCK_INITIALIZED, &lock->ls_flags);
1650         }
1651         spin_unlock(&state->state_lock);
1652 }
1653 
1654 static void nfs4_reset_seqids(struct nfs_server *server,
1655         int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
1656 {
1657         struct nfs_client *clp = server->nfs_client;
1658         struct nfs4_state_owner *sp;
1659         struct rb_node *pos;
1660         struct nfs4_state *state;
1661 
1662         spin_lock(&clp->cl_lock);
1663         for (pos = rb_first(&server->state_owners);
1664              pos != NULL;
1665              pos = rb_next(pos)) {
1666                 sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
1667                 sp->so_seqid.flags = 0;
1668                 spin_lock(&sp->so_lock);
1669                 list_for_each_entry(state, &sp->so_states, open_states) {
1670                         if (mark_reclaim(clp, state))
1671                                 nfs4_clear_open_state(state);
1672                 }
1673                 spin_unlock(&sp->so_lock);
1674         }
1675         spin_unlock(&clp->cl_lock);
1676 }
1677 
1678 static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp,
1679         int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
1680 {
1681         struct nfs_server *server;
1682 
1683         rcu_read_lock();
1684         list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
1685                 nfs4_reset_seqids(server, mark_reclaim);
1686         rcu_read_unlock();
1687 }
1688 
1689 static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
1690 {
1691         /* Mark all delegations for reclaim */
1692         nfs_delegation_mark_reclaim(clp);
1693         nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
1694 }
1695 
1696 static int nfs4_reclaim_complete(struct nfs_client *clp,
1697                                  const struct nfs4_state_recovery_ops *ops,
1698                                  struct rpc_cred *cred)
1699 {
1700         /* Notify the server we're done reclaiming our state */
1701         if (ops->reclaim_complete)
1702                 return ops->reclaim_complete(clp, cred);
1703         return 0;
1704 }
1705 
1706 static void nfs4_clear_reclaim_server(struct nfs_server *server)
1707 {
1708         struct nfs_client *clp = server->nfs_client;
1709         struct nfs4_state_owner *sp;
1710         struct rb_node *pos;
1711         struct nfs4_state *state;
1712 
1713         spin_lock(&clp->cl_lock);
1714         for (pos = rb_first(&server->state_owners);
1715              pos != NULL;
1716              pos = rb_next(pos)) {
1717                 sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
1718                 spin_lock(&sp->so_lock);
1719                 list_for_each_entry(state, &sp->so_states, open_states) {
1720                         if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT,
1721                                                 &state->flags))
1722                                 continue;
1723                         nfs4_state_mark_reclaim_nograce(clp, state);
1724                 }
1725                 spin_unlock(&sp->so_lock);
1726         }
1727         spin_unlock(&clp->cl_lock);
1728 }
1729 
1730 static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp)
1731 {
1732         struct nfs_server *server;
1733 
1734         if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
1735                 return 0;
1736 
1737         rcu_read_lock();
1738         list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
1739                 nfs4_clear_reclaim_server(server);
1740         rcu_read_unlock();
1741 
1742         nfs_delegation_reap_unclaimed(clp);
1743         return 1;
1744 }
1745 
1746 static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
1747 {
1748         const struct nfs4_state_recovery_ops *ops;
1749         struct rpc_cred *cred;
1750         int err;
1751 
1752         if (!nfs4_state_clear_reclaim_reboot(clp))
1753                 return;
1754         ops = clp->cl_mvops->reboot_recovery_ops;
1755         cred = nfs4_get_clid_cred(clp);
1756         err = nfs4_reclaim_complete(clp, ops, cred);
1757         put_rpccred(cred);
1758         if (err == -NFS4ERR_CONN_NOT_BOUND_TO_SESSION)
1759                 set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
1760 }
1761 
1762 static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp)
1763 {
1764         nfs_mark_test_expired_all_delegations(clp);
1765         nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
1766 }
1767 
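     /*
      * Map a recovery error onto the appropriate clp->cl_state flags.
      * Returns 0 if the error was handled, otherwise the original error.
      */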
1768 static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
1769 {
1770         switch (error) {
1771                 case 0:
1772                         break;
1773                 case -NFS4ERR_CB_PATH_DOWN:
1774                         nfs40_handle_cb_pathdown(clp);
1775                         break;
1776                 case -NFS4ERR_NO_GRACE:
1777                         nfs4_state_end_reclaim_reboot(clp);
1778                         break;
1779                 case -NFS4ERR_STALE_CLIENTID:
1780                         set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
1781                         nfs4_state_start_reclaim_reboot(clp);
1782                         break;
1783                 case -NFS4ERR_EXPIRED:
1784                         set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
1785                         nfs4_state_start_reclaim_nograce(clp);
1786                         break;
1787                 case -NFS4ERR_BADSESSION:
1788                 case -NFS4ERR_BADSLOT:
1789                 case -NFS4ERR_BAD_HIGH_SLOT:
1790                 case -NFS4ERR_DEADSESSION:
1791                 case -NFS4ERR_SEQ_FALSE_RETRY:
1792                 case -NFS4ERR_SEQ_MISORDERED:
1793                         set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
1794                         /* Zero session reset errors */
1795                         break;
1796                 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1797                         set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
1798                         break;
1799                 default:
1800                         dprintk("%s: failed to handle error %d for server %s\n",
1801                                         __func__, error, clp->cl_hostname);
1802                         return error;
1803         }
1804         dprintk("%s: handled error %d for server %s\n", __func__, error,
1805                         clp->cl_hostname);
1806         return 0;
1807 }
1808 
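     /*
      * Reclaim open state for every state owner, on every nfs_server
      * attached to "clp", using the supplied recovery ops.
      */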
1809 static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
1810 {
1811         struct nfs4_state_owner *sp;
1812         struct nfs_server *server;
1813         struct rb_node *pos;
1814         int status = 0;
1815 
1816 restart:
1817         rcu_read_lock();
1818         list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
1819                 nfs4_purge_state_owners(server);
1820                 spin_lock(&clp->cl_lock);
1821                 for (pos = rb_first(&server->state_owners);
1822                      pos != NULL;
1823                      pos = rb_next(pos)) {
1824                         sp = rb_entry(pos,
1825                                 struct nfs4_state_owner, so_server_node);
1826                         if (!test_and_clear_bit(ops->owner_flag_bit,
1827                                                         &sp->so_flags))
1828                                 continue;
1829                         if (!atomic_inc_not_zero(&sp->so_count))
1830                                 continue;
1831                         spin_unlock(&clp->cl_lock);
1832                         rcu_read_unlock();
1833 
1834                         status = nfs4_reclaim_open_state(sp, ops);
1835                         if (status < 0) {
1836                                 set_bit(ops->owner_flag_bit, &sp->so_flags);
1837                                 nfs4_put_state_owner(sp);
1838                                 status = nfs4_recovery_handle_error(clp, status);
1839                                 return (status != 0) ? status : -EAGAIN;
1840                         }
1841 
1842                         nfs4_put_state_owner(sp);
1843                         goto restart;
1844                 }
1845                 spin_unlock(&clp->cl_lock);
1846         }
1847         rcu_read_unlock();
1848         return 0;
1849 }
1850 
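     /*
      * Verify the client lease by sending a renewal, preferring the state
      * renewal credential and falling back to the clientid credential.
      */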
1851 static int nfs4_check_lease(struct nfs_client *clp)
1852 {
1853         struct rpc_cred *cred;
1854         const struct nfs4_state_maintenance_ops *ops =
1855                 clp->cl_mvops->state_renewal_ops;
1856         int status;
1857 
1858         /* Is the client already known to have an expired lease? */
1859         if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
1860                 return 0;
1861         spin_lock(&clp->cl_lock);
1862         cred = ops->get_state_renewal_cred_locked(clp);
1863         spin_unlock(&clp->cl_lock);
1864         if (cred == NULL) {
1865                 cred = nfs4_get_clid_cred(clp);
1866                 status = -ENOKEY;
1867                 if (cred == NULL)
1868                         goto out;
1869         }
1870         status = ops->renew_lease(clp, cred);
1871         put_rpccred(cred);
1872         if (status == -ETIMEDOUT) {
1873                 set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
1874                 return 0;
1875         }
1876 out:
1877         return nfs4_recovery_handle_error(clp, status);
1878 }
1879 
1880 /* Set NFS4CLNT_LEASE_EXPIRED and reclaim reboot state for all v4.0 errors
1881  * and for recoverable errors on EXCHANGE_ID for v4.1
1882  */
1883 static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
1884 {
1885         switch (status) {
1886         case -NFS4ERR_SEQ_MISORDERED:
1887                 if (test_and_set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state))
1888                         return -ESERVERFAULT;
1889                 /* Lease confirmation error: retry after purging the lease */
1890                 ssleep(1);
1891                 clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
1892                 break;
1893         case -NFS4ERR_STALE_CLIENTID:
1894                 clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
1895                 nfs4_state_start_reclaim_reboot(clp);
1896                 break;
1897         case -NFS4ERR_CLID_INUSE:
1898                 pr_err("NFS: Server %s reports our clientid is in use\n",
1899                         clp->cl_hostname);
1900                 nfs_mark_client_ready(clp, -EPERM);
1901                 clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
1902                 return -EPERM;
1903         case -EACCES:
1904         case -NFS4ERR_DELAY:
1905         case -ETIMEDOUT:
1906         case -EAGAIN:
1907                 ssleep(1);
1908                 break;
1909 
1910         case -NFS4ERR_MINOR_VERS_MISMATCH:
1911                 if (clp->cl_cons_state == NFS_CS_SESSION_INITING)
1912                         nfs_mark_client_ready(clp, -EPROTONOSUPPORT);
1913                 dprintk("%s: exit with error %d for server %s\n",
1914                                 __func__, -EPROTONOSUPPORT, clp->cl_hostname);
1915                 return -EPROTONOSUPPORT;
1916         case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
1917                                  * in nfs4_exchange_id */
1918         default:
1919                 dprintk("%s: exit with error %d for server %s\n", __func__,
1920                                 status, clp->cl_hostname);
1921                 return status;
1922         }
1923         set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
1924         dprintk("%s: handled error %d for server %s\n", __func__, status,
1925                         clp->cl_hostname);
1926         return 0;
1927 }
1928 
1929 static int nfs4_establish_lease(struct nfs_client *clp)
1930 {
1931         struct rpc_cred *cred;
1932         const struct nfs4_state_recovery_ops *ops =
1933                 clp->cl_mvops->reboot_recovery_ops;
1934         int status;
1935 
1936         nfs4_begin_drain_session(clp);
1937         cred = nfs4_get_clid_cred(clp);
1938         if (cred == NULL)
1939                 return -ENOENT;
1940         status = ops->establish_clid(clp, cred);
1941         put_rpccred(cred);
1942         if (status != 0)
1943                 return status;
1944         pnfs_destroy_all_layouts(clp);
1945         return 0;
1946 }
1947 
1948 /*
1949  * Returns zero or a negative errno.  NFS4ERR values are converted
1950  * to local errno values.
1951  */
1952 static int nfs4_reclaim_lease(struct nfs_client *clp)
1953 {
1954         int status;
1955 
1956         status = nfs4_establish_lease(clp);
1957         if (status < 0)
1958                 return nfs4_handle_reclaim_lease_error(clp, status);
1959         if (test_and_clear_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state))
1960                 nfs4_state_start_reclaim_nograce(clp);
1961         if (!test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state))
1962                 set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
1963         clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
1964         clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
1965         return 0;
1966 }
1967 
1968 static int nfs4_purge_lease(struct nfs_client *clp)
1969 {
1970         int status;
1971 
1972         status = nfs4_establish_lease(clp);
1973         if (status < 0)
1974                 return nfs4_handle_reclaim_lease_error(clp, status);
1975         clear_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
1976         set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
1977         nfs4_state_start_reclaim_nograce(clp);
1978         return 0;
1979 }
1980 
1981 /*
1982  * Try remote migration of one FSID from a source server to a
1983  * destination server.  The source server provides a list of
1984  * potential destinations.
1985  *
1986  * Returns zero or a negative NFS4ERR status code.
1987  */
1988 static int nfs4_try_migration(struct nfs_server *server, struct rpc_cred *cred)
1989 {
1990         struct nfs_client *clp = server->nfs_client;
1991         struct nfs4_fs_locations *locations = NULL;
1992         struct inode *inode;
1993         struct page *page;
1994         int status, result;
1995 
1996         dprintk("--> %s: FSID %llx:%llx on \"%s\"\n", __func__,
1997                         (unsigned long long)server->fsid.major,
1998                         (unsigned long long)server->fsid.minor,
1999                         clp->cl_hostname);
2000 
2001         result = 0;
2002         page = alloc_page(GFP_KERNEL);
2003         locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
2004         if (page == NULL || locations == NULL) {
2005                 dprintk("<-- %s: no memory\n", __func__);
2006                 goto out;
2007         }
2008 
2009         inode = d_inode(server->super->s_root);
2010         result = nfs4_proc_get_locations(inode, locations, page, cred);
2011         if (result) {
2012                 dprintk("<-- %s: failed to retrieve fs_locations: %d\n",
2013                         __func__, result);
2014                 goto out;
2015         }
2016 
2017         result = -NFS4ERR_NXIO;
2018         if (!(locations->fattr.valid & NFS_ATTR_FATTR_V4_LOCATIONS)) {
2019                 dprintk("<-- %s: No fs_locations data, migration skipped\n",
2020                         __func__);
2021                 goto out;
2022         }
2023 
2024         nfs4_begin_drain_session(clp);
2025 
2026         status = nfs4_replace_transport(server, locations);
2027         if (status != 0) {
2028                 dprintk("<-- %s: failed to replace transport: %d\n",
2029                         __func__, status);
2030                 goto out;
2031         }
2032 
2033         result = 0;
2034         dprintk("<-- %s: migration succeeded\n", __func__);
2035 
2036 out:
2037         if (page != NULL)
2038                 __free_page(page);
2039         kfree(locations);
2040         if (result) {
2041                 pr_err("NFS: migration recovery failed (server %s)\n",
2042                                 clp->cl_hostname);
2043                 set_bit(NFS_MIG_FAILED, &server->mig_status);
2044         }
2045         return result;
2046 }
2047 
2048 /*
2049  * Returns zero or a negative NFS4ERR status code.
2050  */
2051 static int nfs4_handle_migration(struct nfs_client *clp)
2052 {
2053         const struct nfs4_state_maintenance_ops *ops =
2054                                 clp->cl_mvops->state_renewal_ops;
2055         struct nfs_server *server;
2056         struct rpc_cred *cred;
2057 
2058         dprintk("%s: migration reported on \"%s\"\n", __func__,
2059                         clp->cl_hostname);
2060 
2061         spin_lock(&clp->cl_lock);
2062         cred = ops->get_state_renewal_cred_locked(clp);
2063         spin_unlock(&clp->cl_lock);
2064         if (cred == NULL)
2065                 return -NFS4ERR_NOENT;
2066 
2067         clp->cl_mig_gen++;
2068 restart:
2069         rcu_read_lock();
2070         list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
2071                 int status;
2072 
2073                 if (server->mig_gen == clp->cl_mig_gen)
2074                         continue;
2075                 server->mig_gen = clp->cl_mig_gen;
2076 
2077                 if (!test_and_clear_bit(NFS_MIG_IN_TRANSITION,
2078                                                 &server->mig_status))
2079                         continue;
2080 
2081                 rcu_read_unlock();
2082                 status = nfs4_try_migration(server, cred);
2083                 if (status < 0) {
2084                         put_rpccred(cred);
2085                         return status;
2086                 }
2087                 goto restart;
2088         }
2089         rcu_read_unlock();
2090         put_rpccred(cred);
2091         return 0;
2092 }
2093 
2094 /*
2095  * Test each nfs_server on the clp's cl_superblocks list to see
2096  * if it's moved to another server.  Stop when the server no longer
2097  * returns NFS4ERR_LEASE_MOVED.
2098  */
2099 static int nfs4_handle_lease_moved(struct nfs_client *clp)
2100 {
2101         const struct nfs4_state_maintenance_ops *ops =
2102                                 clp->cl_mvops->state_renewal_ops;
2103         struct nfs_server *server;
2104         struct rpc_cred *cred;
2105 
2106         dprintk("%s: lease moved reported on \"%s\"\n", __func__,
2107                         clp->cl_hostname);
2108 
2109         spin_lock(&clp->cl_lock);
2110         cred = ops->get_state_renewal_cred_locked(clp);
2111         spin_unlock(&clp->cl_lock);
2112         if (cred == NULL)
2113                 return -NFS4ERR_NOENT;
2114 
2115         clp->cl_mig_gen++;
2116 restart:
2117         rcu_read_lock();
2118         list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
2119                 struct inode *inode;
2120                 int status;
2121 
2122                 if (server->mig_gen == clp->cl_mig_gen)
2123                         continue;
2124                 server->mig_gen = clp->cl_mig_gen;
2125 
2126                 rcu_read_unlock();
2127 
2128                 inode = d_inode(server->super->s_root);
2129                 status = nfs4_proc_fsid_present(inode, cred);
2130                 if (status != -NFS4ERR_MOVED)
2131                         goto restart;   /* wasn't this one */
2132                 if (nfs4_try_migration(server, cred) == -NFS4ERR_LEASE_MOVED)
2133                         goto restart;   /* there are more */
2134                 goto out;
2135         }
2136         rcu_read_unlock();
2137 
2138 out:
2139         put_rpccred(cred);
2140         return 0;
2141 }
2142 
2143 /**
2144  * nfs4_discover_server_trunking - Detect server IP address trunking
2145  *
2146  * @clp: nfs_client under test
2147  * @result: OUT: found nfs_client, or clp
2148  *
2149  * Returns zero or a negative errno.  If zero is returned,
2150  * an nfs_client pointer is planted in "result".
2151  *
2152  * Note: since we are invoked in process context, and
2153  * not from inside the state manager, we cannot use
2154  * nfs4_handle_reclaim_lease_error().
2155  */
2156 int nfs4_discover_server_trunking(struct nfs_client *clp,
2157                                   struct nfs_client **result)
2158 {
2159         const struct nfs4_state_recovery_ops *ops =
2160                                 clp->cl_mvops->reboot_recovery_ops;
2161         struct rpc_clnt *clnt;
2162         struct rpc_cred *cred;
2163         int i, status;
2164 
2165         dprintk("NFS: %s: testing '%s'\n", __func__, clp->cl_hostname);
2166 
2167         clnt = clp->cl_rpcclient;
2168         i = 0;
2169 
2170         mutex_lock(&nfs_clid_init_mutex);
2171 again:
2172         status = -ENOENT;
2173         cred = nfs4_get_clid_cred(clp);
2174         if (cred == NULL)
2175                 goto out_unlock;
2176 
2177         status = ops->detect_trunking(clp, result, cred);
2178         put_rpccred(cred);
2179         switch (status) {
2180         case 0:
2181         case -EINTR:
2182         case -ERESTARTSYS:
2183                 break;
2184         case -ETIMEDOUT:
2185                 if (clnt->cl_softrtry)
2186                         break;
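                     /* Fall through */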
2187         case -NFS4ERR_DELAY:
2188         case -EAGAIN:
2189                 ssleep(1);
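                     /* Fall through */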
2190         case -NFS4ERR_STALE_CLIENTID:
2191                 dprintk("NFS: %s after status %d, retrying\n",
2192                         __func__, status);
2193                 goto again;
2194         case -EACCES:
2195                 if (i++ == 0) {
2196                         nfs4_root_machine_cred(clp);
2197                         goto again;
2198                 }
2199                 if (clnt->cl_auth->au_flavor == RPC_AUTH_UNIX)
2200                         break;
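                     /* Fall through */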
2201         case -NFS4ERR_CLID_INUSE:
2202         case -NFS4ERR_WRONGSEC:
2203                 /* No point in retrying if we already used RPC_AUTH_UNIX */
2204                 if (clnt->cl_auth->au_flavor == RPC_AUTH_UNIX) {
2205                         status = -EPERM;
2206                         break;
2207                 }
2208                 clnt = rpc_clone_client_set_auth(clnt, RPC_AUTH_UNIX);
2209                 if (IS_ERR(clnt)) {
2210                         status = PTR_ERR(clnt);
2211                         break;
2212                 }
2213                 /* Note: this is safe because we haven't yet marked the
2214                  * client as ready, so we are the only user of
2215                  * clp->cl_rpcclient
2216                  */
2217                 clnt = xchg(&clp->cl_rpcclient, clnt);
2218                 rpc_shutdown_client(clnt);
2219                 clnt = clp->cl_rpcclient;
2220                 goto again;
2221 
2222         case -NFS4ERR_MINOR_VERS_MISMATCH:
2223                 status = -EPROTONOSUPPORT;
2224                 break;
2225 
2226         case -EKEYEXPIRED:
2227         case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
2228                                  * in nfs4_exchange_id */
2229                 status = -EKEYEXPIRED;
2230                 break;
2231         default:
2232                 pr_warn("NFS: %s unhandled error %d. Exiting with error EIO\n",
2233                                 __func__, status);
2234                 status = -EIO;
2235         }
2236 
2237 out_unlock:
2238         mutex_unlock(&nfs_clid_init_mutex);
2239         dprintk("NFS: %s: status = %d\n", __func__, status);
2240         return status;
2241 }
2242 
2243 #ifdef CONFIG_NFS_V4_1
2244 void nfs4_schedule_session_recovery(struct nfs4_session *session, int err)
2245 {
2246         struct nfs_client *clp = session->clp;
2247 
2248         switch (err) {
2249         default:
2250                 set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
2251                 break;
2252         case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
2253                 set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
2254         }
2255         nfs4_schedule_state_manager(clp);
2256 }
2257 EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery);
2258 
2259 void nfs41_notify_server(struct nfs_client *clp)
2260 {
2261         /* Use CHECK_LEASE to ping the server with a SEQUENCE */
2262         set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
2263         nfs4_schedule_state_manager(clp);
2264 }
2265 
2266 static void nfs4_reset_all_state(struct nfs_client *clp)
2267 {
2268         if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
2269                 set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
2270                 clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
2271                 nfs4_state_start_reclaim_nograce(clp);
2272                 dprintk("%s: scheduling reset of all state for server %s!\n",
2273                                 __func__, clp->cl_hostname);
2274                 nfs4_schedule_state_manager(clp);
2275         }
2276 }
2277 
2278 static void nfs41_handle_server_reboot(struct nfs_client *clp)
2279 {
2280         if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
2281                 nfs4_state_start_reclaim_reboot(clp);
2282                 dprintk("%s: server %s rebooted!\n", __func__,
2283                                 clp->cl_hostname);
2284                 nfs4_schedule_state_manager(clp);
2285         }
2286 }
2287 
2288 static void nfs41_handle_all_state_revoked(struct nfs_client *clp)
2289 {
2290         nfs4_reset_all_state(clp);
2291         dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
2292 }
2293 
2294 static void nfs41_handle_some_state_revoked(struct nfs_client *clp)
2295 {
2296         nfs4_state_start_reclaim_nograce(clp);
2297         nfs4_schedule_state_manager(clp);
2298 
2299         dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
2300 }
2301 
2302 static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp)
2303 {
2304         /* FIXME: For now, we destroy all layouts. */
2305         pnfs_destroy_all_layouts(clp);
2306         /* FIXME: For now, we test all delegations+open state+locks. */
2307         nfs41_handle_some_state_revoked(clp);
2308         dprintk("%s: Recallable state revoked on server %s!\n", __func__,
2309                         clp->cl_hostname);
2310 }
2311 
2312 static void nfs41_handle_backchannel_fault(struct nfs_client *clp)
2313 {
2314         set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
2315         nfs4_schedule_state_manager(clp);
2316 
2317         dprintk("%s: server %s declared a backchannel fault\n", __func__,
2318                         clp->cl_hostname);
2319 }
2320 
2321 static void nfs41_handle_cb_path_down(struct nfs_client *clp)
2322 {
2323         if (test_and_set_bit(NFS4CLNT_BIND_CONN_TO_SESSION,
2324                 &clp->cl_state) == 0)
2325                 nfs4_schedule_state_manager(clp);
2326 }
2327 
2328 void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags,
2329                 bool recovery)
2330 {
2331         if (!flags)
2332                 return;
2333 
2334         dprintk("%s: \"%s\" (client ID %llx) flags=0x%08x\n",
2335                 __func__, clp->cl_hostname, clp->cl_clientid, flags);
2336         /*
2337          * If we're called from the state manager thread, then assume we're
2338          * already handling the RECLAIM_NEEDED and/or STATE_REVOKED.
2339          * Those flags are expected to remain set until we're done
2340          * recovering (see RFC5661, section 18.46.3).
2341          */
2342         if (recovery)
2343                 goto out_recovery;
2344 
2345         if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
2346                 nfs41_handle_server_reboot(clp);
2347         if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED))
2348                 nfs41_handle_all_state_revoked(clp);
2349         if (flags & (SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
2350                             SEQ4_STATUS_ADMIN_STATE_REVOKED))
2351                 nfs41_handle_some_state_revoked(clp);
2352         if (flags & SEQ4_STATUS_LEASE_MOVED)
2353                 nfs4_schedule_lease_moved_recovery(clp);
2354         if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
2355                 nfs41_handle_recallable_state_revoked(clp);
2356 out_recovery:
2357         if (flags & SEQ4_STATUS_BACKCHANNEL_FAULT)
2358                 nfs41_handle_backchannel_fault(clp);
2359         else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
2360                                 SEQ4_STATUS_CB_PATH_DOWN_SESSION))
2361                 nfs41_handle_cb_path_down(clp);
2362 }
2363 
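     /*
      * Drain the current session, then destroy it and create a new one.
      */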
2364 static int nfs4_reset_session(struct nfs_client *clp)
2365 {
2366         struct rpc_cred *cred;
2367         int status;
2368 
2369         if (!nfs4_has_session(clp))
2370                 return 0;
2371         nfs4_begin_drain_session(clp);
2372         cred = nfs4_get_clid_cred(clp);
2373         status = nfs4_proc_destroy_session(clp->cl_session, cred);
2374         switch (status) {
2375         case 0:
2376         case -NFS4ERR_BADSESSION:
2377         case -NFS4ERR_DEADSESSION:
2378                 break;
2379         case -NFS4ERR_BACK_CHAN_BUSY:
2380         case -NFS4ERR_DELAY:
2381                 set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
2382                 status = 0;
2383                 ssleep(1);
2384                 goto out;
2385         default:
2386                 status = nfs4_recovery_handle_error(clp, status);
2387                 goto out;
2388         }
2389 
2390         memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
2391         status = nfs4_proc_create_session(clp, cred);
2392         if (status) {
2393                 dprintk("%s: session reset failed with status %d for server %s!\n",
2394                         __func__, status, clp->cl_hostname);
2395                 status = nfs4_handle_reclaim_lease_error(clp, status);
2396                 goto out;
2397         }
2398         nfs41_finish_session_reset(clp);
2399         dprintk("%s: session reset was successful for server %s!\n",
2400                         __func__, clp->cl_hostname);
2401 out:
2402         if (cred)
2403                 put_rpccred(cred);
2404         return status;
2405 }
2406 
2407 static int nfs4_bind_conn_to_session(struct nfs_client *clp)
2408 {
2409         struct rpc_cred *cred;
2410         int ret;
2411 
2412         if (!nfs4_has_session(clp))
2413                 return 0;
2414         nfs4_begin_drain_session(clp);
2415         cred = nfs4_get_clid_cred(clp);
2416         ret = nfs4_proc_bind_conn_to_session(clp, cred);
2417         if (cred)
2418                 put_rpccred(cred);
2419         clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
2420         switch (ret) {
2421         case 0:
2422                 dprintk("%s: bind_conn_to_session was successful for server %s!\n",
2423                         __func__, clp->cl_hostname);
2424                 break;
2425         case -NFS4ERR_DELAY:
2426                 ssleep(1);
2427                 set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
2428                 break;
2429         default:
2430                 return nfs4_recovery_handle_error(clp, ret);
2431         }
2432         return 0;
2433 }
2434 #else /* CONFIG_NFS_V4_1 */
2435 static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
2436 
2437 static int nfs4_bind_conn_to_session(struct nfs_client *clp)
2438 {
2439         return 0;
2440 }
2441 #endif /* CONFIG_NFS_V4_1 */
2442 
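     /*
      * Main recovery loop: handle the flags set in clp->cl_state until no
      * work remains or an unrecoverable error is hit.
      */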
2443 static void nfs4_state_manager(struct nfs_client *clp)
2444 {
2445         int status = 0;
2446         const char *section = "", *section_sep = "";
2447 
2448         /* Ensure exclusive access to NFSv4 state */
2449         do {
2450                 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
2451                         section = "purge state";
2452                         status = nfs4_purge_lease(clp);
2453                         if (status < 0)
2454                                 goto out_error;
2455                         continue;
2456                 }
2457 
2458                 if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
2459                         section = "lease expired";
2460                         /* We're going to have to re-establish a clientid */
2461                         status = nfs4_reclaim_lease(clp);
2462                         if (status < 0)
2463                                 goto out_error;
2464                         continue;
2465                 }
2466 
2467                 /* Initialize or reset the session */
2468                 if (test_and_clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)) {
2469                         section = "reset session";
2470                         status = nfs4_reset_session(clp);
2471                         if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
2472                                 continue;
2473                         if (status < 0)
2474                                 goto out_error;
2475                 }
2476 
2477                 /* Send BIND_CONN_TO_SESSION */
2478                 if (test_and_clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION,
2479                                 &clp->cl_state)) {
2480                         section = "bind conn to session";
2481                         status = nfs4_bind_conn_to_session(clp);
2482                         if (status < 0)
2483                                 goto out_error;
2484                         continue;
2485                 }
2486 
2487                 if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
2488                         section = "check lease";
2489                         status = nfs4_check_lease(clp);
2490                         if (status < 0)
2491                                 goto out_error;
2492                         continue;
2493                 }
2494 
2495                 if (test_and_clear_bit(NFS4CLNT_MOVED, &clp->cl_state)) {
2496                         section = "migration";
2497                         status = nfs4_handle_migration(clp);
2498                         if (status < 0)
2499                                 goto out_error;
2500                 }
2501 
2502                 if (test_and_clear_bit(NFS4CLNT_LEASE_MOVED, &clp->cl_state)) {
2503                         section = "lease moved";
2504                         status = nfs4_handle_lease_moved(clp);
2505                         if (status < 0)
2506                                 goto out_error;
2507                 }
2508 
2509                 /* First recover reboot state... */
2510                 if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
2511                         section = "reclaim reboot";
2512                         status = nfs4_do_reclaim(clp,
2513                                 clp->cl_mvops->reboot_recovery_ops);
2514                         if (status == -EAGAIN)
2515                                 continue;
2516                         if (status < 0)
2517                                 goto out_error;
2518                         nfs4_state_end_reclaim_reboot(clp);
2519                 }
2520 
2521                 /* Detect expired delegations... */
2522                 if (test_and_clear_bit(NFS4CLNT_DELEGATION_EXPIRED, &clp->cl_state)) {
2523                         section = "detect expired delegations";
2524                         nfs_reap_expired_delegations(clp);
2525                         continue;
2526                 }
2527 
2528                 /* Now recover expired state... */
2529                 if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
2530                         section = "reclaim nograce";
2531                         status = nfs4_do_reclaim(clp,
2532                                 clp->cl_mvops->nograce_recovery_ops);
2533                         if (status == -EAGAIN)
2534                                 continue;
2535                         if (status < 0)
2536                                 goto out_error;
2537                 }
2538 
2539                 nfs4_end_drain_session(clp);
2540                 if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
2541                         nfs_client_return_marked_delegations(clp);
2542                         continue;
2543                 }
2544 
2545                 nfs4_clear_state_manager_bit(clp);
2546                 /* Did we race with an attempt to give us more work? */
2547                 if (clp->cl_state == 0)
2548                         break;
2549                 if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
2550                         break;
2551         } while (refcount_read(&clp->cl_count) > 1);
2552         return;
2553 out_error:
2554         if (strlen(section))
2555                 section_sep = ": ";
2556         pr_warn_ratelimited("NFS: state manager%s%s failed on NFSv4 server %s"
2557                         " with error %d\n", section_sep, section,
2558                         clp->cl_hostname, -status);
2559         ssleep(1);
2560         nfs4_end_drain_session(clp);
2561         nfs4_clear_state_manager_bit(clp);
2562 }
2563 
2564 static int nfs4_run_state_manager(void *ptr)
2565 {
2566         struct nfs_client *clp = ptr;
2567 
2568         allow_signal(SIGKILL);
2569         nfs4_state_manager(clp);
2570         nfs_put_client(clp);
2571         module_put_and_exit(0);
2572         return 0;
2573 }
2574 
2575 /*
2576  * Local variables:
2577  *  c-basic-offset: 8
2578  * End:
2579  */
2580 
