
TOMOYO Linux Cross Reference
Linux/net/sunrpc/clnt.c


  1 /*
  2  *  linux/net/sunrpc/clnt.c
  3  *
  4  *  This file contains the high-level RPC interface.
  5  *  It is modeled as a finite state machine to support both synchronous
  6  *  and asynchronous requests.
  7  *
  8  *  -   RPC header generation and argument serialization.
  9  *  -   Credential refresh.
 10  *  -   TCP connect handling.
 11  *  -   Retry of operation when it is suspected the operation failed because
 12  *      of uid squashing on the server, or when the credentials were stale
 13  *      and need to be refreshed, or when a packet was damaged in transit.
 14  *      This may have to be moved to the VFS layer.
 15  *
 16  *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 17  *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 18  */
 19 
 20 
 21 #include <linux/module.h>
 22 #include <linux/types.h>
 23 #include <linux/kallsyms.h>
 24 #include <linux/mm.h>
 25 #include <linux/namei.h>
 26 #include <linux/mount.h>
 27 #include <linux/slab.h>
 28 #include <linux/utsname.h>
 29 #include <linux/workqueue.h>
 30 #include <linux/in.h>
 31 #include <linux/in6.h>
 32 #include <linux/un.h>
 33 #include <linux/rcupdate.h>
 34 
 35 #include <linux/sunrpc/clnt.h>
 36 #include <linux/sunrpc/addr.h>
 37 #include <linux/sunrpc/rpc_pipe_fs.h>
 38 #include <linux/sunrpc/metrics.h>
 39 #include <linux/sunrpc/bc_xprt.h>
 40 #include <trace/events/sunrpc.h>
 41 
 42 #include "sunrpc.h"
 43 #include "netns.h"
 44 
 45 #ifdef RPC_DEBUG
 46 # define RPCDBG_FACILITY        RPCDBG_CALL
 47 #endif
 48 
 49 #define dprint_status(t)                                        \
 50         dprintk("RPC: %5u %s (status %d)\n", t->tk_pid,         \
 51                         __func__, t->tk_status)
 52 
 53 /*
 54  * Wait queue used by rpc_shutdown_client() while a client's tasks drain
 55  */
 56 
 57 static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
 58 
 59 
 60 static void     call_start(struct rpc_task *task);
 61 static void     call_reserve(struct rpc_task *task);
 62 static void     call_reserveresult(struct rpc_task *task);
 63 static void     call_allocate(struct rpc_task *task);
 64 static void     call_decode(struct rpc_task *task);
 65 static void     call_bind(struct rpc_task *task);
 66 static void     call_bind_status(struct rpc_task *task);
 67 static void     call_transmit(struct rpc_task *task);
 68 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
 69 static void     call_bc_transmit(struct rpc_task *task);
 70 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
 71 static void     call_status(struct rpc_task *task);
 72 static void     call_transmit_status(struct rpc_task *task);
 73 static void     call_refresh(struct rpc_task *task);
 74 static void     call_refreshresult(struct rpc_task *task);
 75 static void     call_timeout(struct rpc_task *task);
 76 static void     call_connect(struct rpc_task *task);
 77 static void     call_connect_status(struct rpc_task *task);
 78 
 79 static __be32   *rpc_encode_header(struct rpc_task *task);
 80 static __be32   *rpc_verify_header(struct rpc_task *task);
 81 static int      rpc_ping(struct rpc_clnt *clnt);
 82 
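     /*
      * A rough sketch of the forward path through this state machine, as
      * wired up by the call_* handlers below (error and retry paths loop
      * back into earlier states):
      *
      *      call_start -> call_reserve -> call_reserveresult ->
      *      call_refresh -> call_refreshresult -> call_allocate ->
      *      call_bind -> call_bind_status -> call_connect ->
      *      call_connect_status -> call_transmit -> call_transmit_status ->
      *      call_status -> call_decode
      */
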
 83 static void rpc_register_client(struct rpc_clnt *clnt)
 84 {
 85         struct net *net = rpc_net_ns(clnt);
 86         struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
 87 
 88         spin_lock(&sn->rpc_client_lock);
 89         list_add(&clnt->cl_clients, &sn->all_clients);
 90         spin_unlock(&sn->rpc_client_lock);
 91 }
 92 
 93 static void rpc_unregister_client(struct rpc_clnt *clnt)
 94 {
 95         struct net *net = rpc_net_ns(clnt);
 96         struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
 97 
 98         spin_lock(&sn->rpc_client_lock);
 99         list_del(&clnt->cl_clients);
100         spin_unlock(&sn->rpc_client_lock);
101 }
102 
103 static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
104 {
105         rpc_remove_client_dir(clnt);
106 }
107 
108 static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
109 {
110         struct net *net = rpc_net_ns(clnt);
111         struct super_block *pipefs_sb;
112 
113         pipefs_sb = rpc_get_sb_net(net);
114         if (pipefs_sb) {
115                 __rpc_clnt_remove_pipedir(clnt);
116                 rpc_put_sb_net(net);
117         }
118 }
119 
120 static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
121                                     struct rpc_clnt *clnt)
122 {
123         static uint32_t clntid;
124         const char *dir_name = clnt->cl_program->pipe_dir_name;
125         char name[15];
126         struct dentry *dir, *dentry;
127 
128         dir = rpc_d_lookup_sb(sb, dir_name);
129         if (dir == NULL) {
130                 pr_info("RPC: pipefs directory doesn't exist: %s\n", dir_name);
131                 return dir;
132         }
133         for (;;) {
134                 snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
135                 name[sizeof(name) - 1] = '\0';
136                 dentry = rpc_create_client_dir(dir, name, clnt);
137                 if (!IS_ERR(dentry))
138                         break;
139                 if (dentry == ERR_PTR(-EEXIST))
140                         continue;
141                 printk(KERN_INFO "RPC: Couldn't create pipefs entry"
142                                 " %s/%s, error %ld\n",
143                                 dir_name, name, PTR_ERR(dentry));
144                 break;
145         }
146         dput(dir);
147         return dentry;
148 }
149 
150 static int
151 rpc_setup_pipedir(struct super_block *pipefs_sb, struct rpc_clnt *clnt)
152 {
153         struct dentry *dentry;
154 
155         if (clnt->cl_program->pipe_dir_name != NULL) {
156                 dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt);
157                 if (IS_ERR(dentry))
158                         return PTR_ERR(dentry);
159         }
160         return 0;
161 }
162 
163 static int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event)
164 {
165         if (clnt->cl_program->pipe_dir_name == NULL)
166                 return 1;
167 
168         switch (event) {
169         case RPC_PIPEFS_MOUNT:
170                 if (clnt->cl_pipedir_objects.pdh_dentry != NULL)
171                         return 1;
172                 if (atomic_read(&clnt->cl_count) == 0)
173                         return 1;
174                 break;
175         case RPC_PIPEFS_UMOUNT:
176                 if (clnt->cl_pipedir_objects.pdh_dentry == NULL)
177                         return 1;
178                 break;
179         }
180         return 0;
181 }
182 
183 static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event,
184                                    struct super_block *sb)
185 {
186         struct dentry *dentry;
187         int err = 0;
188 
189         switch (event) {
190         case RPC_PIPEFS_MOUNT:
191                 dentry = rpc_setup_pipedir_sb(sb, clnt);
192                 if (!dentry)
193                         return -ENOENT;
194                 if (IS_ERR(dentry))
195                         return PTR_ERR(dentry);
196                 break;
197         case RPC_PIPEFS_UMOUNT:
198                 __rpc_clnt_remove_pipedir(clnt);
199                 break;
200         default:
201                 printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event);
202                 return -ENOTSUPP;
203         }
204         return err;
205 }
206 
207 static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
208                                 struct super_block *sb)
209 {
210         int error = 0;
211 
212         for (;; clnt = clnt->cl_parent) {
213                 if (!rpc_clnt_skip_event(clnt, event))
214                         error = __rpc_clnt_handle_event(clnt, event, sb);
215                 if (error || clnt == clnt->cl_parent)
216                         break;
217         }
218         return error;
219 }
220 
221 static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
222 {
223         struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
224         struct rpc_clnt *clnt;
225 
226         spin_lock(&sn->rpc_client_lock);
227         list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
228                 if (rpc_clnt_skip_event(clnt, event))
229                         continue;
230                 spin_unlock(&sn->rpc_client_lock);
231                 return clnt;
232         }
233         spin_unlock(&sn->rpc_client_lock);
234         return NULL;
235 }
236 
237 static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
238                             void *ptr)
239 {
240         struct super_block *sb = ptr;
241         struct rpc_clnt *clnt;
242         int error = 0;
243 
244         while ((clnt = rpc_get_client_for_event(sb->s_fs_info, event))) {
245                 error = __rpc_pipefs_event(clnt, event, sb);
246                 if (error)
247                         break;
248         }
249         return error;
250 }
251 
252 static struct notifier_block rpc_clients_block = {
253         .notifier_call  = rpc_pipefs_event,
254         .priority       = SUNRPC_PIPEFS_RPC_PRIO,
255 };
256 
257 int rpc_clients_notifier_register(void)
258 {
259         return rpc_pipefs_notifier_register(&rpc_clients_block);
260 }
261 
262 void rpc_clients_notifier_unregister(void)
263 {
264         return rpc_pipefs_notifier_unregister(&rpc_clients_block);
265 }
266 
267 static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
268 {
269         clnt->cl_nodelen = strlen(nodename);
270         if (clnt->cl_nodelen > UNX_MAXNODENAME)
271                 clnt->cl_nodelen = UNX_MAXNODENAME;
272         memcpy(clnt->cl_nodename, nodename, clnt->cl_nodelen);
273 }
274 
275 static int rpc_client_register(const struct rpc_create_args *args,
276                                struct rpc_clnt *clnt)
277 {
278         struct rpc_auth_create_args auth_args = {
279                 .pseudoflavor = args->authflavor,
280                 .target_name = args->client_name,
281         };
282         struct rpc_auth *auth;
283         struct net *net = rpc_net_ns(clnt);
284         struct super_block *pipefs_sb;
285         int err;
286 
287         pipefs_sb = rpc_get_sb_net(net);
288         if (pipefs_sb) {
289                 err = rpc_setup_pipedir(pipefs_sb, clnt);
290                 if (err)
291                         goto out;
292         }
293 
294         rpc_register_client(clnt);
295         if (pipefs_sb)
296                 rpc_put_sb_net(net);
297 
298         auth = rpcauth_create(&auth_args, clnt);
299         if (IS_ERR(auth)) {
300                 dprintk("RPC:       Couldn't create auth handle (flavor %u)\n",
301                                 args->authflavor);
302                 err = PTR_ERR(auth);
303                 goto err_auth;
304         }
305         return 0;
306 err_auth:
307         pipefs_sb = rpc_get_sb_net(net);
308         rpc_unregister_client(clnt);
309         __rpc_clnt_remove_pipedir(clnt);
310 out:
311         if (pipefs_sb)
312                 rpc_put_sb_net(net);
313         return err;
314 }
315 
316 static DEFINE_IDA(rpc_clids);
317 
318 void rpc_cleanup_clids(void)
319 {
320         ida_destroy(&rpc_clids);
321 }
322 
323 static int rpc_alloc_clid(struct rpc_clnt *clnt)
324 {
325         int clid;
326 
327         clid = ida_simple_get(&rpc_clids, 0, 0, GFP_KERNEL);
328         if (clid < 0)
329                 return clid;
330         clnt->cl_clid = clid;
331         return 0;
332 }
333 
334 static void rpc_free_clid(struct rpc_clnt *clnt)
335 {
336         ida_simple_remove(&rpc_clids, clnt->cl_clid);
337 }
338 
339 static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
340                 struct rpc_xprt *xprt,
341                 struct rpc_clnt *parent)
342 {
343         const struct rpc_program *program = args->program;
344         const struct rpc_version *version;
345         struct rpc_clnt         *clnt = NULL;
346         int err;
347 
348         /* sanity check the name before trying to print it */
349         dprintk("RPC:       creating %s client for %s (xprt %p)\n",
350                         program->name, args->servername, xprt);
351 
352         err = rpciod_up();
353         if (err)
354                 goto out_no_rpciod;
355 
356         err = -EINVAL;
357         if (args->version >= program->nrvers)
358                 goto out_err;
359         version = program->version[args->version];
360         if (version == NULL)
361                 goto out_err;
362 
363         err = -ENOMEM;
364         clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
365         if (!clnt)
366                 goto out_err;
367         clnt->cl_parent = parent ? : clnt;
368 
369         err = rpc_alloc_clid(clnt);
370         if (err)
371                 goto out_no_clid;
372 
373         rcu_assign_pointer(clnt->cl_xprt, xprt);
374         clnt->cl_procinfo = version->procs;
375         clnt->cl_maxproc  = version->nrprocs;
376         clnt->cl_prog     = args->prognumber ? : program->number;
377         clnt->cl_vers     = version->number;
378         clnt->cl_stats    = program->stats;
379         clnt->cl_metrics  = rpc_alloc_iostats(clnt);
380         rpc_init_pipe_dir_head(&clnt->cl_pipedir_objects);
381         err = -ENOMEM;
382         if (clnt->cl_metrics == NULL)
383                 goto out_no_stats;
384         clnt->cl_program  = program;
385         INIT_LIST_HEAD(&clnt->cl_tasks);
386         spin_lock_init(&clnt->cl_lock);
387 
388         if (!xprt_bound(xprt))
389                 clnt->cl_autobind = 1;
390 
391         clnt->cl_timeout = xprt->timeout;
392         if (args->timeout != NULL) {
393                 memcpy(&clnt->cl_timeout_default, args->timeout,
394                                 sizeof(clnt->cl_timeout_default));
395                 clnt->cl_timeout = &clnt->cl_timeout_default;
396         }
397 
398         clnt->cl_rtt = &clnt->cl_rtt_default;
399         rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);
400 
401         atomic_set(&clnt->cl_count, 1);
402 
403         /* save the nodename */
404         rpc_clnt_set_nodename(clnt, utsname()->nodename);
405 
406         err = rpc_client_register(args, clnt);
407         if (err)
408                 goto out_no_path;
409         if (parent)
410                 atomic_inc(&parent->cl_count);
411         return clnt;
412 
413 out_no_path:
414         rpc_free_iostats(clnt->cl_metrics);
415 out_no_stats:
416         rpc_free_clid(clnt);
417 out_no_clid:
418         kfree(clnt);
419 out_err:
420         rpciod_down();
421 out_no_rpciod:
422         xprt_put(xprt);
423         return ERR_PTR(err);
424 }
425 
426 /**
427  * rpc_create - create an RPC client and transport with one call
428  * @args: rpc_clnt create argument structure
429  *
430  * Creates and initializes an RPC transport and an RPC client.
431  *
432  * It can ping the server in order to determine if it is up, and to see if
433  * it supports this program and version.  RPC_CLNT_CREATE_NOPING disables
434  * this behavior so asynchronous tasks can also use rpc_create.
435  */
436 struct rpc_clnt *rpc_create(struct rpc_create_args *args)
437 {
438         struct rpc_xprt *xprt;
439         struct rpc_clnt *clnt;
440         struct xprt_create xprtargs = {
441                 .net = args->net,
442                 .ident = args->protocol,
443                 .srcaddr = args->saddress,
444                 .dstaddr = args->address,
445                 .addrlen = args->addrsize,
446                 .servername = args->servername,
447                 .bc_xprt = args->bc_xprt,
448         };
449         char servername[48];
450 
451         if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
452                 xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
453         if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
454                 xprtargs.flags |= XPRT_CREATE_NO_IDLE_TIMEOUT;
455         /*
456          * If the caller chooses not to specify a hostname, whip
457          * up a string representation of the passed-in address.
458          */
459         if (xprtargs.servername == NULL) {
460                 struct sockaddr_un *sun =
461                                 (struct sockaddr_un *)args->address;
462                 struct sockaddr_in *sin =
463                                 (struct sockaddr_in *)args->address;
464                 struct sockaddr_in6 *sin6 =
465                                 (struct sockaddr_in6 *)args->address;
466 
467                 servername[0] = '\0';
468                 switch (args->address->sa_family) {
469                 case AF_LOCAL:
470                         snprintf(servername, sizeof(servername), "%s",
471                                  sun->sun_path);
472                         break;
473                 case AF_INET:
474                         snprintf(servername, sizeof(servername), "%pI4",
475                                  &sin->sin_addr.s_addr);
476                         break;
477                 case AF_INET6:
478                         snprintf(servername, sizeof(servername), "%pI6",
479                                  &sin6->sin6_addr);
480                         break;
481                 default:
482                         /* caller wants default server name, but
483                          * address family isn't recognized. */
484                         return ERR_PTR(-EINVAL);
485                 }
486                 xprtargs.servername = servername;
487         }
488 
489         xprt = xprt_create_transport(&xprtargs);
490         if (IS_ERR(xprt))
491                 return (struct rpc_clnt *)xprt;
492 
493         /*
494          * By default, kernel RPC client connects from a reserved port.
495          * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
496          * but it is always enabled for rpciod, which handles the connect
497          * operation.
498          */
499         xprt->resvport = 1;
500         if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
501                 xprt->resvport = 0;
502 
503         clnt = rpc_new_client(args, xprt, NULL);
504         if (IS_ERR(clnt))
505                 return clnt;
506 
507         if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
508                 int err = rpc_ping(clnt);
509                 if (err != 0) {
510                         rpc_shutdown_client(clnt);
511                         return ERR_PTR(err);
512                 }
513         }
514 
515         clnt->cl_softrtry = 1;
516         if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
517                 clnt->cl_softrtry = 0;
518 
519         if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
520                 clnt->cl_autobind = 1;
521         if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
522                 clnt->cl_discrtry = 1;
523         if (!(args->flags & RPC_CLNT_CREATE_QUIET))
524                 clnt->cl_chatty = 1;
525 
526         return clnt;
527 }
528 EXPORT_SYMBOL_GPL(rpc_create);
529 
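     /*
      * A minimal usage sketch for rpc_create().  It assumes the caller has
      * a filled-in "struct sockaddr_in addr" and an rpc_program named
      * "my_program" (both hypothetical), and abbreviates error handling:
      *
      *      struct rpc_create_args args = {
      *              .net            = &init_net,
      *              .protocol       = XPRT_TRANSPORT_TCP,
      *              .address        = (struct sockaddr *)&addr,
      *              .addrsize       = sizeof(addr),
      *              .program        = &my_program,
      *              .version        = 3,
      *              .authflavor     = RPC_AUTH_UNIX,
      *              .flags          = RPC_CLNT_CREATE_NOPING,
      *      };
      *      struct rpc_clnt *clnt = rpc_create(&args);
      *
      *      if (IS_ERR(clnt))
      *              return PTR_ERR(clnt);
      *      ...
      *      rpc_shutdown_client(clnt);
      */
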
530 /*
531  * This function clones the RPC client structure. It allows us to share the
532  * same transport while varying parameters such as the authentication
533  * flavour.
534  */
535 static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
536                                            struct rpc_clnt *clnt)
537 {
538         struct rpc_xprt *xprt;
539         struct rpc_clnt *new;
540         int err;
541 
542         err = -ENOMEM;
543         rcu_read_lock();
544         xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
545         rcu_read_unlock();
546         if (xprt == NULL)
547                 goto out_err;
548         args->servername = xprt->servername;
549 
550         new = rpc_new_client(args, xprt, clnt);
551         if (IS_ERR(new)) {
552                 err = PTR_ERR(new);
553                 goto out_err;
554         }
555 
556         /* Turn off autobind on clones */
557         new->cl_autobind = 0;
558         new->cl_softrtry = clnt->cl_softrtry;
559         new->cl_discrtry = clnt->cl_discrtry;
560         new->cl_chatty = clnt->cl_chatty;
561         return new;
562 
563 out_err:
564         dprintk("RPC:       %s: returned error %d\n", __func__, err);
565         return ERR_PTR(err);
566 }
567 
568 /**
569  * rpc_clone_client - Clone an RPC client structure
570  *
571  * @clnt: RPC client whose parameters are copied
572  *
573  * Returns a fresh RPC client or an ERR_PTR.
574  */
575 struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt)
576 {
577         struct rpc_create_args args = {
578                 .program        = clnt->cl_program,
579                 .prognumber     = clnt->cl_prog,
580                 .version        = clnt->cl_vers,
581                 .authflavor     = clnt->cl_auth->au_flavor,
582         };
583         return __rpc_clone_client(&args, clnt);
584 }
585 EXPORT_SYMBOL_GPL(rpc_clone_client);
586 
587 /**
588  * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth
589  *
590  * @clnt: RPC client whose parameters are copied
591  * @flavor: security flavor for new client
592  *
593  * Returns a fresh RPC client or an ERR_PTR.
594  */
595 struct rpc_clnt *
596 rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
597 {
598         struct rpc_create_args args = {
599                 .program        = clnt->cl_program,
600                 .prognumber     = clnt->cl_prog,
601                 .version        = clnt->cl_vers,
602                 .authflavor     = flavor,
603         };
604         return __rpc_clone_client(&args, clnt);
605 }
606 EXPORT_SYMBOL_GPL(rpc_clone_client_set_auth);
607 
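     /*
      * A short cloning sketch, assuming "clnt" came from rpc_create() as
      * above: the clone shares its parent's transport but can use another
      * security flavor (RPC_AUTH_GSS here is only an illustration):
      *
      *      struct rpc_clnt *gss_clnt;
      *
      *      gss_clnt = rpc_clone_client_set_auth(clnt, RPC_AUTH_GSS);
      *      if (IS_ERR(gss_clnt))
      *              return PTR_ERR(gss_clnt);
      */
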
608 /*
609  * Kill all tasks for the given client.
610  * XXX: kill their descendants as well?
611  */
612 void rpc_killall_tasks(struct rpc_clnt *clnt)
613 {
614         struct rpc_task *rovr;
615 
616 
617         if (list_empty(&clnt->cl_tasks))
618                 return;
619         dprintk("RPC:       killing all tasks for client %p\n", clnt);
620         /*
621          * Spin lock all_tasks to prevent changes...
622          */
623         spin_lock(&clnt->cl_lock);
624         list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
625                 if (!RPC_IS_ACTIVATED(rovr))
626                         continue;
627                 if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
628                         rovr->tk_flags |= RPC_TASK_KILLED;
629                         rpc_exit(rovr, -EIO);
630                         if (RPC_IS_QUEUED(rovr))
631                                 rpc_wake_up_queued_task(rovr->tk_waitqueue,
632                                                         rovr);
633                 }
634         }
635         spin_unlock(&clnt->cl_lock);
636 }
637 EXPORT_SYMBOL_GPL(rpc_killall_tasks);
638 
639 /*
640  * Properly shut down an RPC client, terminating all outstanding
641  * requests.
642  */
643 void rpc_shutdown_client(struct rpc_clnt *clnt)
644 {
645         might_sleep();
646 
647         dprintk_rcu("RPC:       shutting down %s client for %s\n",
648                         clnt->cl_program->name,
649                         rcu_dereference(clnt->cl_xprt)->servername);
650 
651         while (!list_empty(&clnt->cl_tasks)) {
652                 rpc_killall_tasks(clnt);
653                 wait_event_timeout(destroy_wait,
654                         list_empty(&clnt->cl_tasks), 1*HZ);
655         }
656 
657         rpc_release_client(clnt);
658 }
659 EXPORT_SYMBOL_GPL(rpc_shutdown_client);
660 
661 /*
662  * Free an RPC client
663  */
664 static struct rpc_clnt *
665 rpc_free_client(struct rpc_clnt *clnt)
666 {
667         struct rpc_clnt *parent = NULL;
668 
669         dprintk_rcu("RPC:       destroying %s client for %s\n",
670                         clnt->cl_program->name,
671                         rcu_dereference(clnt->cl_xprt)->servername);
672         if (clnt->cl_parent != clnt)
673                 parent = clnt->cl_parent;
674         rpc_clnt_remove_pipedir(clnt);
675         rpc_unregister_client(clnt);
676         rpc_free_iostats(clnt->cl_metrics);
677         clnt->cl_metrics = NULL;
678         xprt_put(rcu_dereference_raw(clnt->cl_xprt));
679         rpciod_down();
680         rpc_free_clid(clnt);
681         kfree(clnt);
682         return parent;
683 }
684 
685 /*
686  * Free an RPC client
687  */
688 static struct rpc_clnt *
689 rpc_free_auth(struct rpc_clnt *clnt)
690 {
691         if (clnt->cl_auth == NULL)
692                 return rpc_free_client(clnt);
693 
694         /*
695          * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
696          *       release remaining GSS contexts. This mechanism ensures
697          *       that it can do so safely.
698          */
699         atomic_inc(&clnt->cl_count);
700         rpcauth_release(clnt->cl_auth);
701         clnt->cl_auth = NULL;
702         if (atomic_dec_and_test(&clnt->cl_count))
703                 return rpc_free_client(clnt);
704         return NULL;
705 }
706 
707 /*
708  * Release reference to the RPC client
709  */
710 void
711 rpc_release_client(struct rpc_clnt *clnt)
712 {
713         dprintk("RPC:       rpc_release_client(%p)\n", clnt);
714 
715         do {
716                 if (list_empty(&clnt->cl_tasks))
717                         wake_up(&destroy_wait);
718                 if (!atomic_dec_and_test(&clnt->cl_count))
719                         break;
720                 clnt = rpc_free_auth(clnt);
721         } while (clnt != NULL);
722 }
723 EXPORT_SYMBOL_GPL(rpc_release_client);
724 
725 /**
726  * rpc_bind_new_program - bind a new RPC program to an existing client
727  * @old: old rpc_client
728  * @program: rpc program to set
729  * @vers: rpc program version
730  *
731  * Clones the rpc client and sets up a new RPC program. This is mainly
732  * of use for enabling different RPC programs to share the same transport.
733  * The Sun NFSv2/v3 ACL protocol can do this.
734  */
735 struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
736                                       const struct rpc_program *program,
737                                       u32 vers)
738 {
739         struct rpc_create_args args = {
740                 .program        = program,
741                 .prognumber     = program->number,
742                 .version        = vers,
743                 .authflavor     = old->cl_auth->au_flavor,
744         };
745         struct rpc_clnt *clnt;
746         int err;
747 
748         clnt = __rpc_clone_client(&args, old);
749         if (IS_ERR(clnt))
750                 goto out;
751         err = rpc_ping(clnt);
752         if (err != 0) {
753                 rpc_shutdown_client(clnt);
754                 clnt = ERR_PTR(err);
755         }
756 out:
757         return clnt;
758 }
759 EXPORT_SYMBOL_GPL(rpc_bind_new_program);
760 
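     /*
      * A hedged sketch of rpc_bind_new_program() in the spirit of the
      * NFSv3 ACL case mentioned above; "nfsacl_program" stands in for a
      * real rpc_program and "nfs_client" for an existing NFS rpc_clnt:
      *
      *      struct rpc_clnt *acl_clnt;
      *
      *      acl_clnt = rpc_bind_new_program(nfs_client, &nfsacl_program, 3);
      *      if (IS_ERR(acl_clnt))
      *              return PTR_ERR(acl_clnt);
      */
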
761 void rpc_task_release_client(struct rpc_task *task)
762 {
763         struct rpc_clnt *clnt = task->tk_client;
764 
765         if (clnt != NULL) {
766                 /* Remove from client task list */
767                 spin_lock(&clnt->cl_lock);
768                 list_del(&task->tk_task);
769                 spin_unlock(&clnt->cl_lock);
770                 task->tk_client = NULL;
771 
772                 rpc_release_client(clnt);
773         }
774 }
775 
776 static
777 void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
778 {
779         if (clnt != NULL) {
780                 rpc_task_release_client(task);
781                 task->tk_client = clnt;
782                 atomic_inc(&clnt->cl_count);
783                 if (clnt->cl_softrtry)
784                         task->tk_flags |= RPC_TASK_SOFT;
785                 if (sk_memalloc_socks()) {
786                         struct rpc_xprt *xprt;
787 
788                         rcu_read_lock();
789                         xprt = rcu_dereference(clnt->cl_xprt);
790                         if (xprt->swapper)
791                                 task->tk_flags |= RPC_TASK_SWAPPER;
792                         rcu_read_unlock();
793                 }
794                 /* Add to the client's list of all tasks */
795                 spin_lock(&clnt->cl_lock);
796                 list_add_tail(&task->tk_task, &clnt->cl_tasks);
797                 spin_unlock(&clnt->cl_lock);
798         }
799 }
800 
801 void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt)
802 {
803         rpc_task_release_client(task);
804         rpc_task_set_client(task, clnt);
805 }
806 EXPORT_SYMBOL_GPL(rpc_task_reset_client);
807 
808 
809 static void
810 rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
811 {
812         if (msg != NULL) {
813                 task->tk_msg.rpc_proc = msg->rpc_proc;
814                 task->tk_msg.rpc_argp = msg->rpc_argp;
815                 task->tk_msg.rpc_resp = msg->rpc_resp;
816                 if (msg->rpc_cred != NULL)
817                         task->tk_msg.rpc_cred = get_rpccred(msg->rpc_cred);
818         }
819 }
820 
821 /*
822  * Default callback for async RPC calls
823  */
824 static void
825 rpc_default_callback(struct rpc_task *task, void *data)
826 {
827 }
828 
829 static const struct rpc_call_ops rpc_default_ops = {
830         .rpc_call_done = rpc_default_callback,
831 };
832 
833 /**
834  * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
835  * @task_setup_data: pointer to task initialisation data
836  */
837 struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
838 {
839         struct rpc_task *task;
840 
841         task = rpc_new_task(task_setup_data);
842         if (IS_ERR(task))
843                 goto out;
844 
845         rpc_task_set_client(task, task_setup_data->rpc_client);
846         rpc_task_set_rpc_message(task, task_setup_data->rpc_message);
847 
848         if (task->tk_action == NULL)
849                 rpc_call_start(task);
850 
851         atomic_inc(&task->tk_count);
852         rpc_execute(task);
853 out:
854         return task;
855 }
856 EXPORT_SYMBOL_GPL(rpc_run_task);
857 
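     /*
      * rpc_call_sync() and rpc_call_async() below are thin wrappers around
      * rpc_run_task(): each fills in an rpc_task_setup and runs the task;
      * the synchronous variant then returns the task's tk_status, while the
      * asynchronous one drops its reference and lets the caller's
      * rpc_call_ops observe completion.
      */
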
858 /**
859  * rpc_call_sync - Perform a synchronous RPC call
860  * @clnt: pointer to RPC client
861  * @msg: RPC call parameters
862  * @flags: RPC call flags
863  */
864 int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
865 {
866         struct rpc_task *task;
867         struct rpc_task_setup task_setup_data = {
868                 .rpc_client = clnt,
869                 .rpc_message = msg,
870                 .callback_ops = &rpc_default_ops,
871                 .flags = flags,
872         };
873         int status;
874 
875         WARN_ON_ONCE(flags & RPC_TASK_ASYNC);
876         if (flags & RPC_TASK_ASYNC) {
877                 rpc_release_calldata(task_setup_data.callback_ops,
878                         task_setup_data.callback_data);
879                 return -EINVAL;
880         }
881 
882         task = rpc_run_task(&task_setup_data);
883         if (IS_ERR(task))
884                 return PTR_ERR(task);
885         status = task->tk_status;
886         rpc_put_task(task);
887         return status;
888 }
889 EXPORT_SYMBOL_GPL(rpc_call_sync);
890 
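     /*
      * A minimal synchronous call sketch.  The procedure table entry
      * "my_procedures[MYPROC_GETATTR]" and the argument/result structures
      * are hypothetical and defined by the caller:
      *
      *      struct rpc_message msg = {
      *              .rpc_proc = &my_procedures[MYPROC_GETATTR],
      *              .rpc_argp = &args,
      *              .rpc_resp = &res,
      *      };
      *      int status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
      *
      *      if (status < 0)
      *              return status;
      */
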
891 /**
892  * rpc_call_async - Perform an asynchronous RPC call
893  * @clnt: pointer to RPC client
894  * @msg: RPC call parameters
895  * @flags: RPC call flags
896  * @tk_ops: RPC call ops
897  * @data: user call data
898  */
899 int
900 rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
901                const struct rpc_call_ops *tk_ops, void *data)
902 {
903         struct rpc_task *task;
904         struct rpc_task_setup task_setup_data = {
905                 .rpc_client = clnt,
906                 .rpc_message = msg,
907                 .callback_ops = tk_ops,
908                 .callback_data = data,
909                 .flags = flags|RPC_TASK_ASYNC,
910         };
911 
912         task = rpc_run_task(&task_setup_data);
913         if (IS_ERR(task))
914                 return PTR_ERR(task);
915         rpc_put_task(task);
916         return 0;
917 }
918 EXPORT_SYMBOL_GPL(rpc_call_async);
919 
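     /*
      * An asynchronous variant of the sketch above; completion is reported
      * through caller-supplied rpc_call_ops ("my_call_done" and
      * "my_calldata" are hypothetical):
      *
      *      static void my_call_done(struct rpc_task *task, void *calldata)
      *      {
      *              if (task->tk_status < 0)
      *                      pr_debug("call failed: %d\n", task->tk_status);
      *      }
      *
      *      static const struct rpc_call_ops my_call_ops = {
      *              .rpc_call_done  = my_call_done,
      *      };
      *
      *      status = rpc_call_async(clnt, &msg, RPC_TASK_SOFT,
      *                              &my_call_ops, my_calldata);
      */
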
920 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
921 /**
922  * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
923  * rpc_execute against it
924  * @req: RPC request
925  * @tk_ops: RPC call ops
926  */
927 struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
928                                 const struct rpc_call_ops *tk_ops)
929 {
930         struct rpc_task *task;
931         struct xdr_buf *xbufp = &req->rq_snd_buf;
932         struct rpc_task_setup task_setup_data = {
933                 .callback_ops = tk_ops,
934         };
935 
936         dprintk("RPC: rpc_run_bc_task req= %p\n", req);
937         /*
938          * Create an rpc_task to send the data
939          */
940         task = rpc_new_task(&task_setup_data);
941         if (IS_ERR(task)) {
942                 xprt_free_bc_request(req);
943                 goto out;
944         }
945         task->tk_rqstp = req;
946 
947         /*
948          * Set up the xdr_buf length.
949          * This also indicates that the buffer is XDR encoded already.
950          */
951         xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
952                         xbufp->tail[0].iov_len;
953 
954         task->tk_action = call_bc_transmit;
955         atomic_inc(&task->tk_count);
956         WARN_ON_ONCE(atomic_read(&task->tk_count) != 2);
957         rpc_execute(task);
958 
959 out:
960         dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
961         return task;
962 }
963 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
964 
965 void
966 rpc_call_start(struct rpc_task *task)
967 {
968         task->tk_action = call_start;
969 }
970 EXPORT_SYMBOL_GPL(rpc_call_start);
971 
972 /**
973  * rpc_peeraddr - extract remote peer address from clnt's xprt
974  * @clnt: RPC client structure
975  * @buf: target buffer
976  * @bufsize: length of target buffer
977  *
978  * Returns the number of bytes that are actually in the stored address.
979  */
980 size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
981 {
982         size_t bytes;
983         struct rpc_xprt *xprt;
984 
985         rcu_read_lock();
986         xprt = rcu_dereference(clnt->cl_xprt);
987 
988         bytes = xprt->addrlen;
989         if (bytes > bufsize)
990                 bytes = bufsize;
991         memcpy(buf, &xprt->addr, bytes);
992         rcu_read_unlock();
993 
994         return bytes;
995 }
996 EXPORT_SYMBOL_GPL(rpc_peeraddr);
997 
998 /**
999  * rpc_peeraddr2str - return remote peer address in printable format
1000  * @clnt: RPC client structure
1001  * @format: address format
1002  *
1003  * NB: the lifetime of the memory referenced by the returned pointer is
1004  * the same as the rpc_xprt itself.  As long as the caller uses this
1005  * pointer, it must hold the RCU read lock.
1006  */
1007 const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
1008                              enum rpc_display_format_t format)
1009 {
1010         struct rpc_xprt *xprt;
1011 
1012         xprt = rcu_dereference(clnt->cl_xprt);
1013 
1014         if (xprt->address_strings[format] != NULL)
1015                 return xprt->address_strings[format];
1016         else
1017                 return "unprintable";
1018 }
1019 EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
1020 
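     /*
      * Because the returned string lives in the rpc_xprt, callers of
      * rpc_peeraddr2str() must hold the RCU read lock across its use:
      *
      *      rcu_read_lock();
      *      pr_debug("server address: %s\n",
      *               rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
      *      rcu_read_unlock();
      */
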
1021 static const struct sockaddr_in rpc_inaddr_loopback = {
1022         .sin_family             = AF_INET,
1023         .sin_addr.s_addr        = htonl(INADDR_ANY),
1024 };
1025 
1026 static const struct sockaddr_in6 rpc_in6addr_loopback = {
1027         .sin6_family            = AF_INET6,
1028         .sin6_addr              = IN6ADDR_ANY_INIT,
1029 };
1030 
1031 /*
1032  * Try a getsockname() on a connected datagram socket.  Using a
1033  * connected datagram socket prevents leaving a socket in TIME_WAIT.
1034  * This conserves the ephemeral port number space.
1035  *
1036  * Returns zero and fills in "buf" if successful; otherwise, a
1037  * negative errno is returned.
1038  */
1039 static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen,
1040                         struct sockaddr *buf, int buflen)
1041 {
1042         struct socket *sock;
1043         int err;
1044 
1045         err = __sock_create(net, sap->sa_family,
1046                                 SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
1047         if (err < 0) {
1048                 dprintk("RPC:       can't create UDP socket (%d)\n", err);
1049                 goto out;
1050         }
1051 
1052         switch (sap->sa_family) {
1053         case AF_INET:
1054                 err = kernel_bind(sock,
1055                                 (struct sockaddr *)&rpc_inaddr_loopback,
1056                                 sizeof(rpc_inaddr_loopback));
1057                 break;
1058         case AF_INET6:
1059                 err = kernel_bind(sock,
1060                                 (struct sockaddr *)&rpc_in6addr_loopback,
1061                                 sizeof(rpc_in6addr_loopback));
1062                 break;
1063         default:
1064                 err = -EAFNOSUPPORT;
1065                 goto out;
1066         }
1067         if (err < 0) {
1068                 dprintk("RPC:       can't bind UDP socket (%d)\n", err);
1069                 goto out_release;
1070         }
1071 
1072         err = kernel_connect(sock, sap, salen, 0);
1073         if (err < 0) {
1074                 dprintk("RPC:       can't connect UDP socket (%d)\n", err);
1075                 goto out_release;
1076         }
1077 
1078         err = kernel_getsockname(sock, buf, &buflen);
1079         if (err < 0) {
1080                 dprintk("RPC:       getsockname failed (%d)\n", err);
1081                 goto out_release;
1082         }
1083 
1084         err = 0;
1085         if (buf->sa_family == AF_INET6) {
1086                 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)buf;
1087                 sin6->sin6_scope_id = 0;
1088         }
1089         dprintk("RPC:       %s succeeded\n", __func__);
1090 
1091 out_release:
1092         sock_release(sock);
1093 out:
1094         return err;
1095 }
1096 
1097 /*
1098  * Scraping a connected socket failed, so we don't have a usable
1099  * local address.  Fallback: generate an address that will prevent
1100  * the server from calling us back.
1101  *
1102  * Returns zero and fills in "buf" if successful; otherwise, a
1103  * negative errno is returned.
1104  */
1105 static int rpc_anyaddr(int family, struct sockaddr *buf, size_t buflen)
1106 {
1107         switch (family) {
1108         case AF_INET:
1109                 if (buflen < sizeof(rpc_inaddr_loopback))
1110                         return -EINVAL;
1111                 memcpy(buf, &rpc_inaddr_loopback,
1112                                 sizeof(rpc_inaddr_loopback));
1113                 break;
1114         case AF_INET6:
1115                 if (buflen < sizeof(rpc_in6addr_loopback))
1116                         return -EINVAL;
1117                 memcpy(buf, &rpc_in6addr_loopback,
1118                                 sizeof(rpc_in6addr_loopback));
                     break;
1119         default:
1120                 dprintk("RPC:       %s: address family not supported\n",
1121                         __func__);
1122                 return -EAFNOSUPPORT;
1123         }
1124         dprintk("RPC:       %s: succeeded\n", __func__);
1125         return 0;
1126 }
1127 
1128 /**
1129  * rpc_localaddr - discover local endpoint address for an RPC client
1130  * @clnt: RPC client structure
1131  * @buf: target buffer
1132  * @buflen: size of target buffer, in bytes
1133  *
1134  * Returns zero and fills in "buf" and "buflen" if successful;
1135  * otherwise, a negative errno is returned.
1136  *
1137  * This works even if the underlying transport is not currently connected,
1138  * or if the upper layer never previously provided a source address.
1139  *
1140  * The result of this function call is transient: multiple calls in
1141  * succession may give different results, depending on how local
1142  * networking configuration changes over time.
1143  */
1144 int rpc_localaddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t buflen)
1145 {
1146         struct sockaddr_storage address;
1147         struct sockaddr *sap = (struct sockaddr *)&address;
1148         struct rpc_xprt *xprt;
1149         struct net *net;
1150         size_t salen;
1151         int err;
1152 
1153         rcu_read_lock();
1154         xprt = rcu_dereference(clnt->cl_xprt);
1155         salen = xprt->addrlen;
1156         memcpy(sap, &xprt->addr, salen);
1157         net = get_net(xprt->xprt_net);
1158         rcu_read_unlock();
1159 
1160         rpc_set_port(sap, 0);
1161         err = rpc_sockname(net, sap, salen, buf, buflen);
1162         put_net(net);
1163         if (err != 0)
1164                 /* Couldn't discover local address, return ANYADDR */
1165                 return rpc_anyaddr(sap->sa_family, buf, buflen);
1166         return 0;
1167 }
1168 EXPORT_SYMBOL_GPL(rpc_localaddr);
1169 
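     /*
      * A small sketch of rpc_localaddr(): a sockaddr_storage is large
      * enough for any result, and a zero return means "buf" was filled in:
      *
      *      struct sockaddr_storage ss;
      *      int err;
      *
      *      err = rpc_localaddr(clnt, (struct sockaddr *)&ss, sizeof(ss));
      *      if (err)
      *              return err;
      */
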
1170 void
1171 rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
1172 {
1173         struct rpc_xprt *xprt;
1174 
1175         rcu_read_lock();
1176         xprt = rcu_dereference(clnt->cl_xprt);
1177         if (xprt->ops->set_buffer_size)
1178                 xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
1179         rcu_read_unlock();
1180 }
1181 EXPORT_SYMBOL_GPL(rpc_setbufsize);
1182 
1183 /**
1184  * rpc_protocol - Get transport protocol number for an RPC client
1185  * @clnt: RPC client to query
1186  *
1187  */
1188 int rpc_protocol(struct rpc_clnt *clnt)
1189 {
1190         int protocol;
1191 
1192         rcu_read_lock();
1193         protocol = rcu_dereference(clnt->cl_xprt)->prot;
1194         rcu_read_unlock();
1195         return protocol;
1196 }
1197 EXPORT_SYMBOL_GPL(rpc_protocol);
1198 
1199 /**
1200  * rpc_net_ns - Get the network namespace for this RPC client
1201  * @clnt: RPC client to query
1202  *
1203  */
1204 struct net *rpc_net_ns(struct rpc_clnt *clnt)
1205 {
1206         struct net *ret;
1207 
1208         rcu_read_lock();
1209         ret = rcu_dereference(clnt->cl_xprt)->xprt_net;
1210         rcu_read_unlock();
1211         return ret;
1212 }
1213 EXPORT_SYMBOL_GPL(rpc_net_ns);
1214 
1215 /**
1216  * rpc_max_payload - Get maximum payload size for a transport, in bytes
1217  * @clnt: RPC client to query
1218  *
1219  * For stream transports, this is one RPC record fragment (see RFC
1220  * 1831), as we don't support multi-record requests yet.  For datagram
1221  * transports, this is the size of an IP packet minus the IP, UDP, and
1222  * RPC header sizes.
1223  */
1224 size_t rpc_max_payload(struct rpc_clnt *clnt)
1225 {
1226         size_t ret;
1227 
1228         rcu_read_lock();
1229         ret = rcu_dereference(clnt->cl_xprt)->max_payload;
1230         rcu_read_unlock();
1231         return ret;
1232 }
1233 EXPORT_SYMBOL_GPL(rpc_max_payload);
1234 
1235 /**
1236  * rpc_get_timeout - Get timeout for transport in units of HZ
1237  * @clnt: RPC client to query
1238  */
1239 unsigned long rpc_get_timeout(struct rpc_clnt *clnt)
1240 {
1241         unsigned long ret;
1242 
1243         rcu_read_lock();
1244         ret = rcu_dereference(clnt->cl_xprt)->timeout->to_initval;
1245         rcu_read_unlock();
1246         return ret;
1247 }
1248 EXPORT_SYMBOL_GPL(rpc_get_timeout);
1249 
1250 /**
1251  * rpc_force_rebind - force transport to check that remote port is unchanged
1252  * @clnt: client to rebind
1253  *
1254  */
1255 void rpc_force_rebind(struct rpc_clnt *clnt)
1256 {
1257         if (clnt->cl_autobind) {
1258                 rcu_read_lock();
1259                 xprt_clear_bound(rcu_dereference(clnt->cl_xprt));
1260                 rcu_read_unlock();
1261         }
1262 }
1263 EXPORT_SYMBOL_GPL(rpc_force_rebind);
1264 
1265 /*
1266  * Restart an (async) RPC call from the call_prepare state.
1267  * Usually called from within the exit handler.
1268  */
1269 int
1270 rpc_restart_call_prepare(struct rpc_task *task)
1271 {
1272         if (RPC_ASSASSINATED(task))
1273                 return 0;
1274         task->tk_action = call_start;
1275         if (task->tk_ops->rpc_call_prepare != NULL)
1276                 task->tk_action = rpc_prepare_task;
1277         return 1;
1278 }
1279 EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
1280 
1281 /*
1282  * Restart an (async) RPC call. Usually called from within the
1283  * exit handler.
1284  */
1285 int
1286 rpc_restart_call(struct rpc_task *task)
1287 {
1288         if (RPC_ASSASSINATED(task))
1289                 return 0;
1290         task->tk_action = call_start;
1291         return 1;
1292 }
1293 EXPORT_SYMBOL_GPL(rpc_restart_call);
1294 
1295 #ifdef RPC_DEBUG
1296 static const char *rpc_proc_name(const struct rpc_task *task)
1297 {
1298         const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
1299 
1300         if (proc) {
1301                 if (proc->p_name)
1302                         return proc->p_name;
1303                 else
1304                         return "NULL";
1305         } else
1306                 return "no proc";
1307 }
1308 #endif
1309 
1310 /*
1311  * 0.  Initial state
1312  *
1313  *     Other FSM states can be visited zero or more times, but
1314  *     this state is visited exactly once for each RPC.
1315  */
1316 static void
1317 call_start(struct rpc_task *task)
1318 {
1319         struct rpc_clnt *clnt = task->tk_client;
1320 
1321         dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid,
1322                         clnt->cl_program->name, clnt->cl_vers,
1323                         rpc_proc_name(task),
1324                         (RPC_IS_ASYNC(task) ? "async" : "sync"));
1325 
1326         /* Increment call count */
1327         task->tk_msg.rpc_proc->p_count++;
1328         clnt->cl_stats->rpccnt++;
1329         task->tk_action = call_reserve;
1330 }
1331 
1332 /*
1333  * 1.   Reserve an RPC call slot
1334  */
1335 static void
1336 call_reserve(struct rpc_task *task)
1337 {
1338         dprint_status(task);
1339 
1340         task->tk_status  = 0;
1341         task->tk_action  = call_reserveresult;
1342         xprt_reserve(task);
1343 }
1344 
1345 static void call_retry_reserve(struct rpc_task *task);
1346 
1347 /*
1348  * 1b.  Grok the result of xprt_reserve()
1349  */
1350 static void
1351 call_reserveresult(struct rpc_task *task)
1352 {
1353         int status = task->tk_status;
1354 
1355         dprint_status(task);
1356 
1357         /*
1358          * After a call to xprt_reserve(), we must have either
1359          * a request slot or else an error status.
1360          */
1361         task->tk_status = 0;
1362         if (status >= 0) {
1363                 if (task->tk_rqstp) {
1364                         task->tk_action = call_refresh;
1365                         return;
1366                 }
1367 
1368                 printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
1369                                 __func__, status);
1370                 rpc_exit(task, -EIO);
1371                 return;
1372         }
1373 
1374         /*
1375          * Even though there was an error, we may have acquired
1376          * a request slot somehow.  Make sure not to leak it.
1377          */
1378         if (task->tk_rqstp) {
1379                 printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
1380                                 __func__, status);
1381                 xprt_release(task);
1382         }
1383 
1384         switch (status) {
1385         case -ENOMEM:
1386                 rpc_delay(task, HZ >> 2);
1387         case -EAGAIN:   /* woken up; retry */
1388                 task->tk_action = call_retry_reserve;
1389                 return;
1390         case -EIO:      /* probably a shutdown */
1391                 break;
1392         default:
1393                 printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
1394                                 __func__, status);
1395                 break;
1396         }
1397         rpc_exit(task, status);
1398 }
1399 
1400 /*
1401  * 1c.  Retry reserving an RPC call slot
1402  */
1403 static void
1404 call_retry_reserve(struct rpc_task *task)
1405 {
1406         dprint_status(task);
1407 
1408         task->tk_status  = 0;
1409         task->tk_action  = call_reserveresult;
1410         xprt_retry_reserve(task);
1411 }
1412 
1413 /*
1414  * 2.   Bind and/or refresh the credentials
1415  */
1416 static void
1417 call_refresh(struct rpc_task *task)
1418 {
1419         dprint_status(task);
1420 
1421         task->tk_action = call_refreshresult;
1422         task->tk_status = 0;
1423         task->tk_client->cl_stats->rpcauthrefresh++;
1424         rpcauth_refreshcred(task);
1425 }
1426 
1427 /*
1428  * 2a.  Process the results of a credential refresh
1429  */
1430 static void
1431 call_refreshresult(struct rpc_task *task)
1432 {
1433         int status = task->tk_status;
1434 
1435         dprint_status(task);
1436 
1437         task->tk_status = 0;
1438         task->tk_action = call_refresh;
1439         switch (status) {
1440         case 0:
1441                 if (rpcauth_uptodatecred(task)) {
1442                         task->tk_action = call_allocate;
1443                         return;
1444                 }
1445                 /* Use rate-limiting and a max number of retries if refresh
1446                  * had status 0 but failed to update the cred.
1447                  */
1448         case -ETIMEDOUT:
1449                 rpc_delay(task, 3*HZ);
1450         case -EAGAIN:
1451                 status = -EACCES;
1452         case -EKEYEXPIRED:
1453                 if (!task->tk_cred_retry)
1454                         break;
1455                 task->tk_cred_retry--;
1456                 dprintk("RPC: %5u %s: retry refresh creds\n",
1457                                 task->tk_pid, __func__);
1458                 return;
1459         }
1460         dprintk("RPC: %5u %s: refresh creds failed with error %d\n",
1461                                 task->tk_pid, __func__, status);
1462         rpc_exit(task, status);
1463 }
1464 
1465 /*
1466  * 2b.  Allocate the buffer. For details, see sched.c:rpc_malloc.
1467  *      (Note: buffer memory is freed in xprt_release).
1468  */
1469 static void
1470 call_allocate(struct rpc_task *task)
1471 {
1472         unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack;
1473         struct rpc_rqst *req = task->tk_rqstp;
1474         struct rpc_xprt *xprt = req->rq_xprt;
1475         struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
1476 
1477         dprint_status(task);
1478 
1479         task->tk_status = 0;
1480         task->tk_action = call_bind;
1481 
1482         if (req->rq_buffer)
1483                 return;
1484 
1485         if (proc->p_proc != 0) {
1486                 BUG_ON(proc->p_arglen == 0);
1487                 if (proc->p_decode != NULL)
1488                         BUG_ON(proc->p_replen == 0);
1489         }
1490 
1491         /*
1492          * Calculate the size (in quads) of the RPC call
1493          * and reply headers, and convert both values
1494          * to byte sizes.
1495          */
1496         req->rq_callsize = RPC_CALLHDRSIZE + (slack << 1) + proc->p_arglen;
1497         req->rq_callsize <<= 2;
1498         req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen;
1499         req->rq_rcvsize <<= 2;
1500 
1501         req->rq_buffer = xprt->ops->buf_alloc(task,
1502                                         req->rq_callsize + req->rq_rcvsize);
1503         if (req->rq_buffer != NULL)
1504                 return;
1505 
1506         dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);
1507 
1508         if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
1509                 task->tk_action = call_allocate;
1510                 rpc_delay(task, HZ>>4);
1511                 return;
1512         }
1513 
1514         rpc_exit(task, -ERESTARTSYS);
1515 }
1516 
1517 static inline int
1518 rpc_task_need_encode(struct rpc_task *task)
1519 {
1520         return task->tk_rqstp->rq_snd_buf.len == 0;
1521 }
1522 
1523 static inline void
1524 rpc_task_force_reencode(struct rpc_task *task)
1525 {
1526         task->tk_rqstp->rq_snd_buf.len = 0;
1527         task->tk_rqstp->rq_bytes_sent = 0;
1528 }
1529 
1530 static inline void
1531 rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
1532 {
1533         buf->head[0].iov_base = start;
1534         buf->head[0].iov_len = len;
1535         buf->tail[0].iov_len = 0;
1536         buf->page_len = 0;
1537         buf->flags = 0;
1538         buf->len = 0;
1539         buf->buflen = len;
1540 }
1541 
1542 /*
1543  * 3.   Encode arguments of an RPC call
1544  */
1545 static void
1546 rpc_xdr_encode(struct rpc_task *task)
1547 {
1548         struct rpc_rqst *req = task->tk_rqstp;
1549         kxdreproc_t     encode;
1550         __be32          *p;
1551 
1552         dprint_status(task);
1553 
1554         rpc_xdr_buf_init(&req->rq_snd_buf,
1555                          req->rq_buffer,
1556                          req->rq_callsize);
1557         rpc_xdr_buf_init(&req->rq_rcv_buf,
1558                          (char *)req->rq_buffer + req->rq_callsize,
1559                          req->rq_rcvsize);
1560 
1561         p = rpc_encode_header(task);
1562         if (p == NULL) {
1563                 printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n");
1564                 rpc_exit(task, -EIO);
1565                 return;
1566         }
1567 
1568         encode = task->tk_msg.rpc_proc->p_encode;
1569         if (encode == NULL)
1570                 return;
1571 
1572         task->tk_status = rpcauth_wrap_req(task, encode, req, p,
1573                         task->tk_msg.rpc_argp);
1574 }
1575 
1576 /*
1577  * 4.   Get the server port number if not yet set
1578  */
1579 static void
1580 call_bind(struct rpc_task *task)
1581 {
1582         struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
1583 
1584         dprint_status(task);
1585 
1586         task->tk_action = call_connect;
1587         if (!xprt_bound(xprt)) {
1588                 task->tk_action = call_bind_status;
1589                 task->tk_timeout = xprt->bind_timeout;
1590                 xprt->ops->rpcbind(task);
1591         }
1592 }
1593 
1594 /*
1595  * 4a.  Sort out bind result
1596  */
1597 static void
1598 call_bind_status(struct rpc_task *task)
1599 {
1600         int status = -EIO;
1601 
1602         if (task->tk_status >= 0) {
1603                 dprint_status(task);
1604                 task->tk_status = 0;
1605                 task->tk_action = call_connect;
1606                 return;
1607         }
1608 
1609         trace_rpc_bind_status(task);
1610         switch (task->tk_status) {
1611         case -ENOMEM:
1612                 dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
1613                 rpc_delay(task, HZ >> 2);
1614                 goto retry_timeout;
1615         case -EACCES:
1616                 dprintk("RPC: %5u remote rpcbind: RPC program/version "
1617                                 "unavailable\n", task->tk_pid);
1618                 /* fail immediately if this is an RPC ping */
1619                 if (task->tk_msg.rpc_proc->p_proc == 0) {
1620                         status = -EOPNOTSUPP;
1621                         break;
1622                 }
1623                 if (task->tk_rebind_retry == 0)
1624                         break;
1625                 task->tk_rebind_retry--;
1626                 rpc_delay(task, 3*HZ);
1627                 goto retry_timeout;
1628         case -ETIMEDOUT:
1629                 dprintk("RPC: %5u rpcbind request timed out\n",
1630                                 task->tk_pid);
1631                 goto retry_timeout;
1632         case -EPFNOSUPPORT:
1633                 /* server doesn't support any rpcbind version we know of */
1634                 dprintk("RPC: %5u unrecognized remote rpcbind service\n",
1635                                 task->tk_pid);
1636                 break;
1637         case -EPROTONOSUPPORT:
1638                 dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n",
1639                                 task->tk_pid);
1640                 task->tk_status = 0;
1641                 task->tk_action = call_bind;
1642                 return;
1643         case -ECONNREFUSED:             /* connection problems */
1644         case -ECONNRESET:
1645         case -ECONNABORTED:
1646         case -ENOTCONN:
1647         case -EHOSTDOWN:
1648         case -EHOSTUNREACH:
1649         case -ENETUNREACH:
1650         case -ENOBUFS:
1651         case -EPIPE:
1652                 dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
1653                                 task->tk_pid, task->tk_status);
1654                 if (!RPC_IS_SOFTCONN(task)) {
1655                         rpc_delay(task, 5*HZ);
1656                         goto retry_timeout;
1657                 }
1658                 status = task->tk_status;
1659                 break;
1660         default:
1661                 dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
1662                                 task->tk_pid, -task->tk_status);
1663         }
1664 
1665         rpc_exit(task, status);
1666         return;
1667 
1668 retry_timeout:
1669         task->tk_action = call_timeout;
1670 }
1671 
1672 /*
1673  * 4b.  Connect to the RPC server
1674  */
1675 static void
1676 call_connect(struct rpc_task *task)
1677 {
1678         struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
1679 
1680         dprintk("RPC: %5u call_connect xprt %p %s connected\n",
1681                         task->tk_pid, xprt,
1682                         (xprt_connected(xprt) ? "is" : "is not"));
1683 
1684         task->tk_action = call_transmit;
1685         if (!xprt_connected(xprt)) {
1686                 task->tk_action = call_connect_status;
1687                 if (task->tk_status < 0)
1688                         return;
1689                 if (task->tk_flags & RPC_TASK_NOCONNECT) {
1690                         rpc_exit(task, -ENOTCONN);
1691                         return;
1692                 }
1693                 xprt_connect(task);
1694         }
1695 }
1696 
1697 /*
1698  * 4c.  Sort out connect result
1699  */
1700 static void
1701 call_connect_status(struct rpc_task *task)
1702 {
1703         struct rpc_clnt *clnt = task->tk_client;
1704         int status = task->tk_status;
1705 
1706         dprint_status(task);
1707 
1708         trace_rpc_connect_status(task, status);
1709         task->tk_status = 0;
1710         switch (status) {
1711         case -ECONNREFUSED:
1712         case -ECONNRESET:
1713         case -ECONNABORTED:
1714         case -ENETUNREACH:
1715         case -EHOSTUNREACH:
1716         case -ENOBUFS:
1717         case -EPIPE:
1718                 if (RPC_IS_SOFTCONN(task))
1719                         break;
1720                 /* retry with existing socket, after a delay */
1721                 rpc_delay(task, 3*HZ);
1722         case -EAGAIN:
1723                 /* Check for timeouts before looping back to call_bind */
1724         case -ETIMEDOUT:
1725                 task->tk_action = call_timeout;
1726                 return;
1727         case 0:
1728                 clnt->cl_stats->netreconn++;
1729                 task->tk_action = call_transmit;
1730                 return;
1731         }
1732         rpc_exit(task, status);
1733 }
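
/*
 * Editorial note (not part of clnt.c): the missing breaks above are
 * deliberate.  A hard (non-SOFTCONN) task that hits a connection error
 * sleeps for three seconds and then falls through the -EAGAIN and
 * -ETIMEDOUT cases into call_timeout, so connection failures are
 * retried with the normal timeout bookkeeping.
 */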
1734 
1735 /*
1736  * 5.   Transmit the RPC request, and wait for reply
1737  */
1738 static void
1739 call_transmit(struct rpc_task *task)
1740 {
1741         dprint_status(task);
1742 
1743         task->tk_action = call_status;
1744         if (task->tk_status < 0)
1745                 return;
1746         task->tk_status = xprt_prepare_transmit(task);
1747         if (task->tk_status != 0)
1748                 return;
1749         task->tk_action = call_transmit_status;
1750         /* Encode here so that rpcsec_gss can use correct sequence number. */
1751         if (rpc_task_need_encode(task)) {
1752                 rpc_xdr_encode(task);
1753                 /* Did the encode result in an error condition? */
1754                 if (task->tk_status != 0) {
1755                         /* Was the error nonfatal? */
1756                         if (task->tk_status == -EAGAIN)
1757                                 rpc_delay(task, HZ >> 4);
1758                         else
1759                                 rpc_exit(task, task->tk_status);
1760                         return;
1761                 }
1762         }
1763         xprt_transmit(task);
1764         if (task->tk_status < 0)
1765                 return;
1766         /*
1767          * On success, ensure that we call xprt_end_transmit() before sleeping
1768          * in order to allow access to the socket to other RPC requests.
1769          */
1770         call_transmit_status(task);
1771         if (rpc_reply_expected(task))
1772                 return;
1773         task->tk_action = rpc_exit_task;
1774         rpc_wake_up_queued_task(&task->tk_rqstp->rq_xprt->pending, task);
1775 }
1776 
1777 /*
1778  * 5a.  Handle cleanup after a transmission
1779  */
1780 static void
1781 call_transmit_status(struct rpc_task *task)
1782 {
1783         task->tk_action = call_status;
1784 
1785         /*
1786          * Common case: success.  Force the compiler to put this
1787          * test first.
1788          */
1789         if (task->tk_status == 0) {
1790                 xprt_end_transmit(task);
1791                 rpc_task_force_reencode(task);
1792                 return;
1793         }
1794 
1795         switch (task->tk_status) {
1796         case -EAGAIN:
1797                 break;
1798         default:
1799                 dprint_status(task);
1800                 xprt_end_transmit(task);
1801                 rpc_task_force_reencode(task);
1802                 break;
1803                 /*
1804                  * Special cases: if we've been waiting on the
1805                  * socket's write_space() callback, or if the
1806                  * socket just returned a connection error,
1807                  * then hold onto the transport lock.
1808                  */
1809         case -ECONNREFUSED:
1810         case -EHOSTDOWN:
1811         case -EHOSTUNREACH:
1812         case -ENETUNREACH:
1813                 if (RPC_IS_SOFTCONN(task)) {
1814                         xprt_end_transmit(task);
1815                         rpc_exit(task, task->tk_status);
1816                         break;
1817                 }
1818         case -ECONNRESET:
1819         case -ECONNABORTED:
1820         case -ENOTCONN:
1821         case -ENOBUFS:
1822         case -EPIPE:
1823                 rpc_task_force_reencode(task);
1824         }
1825 }
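
/*
 * Editorial note (not part of clnt.c): here, too, the fall-throughs are
 * intentional.  A hard task that hits a connection error skips
 * xprt_end_transmit() (it keeps the transport lock, as the comment in
 * the switch explains) and falls through to rpc_task_force_reencode(),
 * so the request is re-marshalled before the next transmit attempt.
 */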
1826 
1827 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1828 /*
1829  * 5b.  Send the backchannel RPC reply.  On error, drop the reply.  In
1830  * addition, disconnect on connectivity errors.
1831  */
1832 static void
1833 call_bc_transmit(struct rpc_task *task)
1834 {
1835         struct rpc_rqst *req = task->tk_rqstp;
1836 
1837         task->tk_status = xprt_prepare_transmit(task);
1838         if (task->tk_status == -EAGAIN) {
1839                 /*
1840                  * Could not reserve the transport. Try again after the
1841                  * transport is released.
1842                  */
1843                 task->tk_status = 0;
1844                 task->tk_action = call_bc_transmit;
1845                 return;
1846         }
1847 
1848         task->tk_action = rpc_exit_task;
1849         if (task->tk_status < 0) {
1850                 printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1851                         "error: %d\n", task->tk_status);
1852                 return;
1853         }
1854 
1855         xprt_transmit(task);
1856         xprt_end_transmit(task);
1857         dprint_status(task);
1858         switch (task->tk_status) {
1859         case 0:
1860                 /* Success */
1861                 break;
1862         case -EHOSTDOWN:
1863         case -EHOSTUNREACH:
1864         case -ENETUNREACH:
1865         case -ETIMEDOUT:
1866                 /*
1867                  * Problem reaching the server.  Disconnect and let the
1868                  * forechannel reestablish the connection.  The server will
1869                  * have to retransmit the backchannel request and we'll
1870                  * reprocess it.  Since these ops are idempotent, there's no
1871                  * need to cache our reply at this time.
1872                  */
1873                 printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1874                         "error: %d\n", task->tk_status);
1875                 xprt_conditional_disconnect(req->rq_xprt,
1876                         req->rq_connect_cookie);
1877                 break;
1878         default:
1879                 /*
1880                  * We were unable to reply and will have to drop the
1881                  * request.  The server should reconnect and retransmit.
1882                  */
1883                 WARN_ON_ONCE(task->tk_status == -EAGAIN);
1884                 printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1885                         "error: %d\n", task->tk_status);
1886                 break;
1887         }
1888         rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
1889 }
1890 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
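
/*
 * Editorial note (not part of clnt.c): the backchannel path above sends
 * a reply to a server-originated request over the connection the client
 * already holds; NFSv4.1 callbacks are the typical user.  On transport
 * errors the reply is simply dropped and the server is expected to
 * retransmit the callback once the forechannel reconnects.
 */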
1891 
1892 /*
1893  * 6.   Sort out the RPC call status
1894  */
1895 static void
1896 call_status(struct rpc_task *task)
1897 {
1898         struct rpc_clnt *clnt = task->tk_client;
1899         struct rpc_rqst *req = task->tk_rqstp;
1900         int             status;
1901 
1902         if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent)
1903                 task->tk_status = req->rq_reply_bytes_recvd;
1904 
1905         dprint_status(task);
1906 
1907         status = task->tk_status;
1908         if (status >= 0) {
1909                 task->tk_action = call_decode;
1910                 return;
1911         }
1912 
1913         trace_rpc_call_status(task);
1914         task->tk_status = 0;
1915         switch(status) {
1916         case -EHOSTDOWN:
1917         case -EHOSTUNREACH:
1918         case -ENETUNREACH:
1919                 /*
1920                  * Delay any retries for 3 seconds, then handle as if it
1921                  * were a timeout.
1922                  */
1923                 rpc_delay(task, 3*HZ);
1924         case -ETIMEDOUT:
1925                 task->tk_action = call_timeout;
1926                 if (task->tk_client->cl_discrtry)
1927                         xprt_conditional_disconnect(req->rq_xprt,
1928                                         req->rq_connect_cookie);
1929                 break;
1930         case -ECONNREFUSED:
1931         case -ECONNRESET:
1932         case -ECONNABORTED:
1933                 rpc_force_rebind(clnt);
1934         case -ENOBUFS:
1935                 rpc_delay(task, 3*HZ);
1936         case -EPIPE:
1937         case -ENOTCONN:
1938                 task->tk_action = call_bind;
1939                 break;
1940         case -EAGAIN:
1941                 task->tk_action = call_transmit;
1942                 break;
1943         case -EIO:
1944                 /* shutdown or soft timeout */
1945                 rpc_exit(task, status);
1946                 break;
1947         default:
1948                 if (clnt->cl_chatty)
1949                         printk("%s: RPC call returned error %d\n",
1950                                clnt->cl_program->name, -status);
1951                 rpc_exit(task, status);
1952         }
1953 }
1954 
1955 /*
1956  * 6a.  Handle RPC timeout
1957  *      We do not release the request slot, so we keep using the
1958  *      same XID for all retransmits.
1959  */
1960 static void
1961 call_timeout(struct rpc_task *task)
1962 {
1963         struct rpc_clnt *clnt = task->tk_client;
1964 
1965         if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
1966                 dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid);
1967                 goto retry;
1968         }
1969 
1970         dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
1971         task->tk_timeouts++;
1972 
1973         if (RPC_IS_SOFTCONN(task)) {
1974                 rpc_exit(task, -ETIMEDOUT);
1975                 return;
1976         }
1977         if (RPC_IS_SOFT(task)) {
1978                 if (clnt->cl_chatty) {
1979                         rcu_read_lock();
1980                         printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
1981                                 clnt->cl_program->name,
1982                                 rcu_dereference(clnt->cl_xprt)->servername);
1983                         rcu_read_unlock();
1984                 }
1985                 if (task->tk_flags & RPC_TASK_TIMEOUT)
1986                         rpc_exit(task, -ETIMEDOUT);
1987                 else
1988                         rpc_exit(task, -EIO);
1989                 return;
1990         }
1991 
1992         if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
1993                 task->tk_flags |= RPC_CALL_MAJORSEEN;
1994                 if (clnt->cl_chatty) {
1995                         rcu_read_lock();
1996                         printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
1997                         clnt->cl_program->name,
1998                         rcu_dereference(clnt->cl_xprt)->servername);
1999                         rcu_read_unlock();
2000                 }
2001         }
2002         rpc_force_rebind(clnt);
2003         /*
2004          * Did our request time out due to an RPCSEC_GSS out-of-sequence
2005          * event? RFC2203 requires the server to drop all such requests.
2006          */
2007         rpcauth_invalcred(task);
2008 
2009 retry:
2010         clnt->cl_stats->rpcretrans++;
2011         task->tk_action = call_bind;
2012         task->tk_status = 0;
2013 }
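
/*
 * Editorial note (not part of clnt.c): the branches above implement,
 * roughly, the soft/hard mount distinction.  SOFTCONN and SOFT tasks
 * give up with -ETIMEDOUT (or -EIO), while hard tasks log "still
 * trying" once, force a rebind, invalidate possibly stale credentials,
 * and loop back through call_bind to retransmit with the same XID.
 */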
2014 
2015 /*
2016  * 7.   Decode the RPC reply
2017  */
2018 static void
2019 call_decode(struct rpc_task *task)
2020 {
2021         struct rpc_clnt *clnt = task->tk_client;
2022         struct rpc_rqst *req = task->tk_rqstp;
2023         kxdrdproc_t     decode = task->tk_msg.rpc_proc->p_decode;
2024         __be32          *p;
2025 
2026         dprint_status(task);
2027 
2028         if (task->tk_flags & RPC_CALL_MAJORSEEN) {
2029                 if (clnt->cl_chatty) {
2030                         rcu_read_lock();
2031                         printk(KERN_NOTICE "%s: server %s OK\n",
2032                                 clnt->cl_program->name,
2033                                 rcu_dereference(clnt->cl_xprt)->servername);
2034                         rcu_read_unlock();
2035                 }
2036                 task->tk_flags &= ~RPC_CALL_MAJORSEEN;
2037         }
2038 
2039         /*
2040          * Ensure that we see all writes made by xprt_complete_rqst()
2041          * before it changed req->rq_reply_bytes_recvd.
2042          */
2043         smp_rmb();
2044         req->rq_rcv_buf.len = req->rq_private_buf.len;
2045 
2046         /* Check that the softirq receive buffer is valid */
2047         WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
2048                                 sizeof(req->rq_rcv_buf)) != 0);
2049 
2050         if (req->rq_rcv_buf.len < 12) {
2051                 if (!RPC_IS_SOFT(task)) {
2052                         task->tk_action = call_bind;
2053                         clnt->cl_stats->rpcretrans++;
2054                         goto out_retry;
2055                 }
2056                 dprintk("RPC:       %s: too small RPC reply size (%d bytes)\n",
2057                                 clnt->cl_program->name, task->tk_status);
2058                 task->tk_action = call_timeout;
2059                 goto out_retry;
2060         }
2061 
2062         p = rpc_verify_header(task);
2063         if (IS_ERR(p)) {
2064                 if (p == ERR_PTR(-EAGAIN))
2065                         goto out_retry;
2066                 return;
2067         }
2068 
2069         task->tk_action = rpc_exit_task;
2070 
2071         if (decode) {
2072                 task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
2073                                                       task->tk_msg.rpc_resp);
2074         }
2075         dprintk("RPC: %5u call_decode result %d\n", task->tk_pid,
2076                         task->tk_status);
2077         return;
2078 out_retry:
2079         task->tk_status = 0;
2080         /* Note: rpc_verify_header() may have freed the RPC slot */
2081         if (task->tk_rqstp == req) {
2082                 req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
2083                 if (task->tk_client->cl_discrtry)
2084                         xprt_conditional_disconnect(req->rq_xprt,
2085                                         req->rq_connect_cookie);
2086         }
2087 }
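
/*
 * Editorial note (not part of clnt.c): taken together, steps 3-7 above
 * form the request loop of the client state machine.  The normal path
 * is
 *
 *	call_bind -> call_connect -> call_transmit -> call_status
 *		  -> call_decode -> rpc_exit_task
 *
 * while timeouts, short replies and connection errors send the task
 * back through call_timeout and/or call_bind so the request is
 * retransmitted from the slot it still holds.
 */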
2088 
2089 static __be32 *
2090 rpc_encode_header(struct rpc_task *task)
2091 {
2092         struct rpc_clnt *clnt = task->tk_client;
2093         struct rpc_rqst *req = task->tk_rqstp;
2094         __be32          *p = req->rq_svec[0].iov_base;
2095 
2096         /* FIXME: check buffer size? */
2097 
2098         p = xprt_skip_transport_header(req->rq_xprt, p);
2099         *p++ = req->rq_xid;             /* XID */
2100         *p++ = htonl(RPC_CALL);         /* CALL */
2101         *p++ = htonl(RPC_VERSION);      /* RPC version */
2102         *p++ = htonl(clnt->cl_prog);    /* program number */
2103         *p++ = htonl(clnt->cl_vers);    /* program version */
2104         *p++ = htonl(task->tk_msg.rpc_proc->p_proc);    /* procedure */
2105         p = rpcauth_marshcred(task, p);
2106         req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
2107         return p;
2108 }
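
/*
 * Editorial sketch (not part of clnt.c): rpc_encode_header() above emits
 * the standard ONC RPC call header (RFC 5531) and then defers the
 * credential and verifier to rpcauth_marshcred().  For the simplest
 * case, AUTH_NONE, the complete header is just ten 32-bit XDR words.
 * The hypothetical helper below spells that layout out; it illustrates
 * the wire format and is not part of the kernel sources.
 */
static __be32 * __maybe_unused
rpc_sketch_null_call_header(__be32 *p, __be32 xid, u32 prog, u32 vers, u32 proc)
{
	*p++ = xid;			/* transaction id, already big-endian */
	*p++ = htonl(RPC_CALL);		/* msg_type: this is a call */
	*p++ = htonl(RPC_VERSION);	/* RPC protocol version 2 */
	*p++ = htonl(prog);		/* program number */
	*p++ = htonl(vers);		/* program version */
	*p++ = htonl(proc);		/* procedure number */
	*p++ = htonl(RPC_AUTH_NULL);	/* credential flavor: AUTH_NONE */
	*p++ = htonl(0);		/* credential body length: 0 */
	*p++ = htonl(RPC_AUTH_NULL);	/* verifier flavor: AUTH_NONE */
	*p++ = htonl(0);		/* verifier body length: 0 */
	return p;			/* procedure arguments start here */
}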
2109 
2110 static __be32 *
2111 rpc_verify_header(struct rpc_task *task)
2112 {
2113         struct rpc_clnt *clnt = task->tk_client;
2114         struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
2115         int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
2116         __be32  *p = iov->iov_base;
2117         u32 n;
2118         int error = -EACCES;
2119 
2120         if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
2121                 /* RFC-1014 says that the representation of XDR data must be a
2122                  * multiple of four bytes
2123                  * - if it isn't, pointer subtraction in the NFS client may give
2124                  *   undefined results
2125                  */
2126                 dprintk("RPC: %5u %s: XDR representation not a multiple of"
2127                        " 4 bytes: 0x%x\n", task->tk_pid, __func__,
2128                        task->tk_rqstp->rq_rcv_buf.len);
2129                 error = -EIO;
2130                 goto out_err;
2131         }
2132         if ((len -= 3) < 0)
2133                 goto out_overflow;
2134 
2135         p += 1; /* skip XID */
2136         if ((n = ntohl(*p++)) != RPC_REPLY) {
2137                 dprintk("RPC: %5u %s: not an RPC reply: %x\n",
2138                         task->tk_pid, __func__, n);
2139                 error = -EIO;
2140                 goto out_garbage;
2141         }
2142 
2143         if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
2144                 if (--len < 0)
2145                         goto out_overflow;
2146                 switch ((n = ntohl(*p++))) {
2147                 case RPC_AUTH_ERROR:
2148                         break;
2149                 case RPC_MISMATCH:
2150                         dprintk("RPC: %5u %s: RPC call version mismatch!\n",
2151                                 task->tk_pid, __func__);
2152                         error = -EPROTONOSUPPORT;
2153                         goto out_err;
2154                 default:
2155                         dprintk("RPC: %5u %s: RPC call rejected, "
2156                                 "unknown error: %x\n",
2157                                 task->tk_pid, __func__, n);
2158                         error = -EIO;
2159                         goto out_err;
2160                 }
2161                 if (--len < 0)
2162                         goto out_overflow;
2163                 switch ((n = ntohl(*p++))) {
2164                 case RPC_AUTH_REJECTEDCRED:
2165                 case RPC_AUTH_REJECTEDVERF:
2166                 case RPCSEC_GSS_CREDPROBLEM:
2167                 case RPCSEC_GSS_CTXPROBLEM:
2168                         if (!task->tk_cred_retry)
2169                                 break;
2170                         task->tk_cred_retry--;
2171                         dprintk("RPC: %5u %s: retry stale creds\n",
2172                                         task->tk_pid, __func__);
2173                         rpcauth_invalcred(task);
2174                         /* Ensure we obtain a new XID! */
2175                         xprt_release(task);
2176                         task->tk_action = call_reserve;
2177                         goto out_retry;
2178                 case RPC_AUTH_BADCRED:
2179                 case RPC_AUTH_BADVERF:
2180                         /* possibly garbled cred/verf? */
2181                         if (!task->tk_garb_retry)
2182                                 break;
2183                         task->tk_garb_retry--;
2184                         dprintk("RPC: %5u %s: retry garbled creds\n",
2185                                         task->tk_pid, __func__);
2186                         task->tk_action = call_bind;
2187                         goto out_retry;
2188                 case RPC_AUTH_TOOWEAK:
2189                         rcu_read_lock();
2190                         printk(KERN_NOTICE "RPC: server %s requires stronger "
2191                                "authentication.\n",
2192                                rcu_dereference(clnt->cl_xprt)->servername);
2193                         rcu_read_unlock();
2194                         break;
2195                 default:
2196                         dprintk("RPC: %5u %s: unknown auth error: %x\n",
2197                                         task->tk_pid, __func__, n);
2198                         error = -EIO;
2199                 }
2200                 dprintk("RPC: %5u %s: call rejected %d\n",
2201                                 task->tk_pid, __func__, n);
2202                 goto out_err;
2203         }
2204         p = rpcauth_checkverf(task, p);
2205         if (IS_ERR(p)) {
2206                 error = PTR_ERR(p);
2207                 dprintk("RPC: %5u %s: auth check failed with %d\n",
2208                                 task->tk_pid, __func__, error);
2209                 goto out_garbage;               /* bad verifier, retry */
2210         }
2211         len = p - (__be32 *)iov->iov_base - 1;
2212         if (len < 0)
2213                 goto out_overflow;
2214         switch ((n = ntohl(*p++))) {
2215         case RPC_SUCCESS:
2216                 return p;
2217         case RPC_PROG_UNAVAIL:
2218                 dprintk_rcu("RPC: %5u %s: program %u is unsupported "
2219                                 "by server %s\n", task->tk_pid, __func__,
2220                                 (unsigned int)clnt->cl_prog,
2221                                 rcu_dereference(clnt->cl_xprt)->servername);
2222                 error = -EPFNOSUPPORT;
2223                 goto out_err;
2224         case RPC_PROG_MISMATCH:
2225                 dprintk_rcu("RPC: %5u %s: program %u, version %u unsupported "
2226                                 "by server %s\n", task->tk_pid, __func__,
2227                                 (unsigned int)clnt->cl_prog,
2228                                 (unsigned int)clnt->cl_vers,
2229                                 rcu_dereference(clnt->cl_xprt)->servername);
2230                 error = -EPROTONOSUPPORT;
2231                 goto out_err;
2232         case RPC_PROC_UNAVAIL:
2233                 dprintk_rcu("RPC: %5u %s: proc %s unsupported by program %u, "
2234                                 "version %u on server %s\n",
2235                                 task->tk_pid, __func__,
2236                                 rpc_proc_name(task),
2237                                 clnt->cl_prog, clnt->cl_vers,
2238                                 rcu_dereference(clnt->cl_xprt)->servername);
2239                 error = -EOPNOTSUPP;
2240                 goto out_err;
2241         case RPC_GARBAGE_ARGS:
2242                 dprintk("RPC: %5u %s: server saw garbage\n",
2243                                 task->tk_pid, __func__);
2244                 break;                  /* retry */
2245         default:
2246                 dprintk("RPC: %5u %s: server accept status: %x\n",
2247                                 task->tk_pid, __func__, n);
2248                 /* Also retry */
2249         }
2250 
2251 out_garbage:
2252         clnt->cl_stats->rpcgarbage++;
2253         if (task->tk_garb_retry) {
2254                 task->tk_garb_retry--;
2255                 dprintk("RPC: %5u %s: retrying\n",
2256                                 task->tk_pid, __func__);
2257                 task->tk_action = call_bind;
2258 out_retry:
2259                 return ERR_PTR(-EAGAIN);
2260         }
2261 out_err:
2262         rpc_exit(task, error);
2263         dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid,
2264                         __func__, error);
2265         return ERR_PTR(error);
2266 out_overflow:
2267         dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid,
2268                         __func__);
2269         goto out_garbage;
2270 }
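
/*
 * Editorial sketch (not part of clnt.c): the happy path that
 * rpc_verify_header() walks above is an accepted reply whose verifier
 * checks out, i.e. the XDR words
 *
 *	xid | REPLY | MSG_ACCEPTED | verf flavor | verf length | SUCCESS
 *
 * followed by the procedure-specific results that call_decode() hands
 * to p_decode.  The hypothetical helper below checks that sequence,
 * starting just past the XID, for an AUTH_NONE verifier; it illustrates
 * the wire format and is not part of the kernel sources.
 */
static int __maybe_unused
rpc_sketch_check_null_reply(const __be32 *p)
{
	/* p points just past the XID of the received reply */
	if (ntohl(*p++) != RPC_REPLY)		/* msg_type */
		return -EIO;
	if (ntohl(*p++) != RPC_MSG_ACCEPTED)	/* reply_stat */
		return -EACCES;
	if (ntohl(*p++) != RPC_AUTH_NULL)	/* verifier flavor */
		return -EIO;
	if (ntohl(*p++) != 0)			/* verifier length */
		return -EIO;
	if (ntohl(*p++) != RPC_SUCCESS)		/* accept_stat */
		return -EIO;
	return 0;				/* results follow at p */
}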
2271 
2272 static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
2273 {
2274 }
2275 
2276 static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
2277 {
2278         return 0;
2279 }
2280 
2281 static struct rpc_procinfo rpcproc_null = {
2282         .p_encode = rpcproc_encode_null,
2283         .p_decode = rpcproc_decode_null,
2284 };
2285 
2286 static int rpc_ping(struct rpc_clnt *clnt)
2287 {
2288         struct rpc_message msg = {
2289                 .rpc_proc = &rpcproc_null,
2290         };
2291         int err;
2292         msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
2293         err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN);
2294         put_rpccred(msg.rpc_cred);
2295         return err;
2296 }
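
/*
 * Editorial note (not part of clnt.c): rpc_ping() sends procedure 0 (the
 * NULL procedure) with AUTH_NULL credentials and soft semantics, so it
 * fails quickly instead of retrying forever.  It is the probe that
 * client creation typically performs unless the caller asked for
 * RPC_CLNT_CREATE_NOPING.
 */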
2297 
2298 struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
2299 {
2300         struct rpc_message msg = {
2301                 .rpc_proc = &rpcproc_null,
2302                 .rpc_cred = cred,
2303         };
2304         struct rpc_task_setup task_setup_data = {
2305                 .rpc_client = clnt,
2306                 .rpc_message = &msg,
2307                 .callback_ops = &rpc_default_ops,
2308                 .flags = flags,
2309         };
2310         return rpc_run_task(&task_setup_data);
2311 }
2312 EXPORT_SYMBOL_GPL(rpc_call_null);
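
/*
 * Editorial sketch (not part of clnt.c): rpc_call_null() hands back the
 * live rpc_task from rpc_run_task(), so a caller that just wants to
 * fire off an asynchronous NULL ping only has to drop its reference.
 * The hypothetical helper below shows that pattern, assuming the
 * client's default credential is acceptable (cred == NULL).
 */
static int __maybe_unused
rpc_sketch_ping_async(struct rpc_clnt *clnt)
{
	struct rpc_task *task;

	task = rpc_call_null(clnt, NULL, RPC_TASK_ASYNC | RPC_TASK_SOFT);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);	/* the task runs on; we only drop our ref */
	return 0;
}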
2313 
2314 #ifdef RPC_DEBUG
2315 static void rpc_show_header(void)
2316 {
2317         printk(KERN_INFO "-pid- flgs status -client- --rqstp- "
2318                 "-timeout ---ops--\n");
2319 }
2320 
2321 static void rpc_show_task(const struct rpc_clnt *clnt,
2322                           const struct rpc_task *task)
2323 {
2324         const char *rpc_waitq = "none";
2325 
2326         if (RPC_IS_QUEUED(task))
2327                 rpc_waitq = rpc_qname(task->tk_waitqueue);
2328 
2329         printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
2330                 task->tk_pid, task->tk_flags, task->tk_status,
2331                 clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops,
2332                 clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task),
2333                 task->tk_action, rpc_waitq);
2334 }
2335 
2336 void rpc_show_tasks(struct net *net)
2337 {
2338         struct rpc_clnt *clnt;
2339         struct rpc_task *task;
2340         int header = 0;
2341         struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
2342 
2343         spin_lock(&sn->rpc_client_lock);
2344         list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
2345                 spin_lock(&clnt->cl_lock);
2346                 list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
2347                         if (!header) {
2348                                 rpc_show_header();
2349                                 header++;
2350                         }
2351                         rpc_show_task(clnt, task);
2352                 }
2353                 spin_unlock(&clnt->cl_lock);
2354         }
2355         spin_unlock(&sn->rpc_client_lock);
2356 }
2357 #endif
2358 
