Linux/net/sunrpc/clnt.c


  1 /*
  2  *  linux/net/sunrpc/clnt.c
  3  *
  4  *  This file contains the high-level RPC interface.
  5  *  It is modeled as a finite state machine to support both synchronous
  6  *  and asynchronous requests.
  7  *
  8  *  -   RPC header generation and argument serialization.
  9  *  -   Credential refresh.
 10  *  -   TCP connect handling.
 11  *  -   Retry of operation when it is suspected the operation failed because
 12  *      of uid squashing on the server, or when the credentials were stale
 13  *      and need to be refreshed, or when a packet was damaged in transit.
 14  *      This may have to be moved to the VFS layer.
 15  *
 16  *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 17  *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 18  */
 19 
 20 
 21 #include <linux/module.h>
 22 #include <linux/types.h>
 23 #include <linux/kallsyms.h>
 24 #include <linux/mm.h>
 25 #include <linux/namei.h>
 26 #include <linux/mount.h>
 27 #include <linux/slab.h>
 28 #include <linux/rcupdate.h>
 29 #include <linux/utsname.h>
 30 #include <linux/workqueue.h>
 31 #include <linux/in.h>
 32 #include <linux/in6.h>
 33 #include <linux/un.h>
 34 
 35 #include <linux/sunrpc/clnt.h>
 36 #include <linux/sunrpc/addr.h>
 37 #include <linux/sunrpc/rpc_pipe_fs.h>
 38 #include <linux/sunrpc/metrics.h>
 39 #include <linux/sunrpc/bc_xprt.h>
 40 #include <trace/events/sunrpc.h>
 41 
 42 #include "sunrpc.h"
 43 #include "netns.h"
 44 
 45 #ifdef RPC_DEBUG
 46 # define RPCDBG_FACILITY        RPCDBG_CALL
 47 #endif
 48 
 49 #define dprint_status(t)                                        \
 50         dprintk("RPC: %5u %s (status %d)\n", t->tk_pid,         \
 51                         __func__, t->tk_status)
 52 
 53 /*
 54  * All RPC clients are linked into the per-netns all_clients list;
    *  destroy_wait lets rpc_shutdown_client() wait for a dying client's
    *  outstanding tasks to exit.
 55  */
 56 
 57 static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
 58 
 59 
 60 static void     call_start(struct rpc_task *task);
 61 static void     call_reserve(struct rpc_task *task);
 62 static void     call_reserveresult(struct rpc_task *task);
 63 static void     call_allocate(struct rpc_task *task);
 64 static void     call_decode(struct rpc_task *task);
 65 static void     call_bind(struct rpc_task *task);
 66 static void     call_bind_status(struct rpc_task *task);
 67 static void     call_transmit(struct rpc_task *task);
 68 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
 69 static void     call_bc_transmit(struct rpc_task *task);
 70 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
 71 static void     call_status(struct rpc_task *task);
 72 static void     call_transmit_status(struct rpc_task *task);
 73 static void     call_refresh(struct rpc_task *task);
 74 static void     call_refreshresult(struct rpc_task *task);
 75 static void     call_timeout(struct rpc_task *task);
 76 static void     call_connect(struct rpc_task *task);
 77 static void     call_connect_status(struct rpc_task *task);
 78 
 79 static __be32   *rpc_encode_header(struct rpc_task *task);
 80 static __be32   *rpc_verify_header(struct rpc_task *task);
 81 static int      rpc_ping(struct rpc_clnt *clnt);
 82 
 83 static void rpc_register_client(struct rpc_clnt *clnt)
 84 {
 85         struct net *net = rpc_net_ns(clnt);
 86         struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
 87 
 88         spin_lock(&sn->rpc_client_lock);
 89         list_add(&clnt->cl_clients, &sn->all_clients);
 90         spin_unlock(&sn->rpc_client_lock);
 91 }
 92 
 93 static void rpc_unregister_client(struct rpc_clnt *clnt)
 94 {
 95         struct net *net = rpc_net_ns(clnt);
 96         struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
 97 
 98         spin_lock(&sn->rpc_client_lock);
 99         list_del(&clnt->cl_clients);
100         spin_unlock(&sn->rpc_client_lock);
101 }
102 
103 static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
104 {
105         rpc_remove_client_dir(clnt);
106 }
107 
108 static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
109 {
110         struct net *net = rpc_net_ns(clnt);
111         struct super_block *pipefs_sb;
112 
113         pipefs_sb = rpc_get_sb_net(net);
114         if (pipefs_sb) {
115                 __rpc_clnt_remove_pipedir(clnt);
116                 rpc_put_sb_net(net);
117         }
118 }
119 
120 static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
121                                     struct rpc_clnt *clnt)
122 {
123         static uint32_t clntid;
124         const char *dir_name = clnt->cl_program->pipe_dir_name;
125         char name[15];
126         struct dentry *dir, *dentry;
127 
128         dir = rpc_d_lookup_sb(sb, dir_name);
129         if (dir == NULL) {
130                 pr_info("RPC: pipefs directory doesn't exist: %s\n", dir_name);
131                 return dir;
132         }
133         for (;;) {
134                 snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
135                 name[sizeof(name) - 1] = '\0';
136                 dentry = rpc_create_client_dir(dir, name, clnt);
137                 if (!IS_ERR(dentry))
138                         break;
139                 if (dentry == ERR_PTR(-EEXIST))
140                         continue;
141                 printk(KERN_INFO "RPC: Couldn't create pipefs entry"
142                                 " %s/%s, error %ld\n",
143                                 dir_name, name, PTR_ERR(dentry));
144                 break;
145         }
146         dput(dir);
147         return dentry;
148 }
149 
150 static int
151 rpc_setup_pipedir(struct super_block *pipefs_sb, struct rpc_clnt *clnt)
152 {
153         struct dentry *dentry;
154 
155         if (clnt->cl_program->pipe_dir_name != NULL) {
156                 dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt);
157                 if (IS_ERR(dentry))
158                         return PTR_ERR(dentry);
159         }
160         return 0;
161 }
162 
163 static int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event)
164 {
165         if (clnt->cl_program->pipe_dir_name == NULL)
166                 return 1;
167 
168         switch (event) {
169         case RPC_PIPEFS_MOUNT:
170                 if (clnt->cl_pipedir_objects.pdh_dentry != NULL)
171                         return 1;
172                 if (atomic_read(&clnt->cl_count) == 0)
173                         return 1;
174                 break;
175         case RPC_PIPEFS_UMOUNT:
176                 if (clnt->cl_pipedir_objects.pdh_dentry == NULL)
177                         return 1;
178                 break;
179         }
180         return 0;
181 }
182 
183 static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event,
184                                    struct super_block *sb)
185 {
186         struct dentry *dentry;
187         int err = 0;
188 
189         switch (event) {
190         case RPC_PIPEFS_MOUNT:
191                 dentry = rpc_setup_pipedir_sb(sb, clnt);
192                 if (!dentry)
193                         return -ENOENT;
194                 if (IS_ERR(dentry))
195                         return PTR_ERR(dentry);
196                 break;
197         case RPC_PIPEFS_UMOUNT:
198                 __rpc_clnt_remove_pipedir(clnt);
199                 break;
200         default:
201                 printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event);
202                 return -ENOTSUPP;
203         }
204         return err;
205 }
206 
207 static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
208                                 struct super_block *sb)
209 {
210         int error = 0;
211 
212         for (;; clnt = clnt->cl_parent) {
213                 if (!rpc_clnt_skip_event(clnt, event))
214                         error = __rpc_clnt_handle_event(clnt, event, sb);
215                 if (error || clnt == clnt->cl_parent)
216                         break;
217         }
218         return error;
219 }
220 
221 static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
222 {
223         struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
224         struct rpc_clnt *clnt;
225 
226         spin_lock(&sn->rpc_client_lock);
227         list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
228                 if (rpc_clnt_skip_event(clnt, event))
229                         continue;
230                 spin_unlock(&sn->rpc_client_lock);
231                 return clnt;
232         }
233         spin_unlock(&sn->rpc_client_lock);
234         return NULL;
235 }
236 
237 static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
238                             void *ptr)
239 {
240         struct super_block *sb = ptr;
241         struct rpc_clnt *clnt;
242         int error = 0;
243 
244         while ((clnt = rpc_get_client_for_event(sb->s_fs_info, event))) {
245                 error = __rpc_pipefs_event(clnt, event, sb);
246                 if (error)
247                         break;
248         }
249         return error;
250 }
251 
252 static struct notifier_block rpc_clients_block = {
253         .notifier_call  = rpc_pipefs_event,
254         .priority       = SUNRPC_PIPEFS_RPC_PRIO,
255 };
256 
257 int rpc_clients_notifier_register(void)
258 {
259         return rpc_pipefs_notifier_register(&rpc_clients_block);
260 }
261 
262 void rpc_clients_notifier_unregister(void)
263 {
264         return rpc_pipefs_notifier_unregister(&rpc_clients_block);
265 }
266 
267 static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt,
268                 struct rpc_xprt *xprt,
269                 const struct rpc_timeout *timeout)
270 {
271         struct rpc_xprt *old;
272 
273         spin_lock(&clnt->cl_lock);
274         old = rcu_dereference_protected(clnt->cl_xprt,
275                         lockdep_is_held(&clnt->cl_lock));
276 
277         if (!xprt_bound(xprt))
278                 clnt->cl_autobind = 1;
279 
280         clnt->cl_timeout = timeout;
281         rcu_assign_pointer(clnt->cl_xprt, xprt);
282         spin_unlock(&clnt->cl_lock);
283 
284         return old;
285 }
286 
287 static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
288 {
289         clnt->cl_nodelen = strlcpy(clnt->cl_nodename,
290                         nodename, sizeof(clnt->cl_nodename));
291 }
292 
293 static int rpc_client_register(struct rpc_clnt *clnt,
294                                rpc_authflavor_t pseudoflavor,
295                                const char *client_name)
296 {
297         struct rpc_auth_create_args auth_args = {
298                 .pseudoflavor = pseudoflavor,
299                 .target_name = client_name,
300         };
301         struct rpc_auth *auth;
302         struct net *net = rpc_net_ns(clnt);
303         struct super_block *pipefs_sb;
304         int err;
305 
306         pipefs_sb = rpc_get_sb_net(net);
307         if (pipefs_sb) {
308                 err = rpc_setup_pipedir(pipefs_sb, clnt);
309                 if (err)
310                         goto out;
311         }
312 
313         rpc_register_client(clnt);
314         if (pipefs_sb)
315                 rpc_put_sb_net(net);
316 
317         auth = rpcauth_create(&auth_args, clnt);
318         if (IS_ERR(auth)) {
319                 dprintk("RPC:       Couldn't create auth handle (flavor %u)\n",
320                                 pseudoflavor);
321                 err = PTR_ERR(auth);
322                 goto err_auth;
323         }
324         return 0;
325 err_auth:
326         pipefs_sb = rpc_get_sb_net(net);
327         rpc_unregister_client(clnt);
328         __rpc_clnt_remove_pipedir(clnt);
329 out:
330         if (pipefs_sb)
331                 rpc_put_sb_net(net);
332         return err;
333 }
334 
335 static DEFINE_IDA(rpc_clids);
336 
337 static int rpc_alloc_clid(struct rpc_clnt *clnt)
338 {
339         int clid;
340 
341         clid = ida_simple_get(&rpc_clids, 0, 0, GFP_KERNEL);
342         if (clid < 0)
343                 return clid;
344         clnt->cl_clid = clid;
345         return 0;
346 }
347 
348 static void rpc_free_clid(struct rpc_clnt *clnt)
349 {
350         ida_simple_remove(&rpc_clids, clnt->cl_clid);
351 }
352 
353 static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
354                 struct rpc_xprt *xprt,
355                 struct rpc_clnt *parent)
356 {
357         const struct rpc_program *program = args->program;
358         const struct rpc_version *version;
359         struct rpc_clnt *clnt = NULL;
360         const struct rpc_timeout *timeout;
361         const char *nodename = args->nodename;
362         int err;
363 
364         /* sanity check the name before trying to print it */
365         dprintk("RPC:       creating %s client for %s (xprt %p)\n",
366                         program->name, args->servername, xprt);
367 
368         err = rpciod_up();
369         if (err)
370                 goto out_no_rpciod;
371 
372         err = -EINVAL;
373         if (args->version >= program->nrvers)
374                 goto out_err;
375         version = program->version[args->version];
376         if (version == NULL)
377                 goto out_err;
378 
379         err = -ENOMEM;
380         clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
381         if (!clnt)
382                 goto out_err;
383         clnt->cl_parent = parent ? : clnt;
384 
385         err = rpc_alloc_clid(clnt);
386         if (err)
387                 goto out_no_clid;
388 
389         clnt->cl_procinfo = version->procs;
390         clnt->cl_maxproc  = version->nrprocs;
391         clnt->cl_prog     = args->prognumber ? : program->number;
392         clnt->cl_vers     = version->number;
393         clnt->cl_stats    = program->stats;
394         clnt->cl_metrics  = rpc_alloc_iostats(clnt);
395         rpc_init_pipe_dir_head(&clnt->cl_pipedir_objects);
396         err = -ENOMEM;
397         if (clnt->cl_metrics == NULL)
398                 goto out_no_stats;
399         clnt->cl_program  = program;
400         INIT_LIST_HEAD(&clnt->cl_tasks);
401         spin_lock_init(&clnt->cl_lock);
402 
403         timeout = xprt->timeout;
404         if (args->timeout != NULL) {
405                 memcpy(&clnt->cl_timeout_default, args->timeout,
406                                 sizeof(clnt->cl_timeout_default));
407                 timeout = &clnt->cl_timeout_default;
408         }
409 
410         rpc_clnt_set_transport(clnt, xprt, timeout);
411 
412         clnt->cl_rtt = &clnt->cl_rtt_default;
413         rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);
414 
415         atomic_set(&clnt->cl_count, 1);
416 
417         if (nodename == NULL)
418                 nodename = utsname()->nodename;
419         /* save the nodename */
420         rpc_clnt_set_nodename(clnt, nodename);
421 
422         err = rpc_client_register(clnt, args->authflavor, args->client_name);
423         if (err)
424                 goto out_no_path;
425         if (parent)
426                 atomic_inc(&parent->cl_count);
427         return clnt;
428 
429 out_no_path:
430         rpc_free_iostats(clnt->cl_metrics);
431 out_no_stats:
432         rpc_free_clid(clnt);
433 out_no_clid:
434         kfree(clnt);
435 out_err:
436         rpciod_down();
437 out_no_rpciod:
438         xprt_put(xprt);
439         return ERR_PTR(err);
440 }
441 
442 static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
443                                         struct rpc_xprt *xprt)
444 {
445         struct rpc_clnt *clnt = NULL;
446 
447         clnt = rpc_new_client(args, xprt, NULL);
448         if (IS_ERR(clnt))
449                 return clnt;
450 
451         if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
452                 int err = rpc_ping(clnt);
453                 if (err != 0) {
454                         rpc_shutdown_client(clnt);
455                         return ERR_PTR(err);
456                 }
457         }
458 
459         clnt->cl_softrtry = 1;
460         if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
461                 clnt->cl_softrtry = 0;
462 
463         if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
464                 clnt->cl_autobind = 1;
465         if (args->flags & RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT)
466                 clnt->cl_noretranstimeo = 1;
467         if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
468                 clnt->cl_discrtry = 1;
469         if (!(args->flags & RPC_CLNT_CREATE_QUIET))
470                 clnt->cl_chatty = 1;
471 
472         return clnt;
473 }
474 
475 /**
476  * rpc_create - create an RPC client and transport with one call
477  * @args: rpc_clnt create argument structure
478  *
479  * Creates and initializes an RPC transport and an RPC client.
480  *
481  * It can ping the server in order to determine if it is up, and to see if
482  * it supports this program and version.  RPC_CLNT_CREATE_NOPING disables
483  * this behavior so asynchronous tasks can also use rpc_create.
484  */
485 struct rpc_clnt *rpc_create(struct rpc_create_args *args)
486 {
487         struct rpc_xprt *xprt;
488         struct xprt_create xprtargs = {
489                 .net = args->net,
490                 .ident = args->protocol,
491                 .srcaddr = args->saddress,
492                 .dstaddr = args->address,
493                 .addrlen = args->addrsize,
494                 .servername = args->servername,
495                 .bc_xprt = args->bc_xprt,
496         };
497         char servername[48];
498 
499         if (args->bc_xprt) {
500                 WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
501                 xprt = args->bc_xprt->xpt_bc_xprt;
502                 if (xprt) {
503                         xprt_get(xprt);
504                         return rpc_create_xprt(args, xprt);
505                 }
506         }
507 
508         if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
509                 xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
510         if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
511                 xprtargs.flags |= XPRT_CREATE_NO_IDLE_TIMEOUT;
512         /*
513          * If the caller chooses not to specify a hostname, whip
514          * up a string representation of the passed-in address.
515          */
516         if (xprtargs.servername == NULL) {
517                 struct sockaddr_un *sun =
518                                 (struct sockaddr_un *)args->address;
519                 struct sockaddr_in *sin =
520                                 (struct sockaddr_in *)args->address;
521                 struct sockaddr_in6 *sin6 =
522                                 (struct sockaddr_in6 *)args->address;
523 
524                 servername[0] = '\0';
525                 switch (args->address->sa_family) {
526                 case AF_LOCAL:
527                         snprintf(servername, sizeof(servername), "%s",
528                                  sun->sun_path);
529                         break;
530                 case AF_INET:
531                         snprintf(servername, sizeof(servername), "%pI4",
532                                  &sin->sin_addr.s_addr);
533                         break;
534                 case AF_INET6:
535                         snprintf(servername, sizeof(servername), "%pI6",
536                                  &sin6->sin6_addr);
537                         break;
538                 default:
539                         /* caller wants default server name, but
540                          * address family isn't recognized. */
541                         return ERR_PTR(-EINVAL);
542                 }
543                 xprtargs.servername = servername;
544         }
545 
546         xprt = xprt_create_transport(&xprtargs);
547         if (IS_ERR(xprt))
548                 return (struct rpc_clnt *)xprt;
549 
550         /*
551          * By default, kernel RPC client connects from a reserved port.
552          * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
553          * but it is always enabled for rpciod, which handles the connect
554          * operation.
555          */
556         xprt->resvport = 1;
557         if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
558                 xprt->resvport = 0;
559 
560         return rpc_create_xprt(args, xprt);
561 }
562 EXPORT_SYMBOL_GPL(rpc_create);
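
/*
 * Editorial example (not part of clnt.c): a minimal sketch of how a
 * kernel consumer might fill in rpc_create_args and call rpc_create().
 * The helper name, the choice of XPRT_TRANSPORT_TCP and RPC_AUTH_UNIX,
 * and the use of version index 0 are illustrative assumptions; real
 * callers supply their own rpc_program, address and flags.  When no
 * servername is given, rpc_create() derives one from the address as
 * shown above.
 */
static struct rpc_clnt *example_create_client(struct net *net,
                                              struct sockaddr_in *sin,
                                              const struct rpc_program *example_program)
{
        struct rpc_create_args args = {
                .net            = net,
                .protocol       = XPRT_TRANSPORT_TCP,   /* assumed transport */
                .address        = (struct sockaddr *)sin,
                .addrsize       = sizeof(*sin),
                .program        = example_program,
                .version        = 0,    /* index into example_program->version[] */
                .authflavor     = RPC_AUTH_UNIX,        /* assumed security flavor */
                .flags          = RPC_CLNT_CREATE_NOPING,
        };

        /* Returns a ready-to-use client or an ERR_PTR() on failure. */
        return rpc_create(&args);
}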
563 
564 /*
565  * This function clones the RPC client structure. It allows us to share the
566  * same transport while varying parameters such as the authentication
567  * flavour.
568  */
569 static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
570                                            struct rpc_clnt *clnt)
571 {
572         struct rpc_xprt *xprt;
573         struct rpc_clnt *new;
574         int err;
575 
576         err = -ENOMEM;
577         rcu_read_lock();
578         xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
579         rcu_read_unlock();
580         if (xprt == NULL)
581                 goto out_err;
582         args->servername = xprt->servername;
583         args->nodename = clnt->cl_nodename;
584 
585         new = rpc_new_client(args, xprt, clnt);
586         if (IS_ERR(new)) {
587                 err = PTR_ERR(new);
588                 goto out_err;
589         }
590 
591         /* Turn off autobind on clones */
592         new->cl_autobind = 0;
593         new->cl_softrtry = clnt->cl_softrtry;
594         new->cl_noretranstimeo = clnt->cl_noretranstimeo;
595         new->cl_discrtry = clnt->cl_discrtry;
596         new->cl_chatty = clnt->cl_chatty;
597         return new;
598 
599 out_err:
600         dprintk("RPC:       %s: returned error %d\n", __func__, err);
601         return ERR_PTR(err);
602 }
603 
604 /**
605  * rpc_clone_client - Clone an RPC client structure
606  *
607  * @clnt: RPC client whose parameters are copied
608  *
609  * Returns a fresh RPC client or an ERR_PTR.
610  */
611 struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt)
612 {
613         struct rpc_create_args args = {
614                 .program        = clnt->cl_program,
615                 .prognumber     = clnt->cl_prog,
616                 .version        = clnt->cl_vers,
617                 .authflavor     = clnt->cl_auth->au_flavor,
618         };
619         return __rpc_clone_client(&args, clnt);
620 }
621 EXPORT_SYMBOL_GPL(rpc_clone_client);
622 
623 /**
624  * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth
625  *
626  * @clnt: RPC client whose parameters are copied
627  * @flavor: security flavor for new client
628  *
629  * Returns a fresh RPC client or an ERR_PTR.
630  */
631 struct rpc_clnt *
632 rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
633 {
634         struct rpc_create_args args = {
635                 .program        = clnt->cl_program,
636                 .prognumber     = clnt->cl_prog,
637                 .version        = clnt->cl_vers,
638                 .authflavor     = flavor,
639         };
640         return __rpc_clone_client(&args, clnt);
641 }
642 EXPORT_SYMBOL_GPL(rpc_clone_client_set_auth);
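
/*
 * Editorial example (not part of clnt.c): cloning an existing client so
 * the same transport can be used under a different security flavor.
 * The helper name and the choice of RPC_AUTH_UNIX are illustrative
 * assumptions.
 */
static struct rpc_clnt *example_clone_with_unix_auth(struct rpc_clnt *clnt)
{
        /*
         * The clone shares clnt's transport but gets its own rpc_auth;
         * release it with rpc_shutdown_client() when no longer needed.
         */
        return rpc_clone_client_set_auth(clnt, RPC_AUTH_UNIX);
}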
643 
644 /**
645  * rpc_switch_client_transport: switch the RPC transport on the fly
646  * @clnt: pointer to a struct rpc_clnt
647  * @args: pointer to the new transport arguments
648  * @timeout: pointer to the new timeout parameters
649  *
650  * This function allows the caller to switch the RPC transport for the
651  * rpc_clnt structure 'clnt' to allow it to connect to a mirrored NFS
652  * server, for instance.  It assumes that the caller has ensured that
653  * there are no active RPC tasks by using some form of locking.
654  *
655  * Returns zero if "clnt" is now using the new xprt.  Otherwise a
656  * negative errno is returned, and "clnt" continues to use the old
657  * xprt.
658  */
659 int rpc_switch_client_transport(struct rpc_clnt *clnt,
660                 struct xprt_create *args,
661                 const struct rpc_timeout *timeout)
662 {
663         const struct rpc_timeout *old_timeo;
664         rpc_authflavor_t pseudoflavor;
665         struct rpc_xprt *xprt, *old;
666         struct rpc_clnt *parent;
667         int err;
668 
669         xprt = xprt_create_transport(args);
670         if (IS_ERR(xprt)) {
671                 dprintk("RPC:       failed to create new xprt for clnt %p\n",
672                         clnt);
673                 return PTR_ERR(xprt);
674         }
675 
676         pseudoflavor = clnt->cl_auth->au_flavor;
677 
678         old_timeo = clnt->cl_timeout;
679         old = rpc_clnt_set_transport(clnt, xprt, timeout);
680 
681         rpc_unregister_client(clnt);
682         __rpc_clnt_remove_pipedir(clnt);
683 
684         /*
685          * A new transport was created.  "clnt" therefore
686          * becomes the root of a new cl_parent tree.  clnt's
687          * children, if it has any, still point to the old xprt.
688          */
689         parent = clnt->cl_parent;
690         clnt->cl_parent = clnt;
691 
692         /*
693          * The old rpc_auth cache cannot be re-used.  GSS
694          * contexts in particular are between a single
695          * client and server.
696          */
697         err = rpc_client_register(clnt, pseudoflavor, NULL);
698         if (err)
699                 goto out_revert;
700 
701         synchronize_rcu();
702         if (parent != clnt)
703                 rpc_release_client(parent);
704         xprt_put(old);
705         dprintk("RPC:       replaced xprt for clnt %p\n", clnt);
706         return 0;
707 
708 out_revert:
709         rpc_clnt_set_transport(clnt, old, old_timeo);
710         clnt->cl_parent = parent;
711         rpc_client_register(clnt, pseudoflavor, NULL);
712         xprt_put(xprt);
713         dprintk("RPC:       failed to switch xprt for clnt %p\n", clnt);
714         return err;
715 }
716 EXPORT_SYMBOL_GPL(rpc_switch_client_transport);
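
/*
 * Editorial example (not part of clnt.c): a sketch of switching a live
 * client to a new TCP destination, e.g. a mirrored server.  The helper
 * name, the XPRT_TRANSPORT_TCP ident and the reuse of the client's
 * current timeout are illustrative assumptions; as the kerneldoc above
 * notes, the caller must already have quiesced all RPC tasks.
 */
static int example_switch_transport(struct rpc_clnt *clnt,
                                    struct sockaddr *sap, size_t salen)
{
        struct xprt_create xprtargs = {
                .ident          = XPRT_TRANSPORT_TCP,   /* assumed transport type */
                .net            = rpc_net_ns(clnt),
                .dstaddr        = sap,
                .addrlen        = salen,
        };

        /* Keep the client's current timeout parameters across the switch. */
        return rpc_switch_client_transport(clnt, &xprtargs, clnt->cl_timeout);
}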
717 
718 /*
719  * Kill all tasks for the given client.
720  * XXX: kill their descendants as well?
721  */
722 void rpc_killall_tasks(struct rpc_clnt *clnt)
723 {
724         struct rpc_task *rovr;
725 
726 
727         if (list_empty(&clnt->cl_tasks))
728                 return;
729         dprintk("RPC:       killing all tasks for client %p\n", clnt);
 730         /*
 731          * Hold clnt->cl_lock so the cl_tasks list cannot change under us.
 732          */
733         spin_lock(&clnt->cl_lock);
734         list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
735                 if (!RPC_IS_ACTIVATED(rovr))
736                         continue;
737                 if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
738                         rovr->tk_flags |= RPC_TASK_KILLED;
739                         rpc_exit(rovr, -EIO);
740                         if (RPC_IS_QUEUED(rovr))
741                                 rpc_wake_up_queued_task(rovr->tk_waitqueue,
742                                                         rovr);
743                 }
744         }
745         spin_unlock(&clnt->cl_lock);
746 }
747 EXPORT_SYMBOL_GPL(rpc_killall_tasks);
748 
749 /*
750  * Properly shut down an RPC client, terminating all outstanding
751  * requests.
752  */
753 void rpc_shutdown_client(struct rpc_clnt *clnt)
754 {
755         might_sleep();
756 
757         dprintk_rcu("RPC:       shutting down %s client for %s\n",
758                         clnt->cl_program->name,
759                         rcu_dereference(clnt->cl_xprt)->servername);
760 
761         while (!list_empty(&clnt->cl_tasks)) {
762                 rpc_killall_tasks(clnt);
763                 wait_event_timeout(destroy_wait,
764                         list_empty(&clnt->cl_tasks), 1*HZ);
765         }
766 
767         rpc_release_client(clnt);
768 }
769 EXPORT_SYMBOL_GPL(rpc_shutdown_client);
770 
771 /*
772  * Free an RPC client
773  */
774 static struct rpc_clnt *
775 rpc_free_client(struct rpc_clnt *clnt)
776 {
777         struct rpc_clnt *parent = NULL;
778 
779         dprintk_rcu("RPC:       destroying %s client for %s\n",
780                         clnt->cl_program->name,
781                         rcu_dereference(clnt->cl_xprt)->servername);
782         if (clnt->cl_parent != clnt)
783                 parent = clnt->cl_parent;
784         rpc_clnt_remove_pipedir(clnt);
785         rpc_unregister_client(clnt);
786         rpc_free_iostats(clnt->cl_metrics);
787         clnt->cl_metrics = NULL;
788         xprt_put(rcu_dereference_raw(clnt->cl_xprt));
789         rpciod_down();
790         rpc_free_clid(clnt);
791         kfree(clnt);
792         return parent;
793 }
794 
795 /*
796  * Free an RPC client
797  */
798 static struct rpc_clnt * 
799 rpc_free_auth(struct rpc_clnt *clnt)
800 {
801         if (clnt->cl_auth == NULL)
802                 return rpc_free_client(clnt);
803 
804         /*
805          * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
806          *       release remaining GSS contexts. This mechanism ensures
807          *       that it can do so safely.
808          */
809         atomic_inc(&clnt->cl_count);
810         rpcauth_release(clnt->cl_auth);
811         clnt->cl_auth = NULL;
812         if (atomic_dec_and_test(&clnt->cl_count))
813                 return rpc_free_client(clnt);
814         return NULL;
815 }
816 
817 /*
818  * Release reference to the RPC client
819  */
820 void
821 rpc_release_client(struct rpc_clnt *clnt)
822 {
823         dprintk("RPC:       rpc_release_client(%p)\n", clnt);
824 
825         do {
826                 if (list_empty(&clnt->cl_tasks))
827                         wake_up(&destroy_wait);
828                 if (!atomic_dec_and_test(&clnt->cl_count))
829                         break;
830                 clnt = rpc_free_auth(clnt);
831         } while (clnt != NULL);
832 }
833 EXPORT_SYMBOL_GPL(rpc_release_client);
834 
835 /**
836  * rpc_bind_new_program - bind a new RPC program to an existing client
837  * @old: old rpc_client
838  * @program: rpc program to set
839  * @vers: rpc program version
840  *
841  * Clones the rpc client and sets up a new RPC program. This is mainly
842  * of use for enabling different RPC programs to share the same transport.
843  * The Sun NFSv2/v3 ACL protocol can do this.
844  */
845 struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
846                                       const struct rpc_program *program,
847                                       u32 vers)
848 {
849         struct rpc_create_args args = {
850                 .program        = program,
851                 .prognumber     = program->number,
852                 .version        = vers,
853                 .authflavor     = old->cl_auth->au_flavor,
854         };
855         struct rpc_clnt *clnt;
856         int err;
857 
858         clnt = __rpc_clone_client(&args, old);
859         if (IS_ERR(clnt))
860                 goto out;
861         err = rpc_ping(clnt);
862         if (err != 0) {
863                 rpc_shutdown_client(clnt);
864                 clnt = ERR_PTR(err);
865         }
866 out:
867         return clnt;
868 }
869 EXPORT_SYMBOL_GPL(rpc_bind_new_program);
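
/*
 * Editorial example (not part of clnt.c): binding a second RPC program
 * to a client's existing transport, in the spirit of the NFSv3 ACL
 * protocol mentioned above.  "example_acl_program" and the version
 * number 3 are hypothetical placeholders.
 */
static struct rpc_clnt *
example_bind_acl_program(struct rpc_clnt *nfs_clnt,
                         const struct rpc_program *example_acl_program)
{
        /* rpc_bind_new_program() pings the server, so this can sleep. */
        return rpc_bind_new_program(nfs_clnt, example_acl_program, 3);
}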
870 
871 void rpc_task_release_client(struct rpc_task *task)
872 {
873         struct rpc_clnt *clnt = task->tk_client;
874 
875         if (clnt != NULL) {
876                 /* Remove from client task list */
877                 spin_lock(&clnt->cl_lock);
878                 list_del(&task->tk_task);
879                 spin_unlock(&clnt->cl_lock);
880                 task->tk_client = NULL;
881 
882                 rpc_release_client(clnt);
883         }
884 }
885 
886 static
887 void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
888 {
889         if (clnt != NULL) {
890                 rpc_task_release_client(task);
891                 task->tk_client = clnt;
892                 atomic_inc(&clnt->cl_count);
893                 if (clnt->cl_softrtry)
894                         task->tk_flags |= RPC_TASK_SOFT;
895                 if (clnt->cl_noretranstimeo)
896                         task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
897                 if (sk_memalloc_socks()) {
898                         struct rpc_xprt *xprt;
899 
900                         rcu_read_lock();
901                         xprt = rcu_dereference(clnt->cl_xprt);
902                         if (xprt->swapper)
903                                 task->tk_flags |= RPC_TASK_SWAPPER;
904                         rcu_read_unlock();
905                 }
906                 /* Add to the client's list of all tasks */
907                 spin_lock(&clnt->cl_lock);
908                 list_add_tail(&task->tk_task, &clnt->cl_tasks);
909                 spin_unlock(&clnt->cl_lock);
910         }
911 }
912 
913 void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt)
914 {
915         rpc_task_release_client(task);
916         rpc_task_set_client(task, clnt);
917 }
918 EXPORT_SYMBOL_GPL(rpc_task_reset_client);
919 
920 
921 static void
922 rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
923 {
924         if (msg != NULL) {
925                 task->tk_msg.rpc_proc = msg->rpc_proc;
926                 task->tk_msg.rpc_argp = msg->rpc_argp;
927                 task->tk_msg.rpc_resp = msg->rpc_resp;
928                 if (msg->rpc_cred != NULL)
929                         task->tk_msg.rpc_cred = get_rpccred(msg->rpc_cred);
930         }
931 }
932 
933 /*
934  * Default callback for async RPC calls
935  */
936 static void
937 rpc_default_callback(struct rpc_task *task, void *data)
938 {
939 }
940 
941 static const struct rpc_call_ops rpc_default_ops = {
942         .rpc_call_done = rpc_default_callback,
943 };
944 
945 /**
946  * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
947  * @task_setup_data: pointer to task initialisation data
948  */
949 struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
950 {
951         struct rpc_task *task;
952 
953         task = rpc_new_task(task_setup_data);
954         if (IS_ERR(task))
955                 goto out;
956 
957         rpc_task_set_client(task, task_setup_data->rpc_client);
958         rpc_task_set_rpc_message(task, task_setup_data->rpc_message);
959 
960         if (task->tk_action == NULL)
961                 rpc_call_start(task);
962 
963         atomic_inc(&task->tk_count);
964         rpc_execute(task);
965 out:
966         return task;
967 }
968 EXPORT_SYMBOL_GPL(rpc_run_task);
969 
970 /**
971  * rpc_call_sync - Perform a synchronous RPC call
972  * @clnt: pointer to RPC client
973  * @msg: RPC call parameters
974  * @flags: RPC call flags
975  */
976 int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
977 {
978         struct rpc_task *task;
979         struct rpc_task_setup task_setup_data = {
980                 .rpc_client = clnt,
981                 .rpc_message = msg,
982                 .callback_ops = &rpc_default_ops,
983                 .flags = flags,
984         };
985         int status;
986 
987         WARN_ON_ONCE(flags & RPC_TASK_ASYNC);
988         if (flags & RPC_TASK_ASYNC) {
989                 rpc_release_calldata(task_setup_data.callback_ops,
990                         task_setup_data.callback_data);
991                 return -EINVAL;
992         }
993 
994         task = rpc_run_task(&task_setup_data);
995         if (IS_ERR(task))
996                 return PTR_ERR(task);
997         status = task->tk_status;
998         rpc_put_task(task);
999         return status;
1000 }
1001 EXPORT_SYMBOL_GPL(rpc_call_sync);
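
/*
 * Editorial example (not part of clnt.c): a typical synchronous call.
 * "example_proc", "argp" and "resp" stand in for a real procedure entry
 * and its XDR argument/result structures, which are defined by the RPC
 * program being spoken; RPC_TASK_SOFT is just one possible flag.
 */
static int example_call_sync(struct rpc_clnt *clnt,
                             struct rpc_procinfo *example_proc,
                             void *argp, void *resp)
{
        struct rpc_message msg = {
                .rpc_proc       = example_proc,
                .rpc_argp       = argp,
                .rpc_resp       = resp,
        };

        /* Blocks until the reply has been received and decoded. */
        return rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
}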
1002 
1003 /**
1004  * rpc_call_async - Perform an asynchronous RPC call
1005  * @clnt: pointer to RPC client
1006  * @msg: RPC call parameters
1007  * @flags: RPC call flags
1008  * @tk_ops: RPC call ops
1009  * @data: user call data
1010  */
1011 int
1012 rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
1013                const struct rpc_call_ops *tk_ops, void *data)
1014 {
1015         struct rpc_task *task;
1016         struct rpc_task_setup task_setup_data = {
1017                 .rpc_client = clnt,
1018                 .rpc_message = msg,
1019                 .callback_ops = tk_ops,
1020                 .callback_data = data,
1021                 .flags = flags|RPC_TASK_ASYNC,
1022         };
1023 
1024         task = rpc_run_task(&task_setup_data);
1025         if (IS_ERR(task))
1026                 return PTR_ERR(task);
1027         rpc_put_task(task);
1028         return 0;
1029 }
1030 EXPORT_SYMBOL_GPL(rpc_call_async);
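
/*
 * Editorial example (not part of clnt.c): the asynchronous counterpart.
 * "example_done" and "example_async_ops" are hypothetical; the
 * completion callback runs later in rpciod context once the task
 * finishes.
 */
static void example_done(struct rpc_task *task, void *calldata)
{
        if (task->tk_status < 0)
                pr_debug("example RPC failed: %d\n", task->tk_status);
        /* release calldata here if it was dynamically allocated */
}

static const struct rpc_call_ops example_async_ops = {
        .rpc_call_done  = example_done,
};

static int example_call_async(struct rpc_clnt *clnt,
                              const struct rpc_message *msg, void *calldata)
{
        /* Returns once the task is queued; example_done() runs later. */
        return rpc_call_async(clnt, msg, 0, &example_async_ops, calldata);
}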
1031 
1032 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1033 /**
1034  * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
1035  * rpc_execute against it
1036  * @req: RPC request
1037  * @tk_ops: RPC call ops
1038  */
1039 struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
1040                                 const struct rpc_call_ops *tk_ops)
1041 {
1042         struct rpc_task *task;
1043         struct xdr_buf *xbufp = &req->rq_snd_buf;
1044         struct rpc_task_setup task_setup_data = {
1045                 .callback_ops = tk_ops,
1046         };
1047 
1048         dprintk("RPC: rpc_run_bc_task req= %p\n", req);
1049         /*
1050          * Create an rpc_task to send the data
1051          */
1052         task = rpc_new_task(&task_setup_data);
1053         if (IS_ERR(task)) {
1054                 xprt_free_bc_request(req);
1055                 goto out;
1056         }
1057         task->tk_rqstp = req;
1058 
1059         /*
1060          * Set up the xdr_buf length.
1061          * This also indicates that the buffer is XDR encoded already.
1062          */
1063         xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
1064                         xbufp->tail[0].iov_len;
1065 
1066         task->tk_action = call_bc_transmit;
1067         atomic_inc(&task->tk_count);
1068         WARN_ON_ONCE(atomic_read(&task->tk_count) != 2);
1069         rpc_execute(task);
1070 
1071 out:
1072         dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
1073         return task;
1074 }
1075 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
1076 
1077 void
1078 rpc_call_start(struct rpc_task *task)
1079 {
1080         task->tk_action = call_start;
1081 }
1082 EXPORT_SYMBOL_GPL(rpc_call_start);
1083 
1084 /**
1085  * rpc_peeraddr - extract remote peer address from clnt's xprt
1086  * @clnt: RPC client structure
1087  * @buf: target buffer
1088  * @bufsize: length of target buffer
1089  *
1090  * Returns the number of bytes that are actually in the stored address.
1091  */
1092 size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
1093 {
1094         size_t bytes;
1095         struct rpc_xprt *xprt;
1096 
1097         rcu_read_lock();
1098         xprt = rcu_dereference(clnt->cl_xprt);
1099 
1100         bytes = xprt->addrlen;
1101         if (bytes > bufsize)
1102                 bytes = bufsize;
1103         memcpy(buf, &xprt->addr, bytes);
1104         rcu_read_unlock();
1105 
1106         return bytes;
1107 }
1108 EXPORT_SYMBOL_GPL(rpc_peeraddr);
1109 
1110 /**
1111  * rpc_peeraddr2str - return remote peer address in printable format
1112  * @clnt: RPC client structure
1113  * @format: address format
1114  *
1115  * NB: the lifetime of the memory referenced by the returned pointer is
1116  * the same as the rpc_xprt itself.  As long as the caller uses this
1117  * pointer, it must hold the RCU read lock.
1118  */
1119 const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
1120                              enum rpc_display_format_t format)
1121 {
1122         struct rpc_xprt *xprt;
1123 
1124         xprt = rcu_dereference(clnt->cl_xprt);
1125 
1126         if (xprt->address_strings[format] != NULL)
1127                 return xprt->address_strings[format];
1128         else
1129                 return "unprintable";
1130 }
1131 EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
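
/*
 * Editorial example (not part of clnt.c): as the kerneldoc above notes,
 * the string returned by rpc_peeraddr2str() lives in the rpc_xprt, so
 * the caller must hold the RCU read lock while using it.  Copying it
 * into a caller-owned buffer, as this hypothetical helper does, is one
 * safe pattern.
 */
static void example_log_peer(struct rpc_clnt *clnt, char *buf, size_t buflen)
{
        rcu_read_lock();
        snprintf(buf, buflen, "%s",
                 rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
        rcu_read_unlock();
}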
1132 
1133 static const struct sockaddr_in rpc_inaddr_loopback = {
1134         .sin_family             = AF_INET,
1135         .sin_addr.s_addr        = htonl(INADDR_ANY),
1136 };
1137 
1138 static const struct sockaddr_in6 rpc_in6addr_loopback = {
1139         .sin6_family            = AF_INET6,
1140         .sin6_addr              = IN6ADDR_ANY_INIT,
1141 };
1142 
1143 /*
1144  * Try a getsockname() on a connected datagram socket.  Using a
1145  * connected datagram socket prevents leaving a socket in TIME_WAIT.
1146  * This conserves the ephemeral port number space.
1147  *
1148  * Returns zero and fills in "buf" if successful; otherwise, a
1149  * negative errno is returned.
1150  */
1151 static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen,
1152                         struct sockaddr *buf, int buflen)
1153 {
1154         struct socket *sock;
1155         int err;
1156 
1157         err = __sock_create(net, sap->sa_family,
1158                                 SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
1159         if (err < 0) {
1160                 dprintk("RPC:       can't create UDP socket (%d)\n", err);
1161                 goto out;
1162         }
1163 
1164         switch (sap->sa_family) {
1165         case AF_INET:
1166                 err = kernel_bind(sock,
1167                                 (struct sockaddr *)&rpc_inaddr_loopback,
1168                                 sizeof(rpc_inaddr_loopback));
1169                 break;
1170         case AF_INET6:
1171                 err = kernel_bind(sock,
1172                                 (struct sockaddr *)&rpc_in6addr_loopback,
1173                                 sizeof(rpc_in6addr_loopback));
1174                 break;
1175         default:
1176                 err = -EAFNOSUPPORT;
1177                 goto out;
1178         }
1179         if (err < 0) {
1180                 dprintk("RPC:       can't bind UDP socket (%d)\n", err);
1181                 goto out_release;
1182         }
1183 
1184         err = kernel_connect(sock, sap, salen, 0);
1185         if (err < 0) {
1186                 dprintk("RPC:       can't connect UDP socket (%d)\n", err);
1187                 goto out_release;
1188         }
1189 
1190         err = kernel_getsockname(sock, buf, &buflen);
1191         if (err < 0) {
1192                 dprintk("RPC:       getsockname failed (%d)\n", err);
1193                 goto out_release;
1194         }
1195 
1196         err = 0;
1197         if (buf->sa_family == AF_INET6) {
1198                 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)buf;
1199                 sin6->sin6_scope_id = 0;
1200         }
1201         dprintk("RPC:       %s succeeded\n", __func__);
1202 
1203 out_release:
1204         sock_release(sock);
1205 out:
1206         return err;
1207 }
1208 
1209 /*
1210  * Scraping a connected socket failed, so we don't have a usable
1211  * local address.  Fallback: generate an address that will prevent
1212  * the server from calling us back.
1213  *
1214  * Returns zero and fills in "buf" if successful; otherwise, a
1215  * negative errno is returned.
1216  */
1217 static int rpc_anyaddr(int family, struct sockaddr *buf, size_t buflen)
1218 {
1219         switch (family) {
1220         case AF_INET:
1221                 if (buflen < sizeof(rpc_inaddr_loopback))
1222                         return -EINVAL;
1223                 memcpy(buf, &rpc_inaddr_loopback,
1224                                 sizeof(rpc_inaddr_loopback));
1225                 break;
1226         case AF_INET6:
1227                 if (buflen < sizeof(rpc_in6addr_loopback))
1228                         return -EINVAL;
1229                 memcpy(buf, &rpc_in6addr_loopback,
1230                                 sizeof(rpc_in6addr_loopback));
                     break;
1231         default:
1232                 dprintk("RPC:       %s: address family not supported\n",
1233                         __func__);
1234                 return -EAFNOSUPPORT;
1235         }
1236         dprintk("RPC:       %s: succeeded\n", __func__);
1237         return 0;
1238 }
1239 
1240 /**
1241  * rpc_localaddr - discover local endpoint address for an RPC client
1242  * @clnt: RPC client structure
1243  * @buf: target buffer
1244  * @buflen: size of target buffer, in bytes
1245  *
1246  * Returns zero and fills in "buf" and "buflen" if successful;
1247  * otherwise, a negative errno is returned.
1248  *
1249  * This works even if the underlying transport is not currently connected,
1250  * or if the upper layer never previously provided a source address.
1251  *
1252  * The result of this function call is transient: multiple calls in
1253  * succession may give different results, depending on how local
1254  * networking configuration changes over time.
1255  */
1256 int rpc_localaddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t buflen)
1257 {
1258         struct sockaddr_storage address;
1259         struct sockaddr *sap = (struct sockaddr *)&address;
1260         struct rpc_xprt *xprt;
1261         struct net *net;
1262         size_t salen;
1263         int err;
1264 
1265         rcu_read_lock();
1266         xprt = rcu_dereference(clnt->cl_xprt);
1267         salen = xprt->addrlen;
1268         memcpy(sap, &xprt->addr, salen);
1269         net = get_net(xprt->xprt_net);
1270         rcu_read_unlock();
1271 
1272         rpc_set_port(sap, 0);
1273         err = rpc_sockname(net, sap, salen, buf, buflen);
1274         put_net(net);
1275         if (err != 0)
1276                 /* Couldn't discover local address, return ANYADDR */
1277                 return rpc_anyaddr(sap->sa_family, buf, buflen);
1278         return 0;
1279 }
1280 EXPORT_SYMBOL_GPL(rpc_localaddr);
1281 
1282 void
1283 rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
1284 {
1285         struct rpc_xprt *xprt;
1286 
1287         rcu_read_lock();
1288         xprt = rcu_dereference(clnt->cl_xprt);
1289         if (xprt->ops->set_buffer_size)
1290                 xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
1291         rcu_read_unlock();
1292 }
1293 EXPORT_SYMBOL_GPL(rpc_setbufsize);
1294 
1295 /**
1296  * rpc_protocol - Get transport protocol number for an RPC client
1297  * @clnt: RPC client to query
1298  *
1299  */
1300 int rpc_protocol(struct rpc_clnt *clnt)
1301 {
1302         int protocol;
1303 
1304         rcu_read_lock();
1305         protocol = rcu_dereference(clnt->cl_xprt)->prot;
1306         rcu_read_unlock();
1307         return protocol;
1308 }
1309 EXPORT_SYMBOL_GPL(rpc_protocol);
1310 
1311 /**
1312  * rpc_net_ns - Get the network namespace for this RPC client
1313  * @clnt: RPC client to query
1314  *
1315  */
1316 struct net *rpc_net_ns(struct rpc_clnt *clnt)
1317 {
1318         struct net *ret;
1319 
1320         rcu_read_lock();
1321         ret = rcu_dereference(clnt->cl_xprt)->xprt_net;
1322         rcu_read_unlock();
1323         return ret;
1324 }
1325 EXPORT_SYMBOL_GPL(rpc_net_ns);
1326 
1327 /**
1328  * rpc_max_payload - Get maximum payload size for a transport, in bytes
1329  * @clnt: RPC client to query
1330  *
1331  * For stream transports, this is one RPC record fragment (see RFC
1332  * 1831), as we don't support multi-record requests yet.  For datagram
1333  * transports, this is the size of an IP packet minus the IP, UDP, and
1334  * RPC header sizes.
1335  */
1336 size_t rpc_max_payload(struct rpc_clnt *clnt)
1337 {
1338         size_t ret;
1339 
1340         rcu_read_lock();
1341         ret = rcu_dereference(clnt->cl_xprt)->max_payload;
1342         rcu_read_unlock();
1343         return ret;
1344 }
1345 EXPORT_SYMBOL_GPL(rpc_max_payload);
1346 
1347 /**
1348  * rpc_get_timeout - Get timeout for transport in units of HZ
1349  * @clnt: RPC client to query
1350  */
1351 unsigned long rpc_get_timeout(struct rpc_clnt *clnt)
1352 {
1353         unsigned long ret;
1354 
1355         rcu_read_lock();
1356         ret = rcu_dereference(clnt->cl_xprt)->timeout->to_initval;
1357         rcu_read_unlock();
1358         return ret;
1359 }
1360 EXPORT_SYMBOL_GPL(rpc_get_timeout);
1361 
1362 /**
1363  * rpc_force_rebind - force transport to check that remote port is unchanged
1364  * @clnt: client to rebind
1365  *
1366  */
1367 void rpc_force_rebind(struct rpc_clnt *clnt)
1368 {
1369         if (clnt->cl_autobind) {
1370                 rcu_read_lock();
1371                 xprt_clear_bound(rcu_dereference(clnt->cl_xprt));
1372                 rcu_read_unlock();
1373         }
1374 }
1375 EXPORT_SYMBOL_GPL(rpc_force_rebind);
1376 
1377 /*
1378  * Restart an (async) RPC call from the call_prepare state.
1379  * Usually called from within the exit handler.
1380  */
1381 int
1382 rpc_restart_call_prepare(struct rpc_task *task)
1383 {
1384         if (RPC_ASSASSINATED(task))
1385                 return 0;
1386         task->tk_action = call_start;
1387         task->tk_status = 0;
1388         if (task->tk_ops->rpc_call_prepare != NULL)
1389                 task->tk_action = rpc_prepare_task;
1390         return 1;
1391 }
1392 EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
1393 
1394 /*
1395  * Restart an (async) RPC call. Usually called from within the
1396  * exit handler.
1397  */
1398 int
1399 rpc_restart_call(struct rpc_task *task)
1400 {
1401         if (RPC_ASSASSINATED(task))
1402                 return 0;
1403         task->tk_action = call_start;
1404         task->tk_status = 0;
1405         return 1;
1406 }
1407 EXPORT_SYMBOL_GPL(rpc_restart_call);
1408 
1409 #ifdef RPC_DEBUG
1410 static const char *rpc_proc_name(const struct rpc_task *task)
1411 {
1412         const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
1413 
1414         if (proc) {
1415                 if (proc->p_name)
1416                         return proc->p_name;
1417                 else
1418                         return "NULL";
1419         } else
1420                 return "no proc";
1421 }
1422 #endif
1423 
1424 /*
1425  * 0.  Initial state
1426  *
1427  *     Other FSM states can be visited zero or more times, but
1428  *     this state is visited exactly once for each RPC.
1429  */
1430 static void
1431 call_start(struct rpc_task *task)
1432 {
1433         struct rpc_clnt *clnt = task->tk_client;
1434 
1435         dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid,
1436                         clnt->cl_program->name, clnt->cl_vers,
1437                         rpc_proc_name(task),
1438                         (RPC_IS_ASYNC(task) ? "async" : "sync"));
1439 
1440         /* Increment call count */
1441         task->tk_msg.rpc_proc->p_count++;
1442         clnt->cl_stats->rpccnt++;
1443         task->tk_action = call_reserve;
1444 }
1445 
1446 /*
1447  * 1.   Reserve an RPC call slot
1448  */
1449 static void
1450 call_reserve(struct rpc_task *task)
1451 {
1452         dprint_status(task);
1453 
1454         task->tk_status  = 0;
1455         task->tk_action  = call_reserveresult;
1456         xprt_reserve(task);
1457 }
1458 
1459 static void call_retry_reserve(struct rpc_task *task);
1460 
1461 /*
1462  * 1b.  Grok the result of xprt_reserve()
1463  */
1464 static void
1465 call_reserveresult(struct rpc_task *task)
1466 {
1467         int status = task->tk_status;
1468 
1469         dprint_status(task);
1470 
1471         /*
1472          * After a call to xprt_reserve(), we must have either
1473          * a request slot or else an error status.
1474          */
1475         task->tk_status = 0;
1476         if (status >= 0) {
1477                 if (task->tk_rqstp) {
1478                         task->tk_action = call_refresh;
1479                         return;
1480                 }
1481 
1482                 printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
1483                                 __func__, status);
1484                 rpc_exit(task, -EIO);
1485                 return;
1486         }
1487 
1488         /*
1489          * Even though there was an error, we may have acquired
1490          * a request slot somehow.  Make sure not to leak it.
1491          */
1492         if (task->tk_rqstp) {
1493                 printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
1494                                 __func__, status);
1495                 xprt_release(task);
1496         }
1497 
1498         switch (status) {
1499         case -ENOMEM:
1500                 rpc_delay(task, HZ >> 2);
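                             /* deliberate fall through: retry reserving after the delay */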
1501         case -EAGAIN:   /* woken up; retry */
1502                 task->tk_action = call_retry_reserve;
1503                 return;
1504         case -EIO:      /* probably a shutdown */
1505                 break;
1506         default:
1507                 printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
1508                                 __func__, status);
1509                 break;
1510         }
1511         rpc_exit(task, status);
1512 }
1513 
1514 /*
1515  * 1c.  Retry reserving an RPC call slot
1516  */
1517 static void
1518 call_retry_reserve(struct rpc_task *task)
1519 {
1520         dprint_status(task);
1521 
1522         task->tk_status  = 0;
1523         task->tk_action  = call_reserveresult;
1524         xprt_retry_reserve(task);
1525 }
1526 
1527 /*
1528  * 2.   Bind and/or refresh the credentials
1529  */
1530 static void
1531 call_refresh(struct rpc_task *task)
1532 {
1533         dprint_status(task);
1534 
1535         task->tk_action = call_refreshresult;
1536         task->tk_status = 0;
1537         task->tk_client->cl_stats->rpcauthrefresh++;
1538         rpcauth_refreshcred(task);
1539 }
1540 
1541 /*
1542  * 2a.  Process the results of a credential refresh
1543  */
1544 static void
1545 call_refreshresult(struct rpc_task *task)
1546 {
1547         int status = task->tk_status;
1548 
1549         dprint_status(task);
1550 
1551         task->tk_status = 0;
1552         task->tk_action = call_refresh;
1553         switch (status) {
1554         case 0:
1555                 if (rpcauth_uptodatecred(task)) {
1556                         task->tk_action = call_allocate;
1557                         return;
1558                 }
1559                 /* Use rate-limiting and a max number of retries if refresh
1560                  * had status 0 but failed to update the cred.
1561                  */
1562         case -ETIMEDOUT:
1563                 rpc_delay(task, 3*HZ);
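                             /* fall through */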
1564         case -EAGAIN:
1565                 status = -EACCES;
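                             /* fall through */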
1566         case -EKEYEXPIRED:
1567                 if (!task->tk_cred_retry)
1568                         break;
1569                 task->tk_cred_retry--;
1570                 dprintk("RPC: %5u %s: retry refresh creds\n",
1571                                 task->tk_pid, __func__);
1572                 return;
1573         }
1574         dprintk("RPC: %5u %s: refresh creds failed with error %d\n",
1575                                 task->tk_pid, __func__, status);
1576         rpc_exit(task, status);
1577 }
1578 
1579 /*
1580  * 2b.  Allocate the buffer. For details, see sched.c:rpc_malloc.
1581  *      (Note: buffer memory is freed in xprt_release).
1582  */
1583 static void
1584 call_allocate(struct rpc_task *task)
1585 {
1586         unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack;
1587         struct rpc_rqst *req = task->tk_rqstp;
1588         struct rpc_xprt *xprt = req->rq_xprt;
1589         struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
1590 
1591         dprint_status(task);
1592 
1593         task->tk_status = 0;
1594         task->tk_action = call_bind;
1595 
1596         if (req->rq_buffer)
1597                 return;
1598 
1599         if (proc->p_proc != 0) {
1600                 BUG_ON(proc->p_arglen == 0);
1601                 if (proc->p_decode != NULL)
1602                         BUG_ON(proc->p_replen == 0);
1603         }
1604 
1605         /*
1606          * Calculate the size (in quads) of the RPC call
1607          * and reply headers, and convert both values
1608          * to byte sizes.
1609          */
1610         req->rq_callsize = RPC_CALLHDRSIZE + (slack << 1) + proc->p_arglen;
1611         req->rq_callsize <<= 2;
1612         req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen;
1613         req->rq_rcvsize <<= 2;
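
	/*
	 * Editor's note (worked example, not part of clnt.c): with
	 * hypothetical values -- an auth slack of 2 quads, p_arglen of
	 * 10 quads and p_replen of 20 quads -- the arithmetic above gives
	 *
	 *   rq_callsize = (RPC_CALLHDRSIZE + (2 << 1) + 10) quads,
	 *   rq_rcvsize  = (RPC_REPHDRSIZE  +  2       + 20) quads,
	 *
	 * and the "<<= 2" converts each count of 32-bit quads to bytes.
	 * The single buffer allocated below holds rq_callsize + rq_rcvsize
	 * bytes; rpc_xdr_encode() later points rq_snd_buf at the first
	 * rq_callsize bytes and rq_rcv_buf at the rest.
	 */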
1614 
1615         req->rq_buffer = xprt->ops->buf_alloc(task,
1616                                         req->rq_callsize + req->rq_rcvsize);
1617         if (req->rq_buffer != NULL)
1618                 return;
1619 
1620         dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);
1621 
1622         if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
1623                 task->tk_action = call_allocate;
1624                 rpc_delay(task, HZ>>4);
1625                 return;
1626         }
1627 
1628         rpc_exit(task, -ERESTARTSYS);
1629 }
1630 
1631 static inline int
1632 rpc_task_need_encode(struct rpc_task *task)
1633 {
1634         return task->tk_rqstp->rq_snd_buf.len == 0;
1635 }
1636 
1637 static inline void
1638 rpc_task_force_reencode(struct rpc_task *task)
1639 {
1640         task->tk_rqstp->rq_snd_buf.len = 0;
1641         task->tk_rqstp->rq_bytes_sent = 0;
1642 }
1643 
1644 static inline void
1645 rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
1646 {
1647         buf->head[0].iov_base = start;
1648         buf->head[0].iov_len = len;
1649         buf->tail[0].iov_len = 0;
1650         buf->page_len = 0;
1651         buf->flags = 0;
1652         buf->len = 0;
1653         buf->buflen = len;
1654 }
1655 
1656 /*
1657  * 3.   Encode arguments of an RPC call
1658  */
1659 static void
1660 rpc_xdr_encode(struct rpc_task *task)
1661 {
1662         struct rpc_rqst *req = task->tk_rqstp;
1663         kxdreproc_t     encode;
1664         __be32          *p;
1665 
1666         dprint_status(task);
1667 
1668         rpc_xdr_buf_init(&req->rq_snd_buf,
1669                          req->rq_buffer,
1670                          req->rq_callsize);
1671         rpc_xdr_buf_init(&req->rq_rcv_buf,
1672                          (char *)req->rq_buffer + req->rq_callsize,
1673                          req->rq_rcvsize);
1674 
1675         p = rpc_encode_header(task);
1676         if (p == NULL) {
1677                 printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n");
1678                 rpc_exit(task, -EIO);
1679                 return;
1680         }
1681 
1682         encode = task->tk_msg.rpc_proc->p_encode;
1683         if (encode == NULL)
1684                 return;
1685 
1686         task->tk_status = rpcauth_wrap_req(task, encode, req, p,
1687                         task->tk_msg.rpc_argp);
1688 }
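
/*
 * Editor's note (illustration, not part of clnt.c): rpc_xdr_encode() splits
 * the single allocation made in call_allocate() into the two xdr_bufs used
 * for this request, roughly:
 *
 *   rq_buffer: [ rq_snd_buf.head, rq_callsize bytes | rq_rcv_buf.head, rq_rcvsize bytes ]
 *
 * rpc_encode_header() writes the fixed RPC call header at the start of
 * rq_snd_buf, and the procedure's p_encode callback -- wrapped by
 * rpcauth_wrap_req() so that RPCSEC_GSS can sign or seal the arguments with
 * the correct sequence number -- serializes the arguments after it.
 */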
1689 
1690 /*
1691  * 4.   Get the server port number if not yet set
1692  */
1693 static void
1694 call_bind(struct rpc_task *task)
1695 {
1696         struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
1697 
1698         dprint_status(task);
1699 
1700         task->tk_action = call_connect;
1701         if (!xprt_bound(xprt)) {
1702                 task->tk_action = call_bind_status;
1703                 task->tk_timeout = xprt->bind_timeout;
1704                 xprt->ops->rpcbind(task);
1705         }
1706 }
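
/*
 * Editor's note (annotation, not part of clnt.c): when the transport already
 * knows the server port (xprt_bound()), call_bind() goes straight to
 * call_connect().  Otherwise it sets tk_timeout to xprt->bind_timeout and
 * hands the task to the transport's ->rpcbind() op, which queries
 * rpcbind/portmapper asynchronously; call_bind_status() then sorts out the
 * result or the timeout.
 */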
1707 
1708 /*
1709  * 4a.  Sort out bind result
1710  */
1711 static void
1712 call_bind_status(struct rpc_task *task)
1713 {
1714         int status = -EIO;
1715 
1716         if (task->tk_status >= 0) {
1717                 dprint_status(task);
1718                 task->tk_status = 0;
1719                 task->tk_action = call_connect;
1720                 return;
1721         }
1722 
1723         trace_rpc_bind_status(task);
1724         switch (task->tk_status) {
1725         case -ENOMEM:
1726                 dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
1727                 rpc_delay(task, HZ >> 2);
1728                 goto retry_timeout;
1729         case -EACCES:
1730                 dprintk("RPC: %5u remote rpcbind: RPC program/version "
1731                                 "unavailable\n", task->tk_pid);
1732                 /* fail immediately if this is an RPC ping */
1733                 if (task->tk_msg.rpc_proc->p_proc == 0) {
1734                         status = -EOPNOTSUPP;
1735                         break;
1736                 }
1737                 if (task->tk_rebind_retry == 0)
1738                         break;
1739                 task->tk_rebind_retry--;
1740                 rpc_delay(task, 3*HZ);
1741                 goto retry_timeout;
1742         case -ETIMEDOUT:
1743                 dprintk("RPC: %5u rpcbind request timed out\n",
1744                                 task->tk_pid);
1745                 goto retry_timeout;
1746         case -EPFNOSUPPORT:
1747                 /* server doesn't support any rpcbind version we know of */
1748                 dprintk("RPC: %5u unrecognized remote rpcbind service\n",
1749                                 task->tk_pid);
1750                 break;
1751         case -EPROTONOSUPPORT:
1752                 dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n",
1753                                 task->tk_pid);
1754                 goto retry_timeout;
1755         case -ECONNREFUSED:             /* connection problems */
1756         case -ECONNRESET:
1757         case -ECONNABORTED:
1758         case -ENOTCONN:
1759         case -EHOSTDOWN:
1760         case -EHOSTUNREACH:
1761         case -ENETUNREACH:
1762         case -ENOBUFS:
1763         case -EPIPE:
1764                 dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
1765                                 task->tk_pid, task->tk_status);
1766                 if (!RPC_IS_SOFTCONN(task)) {
1767                         rpc_delay(task, 5*HZ);
1768                         goto retry_timeout;
1769                 }
1770                 status = task->tk_status;
1771                 break;
1772         default:
1773                 dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
1774                                 task->tk_pid, -task->tk_status);
1775         }
1776 
1777         rpc_exit(task, status);
1778         return;
1779 
1780 retry_timeout:
1781         task->tk_status = 0;
1782         task->tk_action = call_timeout;
1783 }
1784 
1785 /*
1786  * 4b.  Connect to the RPC server
1787  */
1788 static void
1789 call_connect(struct rpc_task *task)
1790 {
1791         struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
1792 
1793         dprintk("RPC: %5u call_connect xprt %p %s connected\n",
1794                         task->tk_pid, xprt,
1795                         (xprt_connected(xprt) ? "is" : "is not"));
1796 
1797         task->tk_action = call_transmit;
1798         if (!xprt_connected(xprt)) {
1799                 task->tk_action = call_connect_status;
1800                 if (task->tk_status < 0)
1801                         return;
1802                 if (task->tk_flags & RPC_TASK_NOCONNECT) {
1803                         rpc_exit(task, -ENOTCONN);
1804                         return;
1805                 }
1806                 xprt_connect(task);
1807         }
1808 }
1809 
1810 /*
1811  * 4c.  Sort out connect result
1812  */
1813 static void
1814 call_connect_status(struct rpc_task *task)
1815 {
1816         struct rpc_clnt *clnt = task->tk_client;
1817         int status = task->tk_status;
1818 
1819         dprint_status(task);
1820 
1821         trace_rpc_connect_status(task, status);
1822         task->tk_status = 0;
1823         switch (status) {
1824         case -ECONNREFUSED:
1825         case -ECONNRESET:
1826         case -ECONNABORTED:
1827         case -ENETUNREACH:
1828         case -EHOSTUNREACH:
1829         case -ENOBUFS:
1830         case -EPIPE:
1831                 if (RPC_IS_SOFTCONN(task))
1832                         break;
1833                 /* retry with existing socket, after a delay */
1834                 rpc_delay(task, 3*HZ);
1835         case -EAGAIN:
1836                 /* Check for timeouts before looping back to call_bind */
1837         case -ETIMEDOUT:
1838                 task->tk_action = call_timeout;
1839                 return;
1840         case 0:
1841                 clnt->cl_stats->netreconn++;
1842                 task->tk_action = call_transmit;
1843                 return;
1844         }
1845         rpc_exit(task, status);
1846 }
1847 
1848 /*
1849  * 5.   Transmit the RPC request, and wait for reply
1850  */
1851 static void
1852 call_transmit(struct rpc_task *task)
1853 {
1854         int is_retrans = RPC_WAS_SENT(task);
1855 
1856         dprint_status(task);
1857 
1858         task->tk_action = call_status;
1859         if (task->tk_status < 0)
1860                 return;
1861         if (!xprt_prepare_transmit(task))
1862                 return;
1863         task->tk_action = call_transmit_status;
1864         /* Encode here so that rpcsec_gss can use correct sequence number. */
1865         if (rpc_task_need_encode(task)) {
1866                 rpc_xdr_encode(task);
1867                 /* Did the encode result in an error condition? */
1868                 if (task->tk_status != 0) {
1869                         /* Was the error nonfatal? */
1870                         if (task->tk_status == -EAGAIN)
1871                                 rpc_delay(task, HZ >> 4);
1872                         else
1873                                 rpc_exit(task, task->tk_status);
1874                         return;
1875                 }
1876         }
1877         xprt_transmit(task);
1878         if (task->tk_status < 0)
1879                 return;
1880         if (is_retrans)
1881                 task->tk_client->cl_stats->rpcretrans++;
1882         /*
1883          * On success, ensure that we call xprt_end_transmit() before sleeping
1884          * in order to allow access to the socket to other RPC requests.
1885          */
1886         call_transmit_status(task);
1887         if (rpc_reply_expected(task))
1888                 return;
1889         task->tk_action = rpc_exit_task;
1890         rpc_wake_up_queued_task(&task->tk_rqstp->rq_xprt->pending, task);
1891 }
1892 
1893 /*
1894  * 5a.  Handle cleanup after a transmission
1895  */
1896 static void
1897 call_transmit_status(struct rpc_task *task)
1898 {
1899         task->tk_action = call_status;
1900 
1901         /*
1902          * Common case: success.  Force the compiler to put this
1903          * test first.
1904          */
1905         if (task->tk_status == 0) {
1906                 xprt_end_transmit(task);
1907                 rpc_task_force_reencode(task);
1908                 return;
1909         }
1910 
1911         switch (task->tk_status) {
1912         case -EAGAIN:
1913                 break;
1914         default:
1915                 dprint_status(task);
1916                 xprt_end_transmit(task);
1917                 rpc_task_force_reencode(task);
1918                 break;
1919                 /*
1920                  * Special cases: if we've been waiting on the
1921                  * socket's write_space() callback, or if the
1922                  * socket just returned a connection error,
1923                  * then hold onto the transport lock.
1924                  */
1925         case -ECONNREFUSED:
1926         case -EHOSTDOWN:
1927         case -EHOSTUNREACH:
1928         case -ENETUNREACH:
1929         case -EPERM:
1930                 if (RPC_IS_SOFTCONN(task)) {
1931                         xprt_end_transmit(task);
1932                         rpc_exit(task, task->tk_status);
1933                         break;
1934                 }
1935         case -ECONNRESET:
1936         case -ECONNABORTED:
1937         case -ENOTCONN:
1938         case -ENOBUFS:
1939         case -EPIPE:
1940                 rpc_task_force_reencode(task);
1941         }
1942 }
1943 
1944 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1945 /*
1946  * 5b.  Send the backchannel RPC reply.  On error, drop the reply.  In
1947  * addition, disconnect on connectivity errors.
1948  */
1949 static void
1950 call_bc_transmit(struct rpc_task *task)
1951 {
1952         struct rpc_rqst *req = task->tk_rqstp;
1953 
1954         if (!xprt_prepare_transmit(task)) {
1955                 /*
1956                  * Could not reserve the transport. Try again after the
1957                  * transport is released.
1958                  */
1959                 task->tk_status = 0;
1960                 task->tk_action = call_bc_transmit;
1961                 return;
1962         }
1963 
1964         task->tk_action = rpc_exit_task;
1965         if (task->tk_status < 0) {
1966                 printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1967                         "error: %d\n", task->tk_status);
1968                 return;
1969         }
1970 
1971         xprt_transmit(task);
1972         xprt_end_transmit(task);
1973         dprint_status(task);
1974         switch (task->tk_status) {
1975         case 0:
1976                 /* Success */
1977                 break;
1978         case -EHOSTDOWN:
1979         case -EHOSTUNREACH:
1980         case -ENETUNREACH:
1981         case -ETIMEDOUT:
1982                 /*
1983                  * Problem reaching the server.  Disconnect and let the
1984                  * forechannel reestablish the connection.  The server will
1985                  * have to retransmit the backchannel request and we'll
1986                  * reprocess it.  Since these ops are idempotent, there's no
1987                  * need to cache our reply at this time.
1988                  */
1989                 printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1990                         "error: %d\n", task->tk_status);
1991                 xprt_conditional_disconnect(req->rq_xprt,
1992                         req->rq_connect_cookie);
1993                 break;
1994         default:
1995                 /*
1996                  * We were unable to reply and will have to drop the
1997                  * request.  The server should reconnect and retransmit.
1998                  */
1999                 WARN_ON_ONCE(task->tk_status == -EAGAIN);
2000                 printk(KERN_NOTICE "RPC: Could not send backchannel reply "
2001                         "error: %d\n", task->tk_status);
2002                 break;
2003         }
2004         rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
2005 }
2006 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
2007 
2008 /*
2009  * 6.   Sort out the RPC call status
2010  */
2011 static void
2012 call_status(struct rpc_task *task)
2013 {
2014         struct rpc_clnt *clnt = task->tk_client;
2015         struct rpc_rqst *req = task->tk_rqstp;
2016         int             status;
2017 
2018         if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent)
2019                 task->tk_status = req->rq_reply_bytes_recvd;
2020 
2021         dprint_status(task);
2022 
2023         status = task->tk_status;
2024         if (status >= 0) {
2025                 task->tk_action = call_decode;
2026                 return;
2027         }
2028 
2029         trace_rpc_call_status(task);
2030         task->tk_status = 0;
2031         switch(status) {
2032         case -EHOSTDOWN:
2033         case -EHOSTUNREACH:
2034         case -ENETUNREACH:
2035         case -EPERM:
2036                 if (RPC_IS_SOFTCONN(task)) {
2037                         rpc_exit(task, status);
2038                         break;
2039                 }
2040                 /*
2041                  * Delay any retries for 3 seconds, then handle as if it
2042                  * were a timeout.
2043                  */
2044                 rpc_delay(task, 3*HZ);
2045         case -ETIMEDOUT:
2046                 task->tk_action = call_timeout;
2047                 if (!(task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
2048                     && task->tk_client->cl_discrtry)
2049                         xprt_conditional_disconnect(req->rq_xprt,
2050                                         req->rq_connect_cookie);
2051                 break;
2052         case -ECONNREFUSED:
2053         case -ECONNRESET:
2054         case -ECONNABORTED:
2055                 rpc_force_rebind(clnt);
2056         case -ENOBUFS:
2057                 rpc_delay(task, 3*HZ);
2058         case -EPIPE:
2059         case -ENOTCONN:
2060                 task->tk_action = call_bind;
2061                 break;
2062         case -EAGAIN:
2063                 task->tk_action = call_transmit;
2064                 break;
2065         case -EIO:
2066                 /* shutdown or soft timeout */
2067                 rpc_exit(task, status);
2068                 break;
2069         default:
2070                 if (clnt->cl_chatty)
2071                         printk("%s: RPC call returned error %d\n",
2072                                clnt->cl_program->name, -status);
2073                 rpc_exit(task, status);
2074         }
2075 }
2076 
2077 /*
2078  * 6a.  Handle RPC timeout
2079  *      We do not release the request slot, so we keep using the
2080  *      same XID for all retransmits.
2081  */
2082 static void
2083 call_timeout(struct rpc_task *task)
2084 {
2085         struct rpc_clnt *clnt = task->tk_client;
2086 
2087         if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
2088                 dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid);
2089                 goto retry;
2090         }
2091 
2092         dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
2093         task->tk_timeouts++;
2094 
2095         if (RPC_IS_SOFTCONN(task)) {
2096                 rpc_exit(task, -ETIMEDOUT);
2097                 return;
2098         }
2099         if (RPC_IS_SOFT(task)) {
2100                 if (clnt->cl_chatty) {
2101                         rcu_read_lock();
2102                         printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
2103                                 clnt->cl_program->name,
2104                                 rcu_dereference(clnt->cl_xprt)->servername);
2105                         rcu_read_unlock();
2106                 }
2107                 if (task->tk_flags & RPC_TASK_TIMEOUT)
2108                         rpc_exit(task, -ETIMEDOUT);
2109                 else
2110                         rpc_exit(task, -EIO);
2111                 return;
2112         }
2113 
2114         if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
2115                 task->tk_flags |= RPC_CALL_MAJORSEEN;
2116                 if (clnt->cl_chatty) {
2117                         rcu_read_lock();
2118                         printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
2119                         clnt->cl_program->name,
2120                         rcu_dereference(clnt->cl_xprt)->servername);
2121                         rcu_read_unlock();
2122                 }
2123         }
2124         rpc_force_rebind(clnt);
2125         /*
2126          * Did our request time out due to an RPCSEC_GSS out-of-sequence
2127          * event? RFC2203 requires the server to drop all such requests.
2128          */
2129         rpcauth_invalcred(task);
2130 
2131 retry:
2132         task->tk_action = call_bind;
2133         task->tk_status = 0;
2134 }
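
/*
 * Editor's note (overview, not part of clnt.c): the numbered steps in this
 * file chain together as the RPC client state machine.  The normal path
 * through the states visible here is roughly
 *
 *   call_refresh (2) -> call_refreshresult (2a) -> call_allocate (2b) ->
 *   call_bind (4) -> [call_bind_status (4a)] -> call_connect (4b) ->
 *   [call_connect_status (4c)] -> call_transmit (5, which encodes, step 3) ->
 *   call_transmit_status (5a) -> call_status (6) -> call_decode (7) ->
 *   rpc_exit_task
 *
 * Most recoverable errors funnel into call_timeout (6a) above, which after a
 * major timeout forces a rebind and then loops back to call_bind, keeping
 * the same slot and XID for the retransmission.
 */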
2135 
2136 /*
2137  * 7.   Decode the RPC reply
2138  */
2139 static void
2140 call_decode(struct rpc_task *task)
2141 {
2142         struct rpc_clnt *clnt = task->tk_client;
2143         struct rpc_rqst *req = task->tk_rqstp;
2144         kxdrdproc_t     decode = task->tk_msg.rpc_proc->p_decode;
2145         __be32          *p;
2146 
2147         dprint_status(task);
2148 
2149         if (task->tk_flags & RPC_CALL_MAJORSEEN) {
2150                 if (clnt->cl_chatty) {
2151                         rcu_read_lock();
2152                         printk(KERN_NOTICE "%s: server %s OK\n",
2153                                 clnt->cl_program->name,
2154                                 rcu_dereference(clnt->cl_xprt)->servername);
2155                         rcu_read_unlock();
2156                 }
2157                 task->tk_flags &= ~RPC_CALL_MAJORSEEN;
2158         }
2159 
2160         /*
2161          * Ensure that we see all writes made by xprt_complete_rqst()
2162          * before it changed req->rq_reply_bytes_recvd.
2163          */
2164         smp_rmb();
2165         req->rq_rcv_buf.len = req->rq_private_buf.len;
2166 
2167         /* Check that the softirq receive buffer is valid */
2168         WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
2169                                 sizeof(req->rq_rcv_buf)) != 0);
2170 
2171         if (req->rq_rcv_buf.len < 12) {
2172                 if (!RPC_IS_SOFT(task)) {
2173                         task->tk_action = call_bind;
2174                         goto out_retry;
2175                 }
2176                 dprintk("RPC:       %s: too small RPC reply size (%d bytes)\n",
2177                                 clnt->cl_program->name, task->tk_status);
2178                 task->tk_action = call_timeout;
2179                 goto out_retry;
2180         }
2181 
2182         p = rpc_verify_header(task);
2183         if (IS_ERR(p)) {
2184                 if (p == ERR_PTR(-EAGAIN))
2185                         goto out_retry;
2186                 return;
2187         }
2188 
2189         task->tk_action = rpc_exit_task;
2190 
2191         if (decode) {
2192                 task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
2193                                                       task->tk_msg.rpc_resp);
2194         }
2195         dprintk("RPC: %5u call_decode result %d\n", task->tk_pid,
2196                         task->tk_status);
2197         return;
2198 out_retry:
2199         task->tk_status = 0;
2200         /* Note: rpc_verify_header() may have freed the RPC slot */
2201         if (task->tk_rqstp == req) {
2202                 req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
2203                 if (task->tk_client->cl_discrtry)
2204                         xprt_conditional_disconnect(req->rq_xprt,
2205                                         req->rq_connect_cookie);
2206         }
2207 }
2208 
2209 static __be32 *
2210 rpc_encode_header(struct rpc_task *task)
2211 {
2212         struct rpc_clnt *clnt = task->tk_client;
2213         struct rpc_rqst *req = task->tk_rqstp;
2214         __be32          *p = req->rq_svec[0].iov_base;
2215 
2216         /* FIXME: check buffer size? */
2217 
2218         p = xprt_skip_transport_header(req->rq_xprt, p);
2219         *p++ = req->rq_xid;             /* XID */
2220         *p++ = htonl(RPC_CALL);         /* CALL */
2221         *p++ = htonl(RPC_VERSION);      /* RPC version */
2222         *p++ = htonl(clnt->cl_prog);    /* program number */
2223         *p++ = htonl(clnt->cl_vers);    /* program version */
2224         *p++ = htonl(task->tk_msg.rpc_proc->p_proc);    /* procedure */
2225         p = rpcauth_marshcred(task, p);
2226         req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
2227         return p;
2228 }
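
/*
 * Editor's note (illustration, not part of clnt.c): the words written above
 * form the fixed part of an RPC call message as defined by RFC 5531, one
 * 32-bit big-endian word each:
 *
 *   XID | mtype = CALL (0) | rpcvers = 2 | prog | vers | proc
 *
 * followed by the credential and verifier that rpcauth_marshcred() appends
 * for the task's auth flavor.  rq_slen ends up holding the byte length of
 * the marshalled header.
 */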
2229 
2230 static __be32 *
2231 rpc_verify_header(struct rpc_task *task)
2232 {
2233         struct rpc_clnt *clnt = task->tk_client;
2234         struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
2235         int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
2236         __be32  *p = iov->iov_base;
2237         u32 n;
2238         int error = -EACCES;
2239 
2240         if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
2241                 /* RFC-1014 says that the representation of XDR data must be a
2242                  * multiple of four bytes
2243                  * - if it isn't pointer subtraction in the NFS client may give
2244                  *   undefined results
2245                  */
2246                 dprintk("RPC: %5u %s: XDR representation not a multiple of"
2247                        " 4 bytes: 0x%x\n", task->tk_pid, __func__,
2248                        task->tk_rqstp->rq_rcv_buf.len);
2249                 error = -EIO;
2250                 goto out_err;
2251         }
2252         if ((len -= 3) < 0)
2253                 goto out_overflow;
2254 
2255         p += 1; /* skip XID */
2256         if ((n = ntohl(*p++)) != RPC_REPLY) {
2257                 dprintk("RPC: %5u %s: not an RPC reply: %x\n",
2258                         task->tk_pid, __func__, n);
2259                 error = -EIO;
2260                 goto out_garbage;
2261         }
2262 
2263         if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
2264                 if (--len < 0)
2265                         goto out_overflow;
2266                 switch ((n = ntohl(*p++))) {
2267                 case RPC_AUTH_ERROR:
2268                         break;
2269                 case RPC_MISMATCH:
2270                         dprintk("RPC: %5u %s: RPC call version mismatch!\n",
2271                                 task->tk_pid, __func__);
2272                         error = -EPROTONOSUPPORT;
2273                         goto out_err;
2274                 default:
2275                         dprintk("RPC: %5u %s: RPC call rejected, "
2276                                 "unknown error: %x\n",
2277                                 task->tk_pid, __func__, n);
2278                         error = -EIO;
2279                         goto out_err;
2280                 }
2281                 if (--len < 0)
2282                         goto out_overflow;
2283                 switch ((n = ntohl(*p++))) {
2284                 case RPC_AUTH_REJECTEDCRED:
2285                 case RPC_AUTH_REJECTEDVERF:
2286                 case RPCSEC_GSS_CREDPROBLEM:
2287                 case RPCSEC_GSS_CTXPROBLEM:
2288                         if (!task->tk_cred_retry)
2289                                 break;
2290                         task->tk_cred_retry--;
2291                         dprintk("RPC: %5u %s: retry stale creds\n",
2292                                         task->tk_pid, __func__);
2293                         rpcauth_invalcred(task);
2294                         /* Ensure we obtain a new XID! */
2295                         xprt_release(task);
2296                         task->tk_action = call_reserve;
2297                         goto out_retry;
2298                 case RPC_AUTH_BADCRED:
2299                 case RPC_AUTH_BADVERF:
2300                         /* possibly garbled cred/verf? */
2301                         if (!task->tk_garb_retry)
2302                                 break;
2303                         task->tk_garb_retry--;
2304                         dprintk("RPC: %5u %s: retry garbled creds\n",
2305                                         task->tk_pid, __func__);
2306                         task->tk_action = call_bind;
2307                         goto out_retry;
2308                 case RPC_AUTH_TOOWEAK:
2309                         rcu_read_lock();
2310                         printk(KERN_NOTICE "RPC: server %s requires stronger "
2311                                "authentication.\n",
2312                                rcu_dereference(clnt->cl_xprt)->servername);
2313                         rcu_read_unlock();
2314                         break;
2315                 default:
2316                         dprintk("RPC: %5u %s: unknown auth error: %x\n",
2317                                         task->tk_pid, __func__, n);
2318                         error = -EIO;
2319                 }
2320                 dprintk("RPC: %5u %s: call rejected %d\n",
2321                                 task->tk_pid, __func__, n);
2322                 goto out_err;
2323         }
2324         p = rpcauth_checkverf(task, p);
2325         if (IS_ERR(p)) {
2326                 error = PTR_ERR(p);
2327                 dprintk("RPC: %5u %s: auth check failed with %d\n",
2328                                 task->tk_pid, __func__, error);
2329                 goto out_garbage;               /* bad verifier, retry */
2330         }
2331         len = p - (__be32 *)iov->iov_base - 1;
2332         if (len < 0)
2333                 goto out_overflow;
2334         switch ((n = ntohl(*p++))) {
2335         case RPC_SUCCESS:
2336                 return p;
2337         case RPC_PROG_UNAVAIL:
2338                 dprintk_rcu("RPC: %5u %s: program %u is unsupported "
2339                                 "by server %s\n", task->tk_pid, __func__,
2340                                 (unsigned int)clnt->cl_prog,
2341                                 rcu_dereference(clnt->cl_xprt)->servername);
2342                 error = -EPFNOSUPPORT;
2343                 goto out_err;
2344         case RPC_PROG_MISMATCH:
2345                 dprintk_rcu("RPC: %5u %s: program %u, version %u unsupported "
2346                                 "by server %s\n", task->tk_pid, __func__,
2347                                 (unsigned int)clnt->cl_prog,
2348                                 (unsigned int)clnt->cl_vers,
2349                                 rcu_dereference(clnt->cl_xprt)->servername);
2350                 error = -EPROTONOSUPPORT;
2351                 goto out_err;
2352         case RPC_PROC_UNAVAIL:
2353                 dprintk_rcu("RPC: %5u %s: proc %s unsupported by program %u, "
2354                                 "version %u on server %s\n",
2355                                 task->tk_pid, __func__,
2356                                 rpc_proc_name(task),
2357                                 clnt->cl_prog, clnt->cl_vers,
2358                                 rcu_dereference(clnt->cl_xprt)->servername);
2359                 error = -EOPNOTSUPP;
2360                 goto out_err;
2361         case RPC_GARBAGE_ARGS:
2362                 dprintk("RPC: %5u %s: server saw garbage\n",
2363                                 task->tk_pid, __func__);
2364                 break;                  /* retry */
2365         default:
2366                 dprintk("RPC: %5u %s: server accept status: %x\n",
2367                                 task->tk_pid, __func__, n);
2368                 /* Also retry */
2369         }
2370 
2371 out_garbage:
2372         clnt->cl_stats->rpcgarbage++;
2373         if (task->tk_garb_retry) {
2374                 task->tk_garb_retry--;
2375                 dprintk("RPC: %5u %s: retrying\n",
2376                                 task->tk_pid, __func__);
2377                 task->tk_action = call_bind;
2378 out_retry:
2379                 return ERR_PTR(-EAGAIN);
2380         }
2381 out_err:
2382         rpc_exit(task, error);
2383         dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid,
2384                         __func__, error);
2385         return ERR_PTR(error);
2386 out_overflow:
2387         dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid,
2388                         __func__);
2389         goto out_garbage;
2390 }
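
/*
 * Editor's note (illustration, not part of clnt.c): rpc_verify_header()
 * walks the RPC reply layout from RFC 5531:
 *
 *   XID | mtype = REPLY (1) | reply_stat
 *     reply_stat == MSG_DENIED:   RPC_MISMATCH, or RPC_AUTH_ERROR + auth_stat
 *     reply_stat == MSG_ACCEPTED: verifier (checked by rpcauth_checkverf()),
 *                                 then accept_stat (RPC_SUCCESS,
 *                                 RPC_PROG_UNAVAIL, RPC_PROG_MISMATCH, ...)
 *
 * The initial "len -= 3" accounts for the three words up to and including
 * reply_stat.  On RPC_SUCCESS the function returns a pointer just past
 * accept_stat, which call_decode() hands to the procedure's p_decode
 * callback via rpcauth_unwrap_resp().
 */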
2391 
2392 static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
2393 {
2394 }
2395 
2396 static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
2397 {
2398         return 0;
2399 }
2400 
2401 static struct rpc_procinfo rpcproc_null = {
2402         .p_encode = rpcproc_encode_null,
2403         .p_decode = rpcproc_decode_null,
2404 };
2405 
2406 static int rpc_ping(struct rpc_clnt *clnt)
2407 {
2408         struct rpc_message msg = {
2409                 .rpc_proc = &rpcproc_null,
2410         };
2411         int err;
2412         msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
2413         err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN);
2414         put_rpccred(msg.rpc_cred);
2415         return err;
2416 }
2417 
2418 struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
2419 {
2420         struct rpc_message msg = {
2421                 .rpc_proc = &rpcproc_null,
2422                 .rpc_cred = cred,
2423         };
2424         struct rpc_task_setup task_setup_data = {
2425                 .rpc_client = clnt,
2426                 .rpc_message = &msg,
2427                 .callback_ops = &rpc_default_ops,
2428                 .flags = flags,
2429         };
2430         return rpc_run_task(&task_setup_data);
2431 }
2432 EXPORT_SYMBOL_GPL(rpc_call_null);
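
/*
 * Editor's note (illustrative sketch, not part of clnt.c): rpc_call_null()
 * is the exported way to issue the same NULL procedure that rpc_ping() uses
 * internally.  A minimal synchronous caller -- the function name and flag
 * choice below are the editor's assumptions, not taken from this file --
 * could look like this:
 */
#if 0	/* example only, not compiled */
static int example_null_ping(struct rpc_clnt *clnt)
{
	struct rpc_task *task;
	int status;

	/* Without RPC_TASK_ASYNC the task runs to completion before return */
	task = rpc_call_null(clnt, NULL, RPC_TASK_SOFT | RPC_TASK_SOFTCONN);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = task->tk_status;
	rpc_put_task(task);
	return status;
}
#endif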
2433 
2434 #ifdef RPC_DEBUG
2435 static void rpc_show_header(void)
2436 {
2437         printk(KERN_INFO "-pid- flgs status -client- --rqstp- "
2438                 "-timeout ---ops--\n");
2439 }
2440 
2441 static void rpc_show_task(const struct rpc_clnt *clnt,
2442                           const struct rpc_task *task)
2443 {
2444         const char *rpc_waitq = "none";
2445 
2446         if (RPC_IS_QUEUED(task))
2447                 rpc_waitq = rpc_qname(task->tk_waitqueue);
2448 
2449         printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
2450                 task->tk_pid, task->tk_flags, task->tk_status,
2451                 clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops,
2452                 clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task),
2453                 task->tk_action, rpc_waitq);
2454 }
2455 
2456 void rpc_show_tasks(struct net *net)
2457 {
2458         struct rpc_clnt *clnt;
2459         struct rpc_task *task;
2460         int header = 0;
2461         struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
2462 
2463         spin_lock(&sn->rpc_client_lock);
2464         list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
2465                 spin_lock(&clnt->cl_lock);
2466                 list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
2467                         if (!header) {
2468                                 rpc_show_header();
2469                                 header++;
2470                         }
2471                         rpc_show_task(clnt, task);
2472                 }
2473                 spin_unlock(&clnt->cl_lock);
2474         }
2475         spin_unlock(&sn->rpc_client_lock);
2476 }
2477 #endif
2478 
