~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/include/linux/workqueue.h

Version: ~ [ linux-5.10-rc5 ] ~ [ linux-5.9.10 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.79 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.159 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.208 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.245 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.245 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.19.8 ] ~ [ linux-3.18.140 ] ~ [ linux-3.17.8 ] ~ [ linux-3.16.85 ] ~ [ linux-3.15.10 ] ~ [ linux-3.14.79 ] ~ [ linux-3.13.11 ] ~ [ linux-3.12.74 ] ~ [ linux-3.11.10 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

Diff markup

Differences between /include/linux/workqueue.h (Version linux-3.15.10) and /include/linux/workqueue.h (Version linux-5.9.1)


** Warning: Cannot open xref database.

  1 /*                                                  1 
  2  * workqueue.h --- work queue handling for Lin    
  3  */                                               
  4                                                   
  5 #ifndef _LINUX_WORKQUEUE_H                        
  6 #define _LINUX_WORKQUEUE_H                        
  7                                                   
  8 #include <linux/timer.h>                          
  9 #include <linux/linkage.h>                        
 10 #include <linux/bitops.h>                         
 11 #include <linux/lockdep.h>                        
 12 #include <linux/threads.h>                        
 13 #include <linux/atomic.h>                         
 14 #include <linux/cpumask.h>                        
 15                                                   
 16 struct workqueue_struct;                          
 17                                                   
 18 struct work_struct;                               
 19 typedef void (*work_func_t)(struct work_struct    
 20 void delayed_work_timer_fn(unsigned long __dat    
 21                                                   
 22 /*                                                
 23  * The first word is the work queue pointer an    
 24  * one                                            
 25  */                                               
 26 #define work_data_bits(work) ((unsigned long *    
 27                                                   
 28 enum {                                            
 29         WORK_STRUCT_PENDING_BIT = 0,    /* wor    
 30         WORK_STRUCT_DELAYED_BIT = 1,    /* wor    
 31         WORK_STRUCT_PWQ_BIT     = 2,    /* dat    
 32         WORK_STRUCT_LINKED_BIT  = 3,    /* nex    
 33 #ifdef CONFIG_DEBUG_OBJECTS_WORK                  
 34         WORK_STRUCT_STATIC_BIT  = 4,    /* sta    
 35         WORK_STRUCT_COLOR_SHIFT = 5,    /* col    
 36 #else                                             
 37         WORK_STRUCT_COLOR_SHIFT = 4,    /* col    
 38 #endif                                            
 39                                                   
 40         WORK_STRUCT_COLOR_BITS  = 4,              
 41                                                   
 42         WORK_STRUCT_PENDING     = 1 << WORK_ST    
 43         WORK_STRUCT_DELAYED     = 1 << WORK_ST    
 44         WORK_STRUCT_PWQ         = 1 << WORK_ST    
 45         WORK_STRUCT_LINKED      = 1 << WORK_ST    
 46 #ifdef CONFIG_DEBUG_OBJECTS_WORK                  
 47         WORK_STRUCT_STATIC      = 1 << WORK_ST    
 48 #else                                             
 49         WORK_STRUCT_STATIC      = 0,              
 50 #endif                                            
 51                                                   
 52         /*                                        
 53          * The last color is no color used for    
 54          * participate in workqueue flushing.     
 55          */                                       
 56         WORK_NR_COLORS          = (1 << WORK_S    
 57         WORK_NO_COLOR           = WORK_NR_COLO    
 58                                                   
 59         /* special cpu IDs */                     
 60         WORK_CPU_UNBOUND        = NR_CPUS,        
 61         WORK_CPU_END            = NR_CPUS + 1,    
 62                                                   
 63         /*                                        
 64          * Reserve 7 bits off of pwq pointer w    
 65          * This makes pwqs aligned to 256 byte    
 66          * flush colors.                          
 67          */                                       
 68         WORK_STRUCT_FLAG_BITS   = WORK_STRUCT_    
 69                                   WORK_STRUCT_    
 70                                                   
 71         /* data contains off-queue information    
 72         WORK_OFFQ_FLAG_BASE     = WORK_STRUCT_    
 73                                                   
 74         WORK_OFFQ_CANCELING     = (1 << WORK_O    
 75                                                   
 76         /*                                        
 77          * When a work item is off queue, its     
 78          * pool it was on.  Cap at 31 bits and    
 79          * indicate that no pool is associated    
 80          */                                       
 81         WORK_OFFQ_FLAG_BITS     = 1,              
 82         WORK_OFFQ_POOL_SHIFT    = WORK_OFFQ_FL    
 83         WORK_OFFQ_LEFT          = BITS_PER_LON    
 84         WORK_OFFQ_POOL_BITS     = WORK_OFFQ_LE    
 85         WORK_OFFQ_POOL_NONE     = (1LU << WORK    
 86                                                   
 87         /* convenience constants */               
 88         WORK_STRUCT_FLAG_MASK   = (1UL << WORK    
 89         WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUC    
 90         WORK_STRUCT_NO_POOL     = (unsigned lo    
 91                                                   
 92         /* bit mask for work_busy() return val    
 93         WORK_BUSY_PENDING       = 1 << 0,         
 94         WORK_BUSY_RUNNING       = 1 << 1,         
 95                                                   
 96         /* maximum string length for set_worke    
 97         WORKER_DESC_LEN         = 24,             
 98 };                                                
 99                                                   
100 struct work_struct {                              
101         atomic_long_t data;                       
102         struct list_head entry;                   
103         work_func_t func;                         
104 #ifdef CONFIG_LOCKDEP                             
105         struct lockdep_map lockdep_map;           
106 #endif                                            
107 };                                                
108                                                   
109 #define WORK_DATA_INIT()        ATOMIC_LONG_IN    
110 #define WORK_DATA_STATIC_INIT() \                 
111         ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL |    
112                                                   
113 struct delayed_work {                             
114         struct work_struct work;                  
115         struct timer_list timer;                  
116                                                   
117         /* target workqueue and CPU ->timer us    
118         struct workqueue_struct *wq;              
119         int cpu;                                  
120 };                                                
121                                                   
122 /*                                                
123  * A struct for workqueue attributes.  This ca    
124  * attributes of an unbound workqueue.            
125  *                                                
126  * Unlike other fields, ->no_numa isn't a prop    
127  * only modifies how apply_workqueue_attrs() s    
128  * participate in pool hash calculations or eq    
129  */                                               
130 struct workqueue_attrs {                          
131         int                     nice;             
132         cpumask_var_t           cpumask;          
133         bool                    no_numa;          
134 };                                                
135                                                   
136 static inline struct delayed_work *to_delayed_    
137 {                                                 
138         return container_of(work, struct delay    
139 }                                                 
140                                                   
141 struct execute_work {                             
142         struct work_struct work;                  
143 };                                                
144                                                   
145 #ifdef CONFIG_LOCKDEP                             
146 /*                                                
147  * NB: because we have to copy the lockdep_map    
148  * here is required, otherwise it could get in    
149  * copy of the lockdep_map!                       
150  */                                               
151 #define __WORK_INIT_LOCKDEP_MAP(n, k) \           
152         .lockdep_map = STATIC_LOCKDEP_MAP_INIT    
153 #else                                             
154 #define __WORK_INIT_LOCKDEP_MAP(n, k)             
155 #endif                                            
156                                                   
157 #define __WORK_INITIALIZER(n, f) {                
158         .data = WORK_DATA_STATIC_INIT(),          
159         .entry  = { &(n).entry, &(n).entry },     
160         .func = (f),                              
161         __WORK_INIT_LOCKDEP_MAP(#n, &(n))         
162         }                                         
163                                                   
164 #define __DELAYED_WORK_INITIALIZER(n, f, tflag    
165         .work = __WORK_INITIALIZER((n).work, (    
166         .timer = __TIMER_INITIALIZER(delayed_w    
167                                      0, (unsig    
168                                      (tflags)     
169         }                                         
170                                                   
171 #define DECLARE_WORK(n, f)                        
172         struct work_struct n = __WORK_INITIALI    
173                                                   
174 #define DECLARE_DELAYED_WORK(n, f)                
175         struct delayed_work n = __DELAYED_WORK    
176                                                   
177 #define DECLARE_DEFERRABLE_WORK(n, f)             
178         struct delayed_work n = __DELAYED_WORK    
179                                                   
180 #ifdef CONFIG_DEBUG_OBJECTS_WORK                  
181 extern void __init_work(struct work_struct *wo    
182 extern void destroy_work_on_stack(struct work_    
183 extern void destroy_delayed_work_on_stack(stru    
184 static inline unsigned int work_static(struct     
185 {                                                 
186         return *work_data_bits(work) & WORK_ST    
187 }                                                 
188 #else                                             
189 static inline void __init_work(struct work_str    
190 static inline void destroy_work_on_stack(struc    
191 static inline void destroy_delayed_work_on_sta    
192 static inline unsigned int work_static(struct     
193 #endif                                            
194                                                   
195 /*                                                
196  * initialize all of a work item in one go        
197  *                                                
198  * NOTE! No point in using "atomic_long_set()"    
199  * assignment of the work data initializer all    
200  * to generate better code.                       
201  */                                               
202 #ifdef CONFIG_LOCKDEP                             
203 #define __INIT_WORK(_work, _func, _onstack)       
204         do {                                      
205                 static struct lock_class_key _    
206                                                   
207                 __init_work((_work), _onstack)    
208                 (_work)->data = (atomic_long_t    
209                 lockdep_init_map(&(_work)->loc    
210                 INIT_LIST_HEAD(&(_work)->entry    
211                 (_work)->func = (_func);          
212         } while (0)                               
213 #else                                             
214 #define __INIT_WORK(_work, _func, _onstack)       
215         do {                                      
216                 __init_work((_work), _onstack)    
217                 (_work)->data = (atomic_long_t    
218                 INIT_LIST_HEAD(&(_work)->entry    
219                 (_work)->func = (_func);          
220         } while (0)                               
221 #endif                                            
222                                                   
223 #define INIT_WORK(_work, _func)                   
224         do {                                      
225                 __INIT_WORK((_work), (_func),     
226         } while (0)                               
227                                                   
228 #define INIT_WORK_ONSTACK(_work, _func)           
229         do {                                      
230                 __INIT_WORK((_work), (_func),     
231         } while (0)                               
232                                                   
233 #define __INIT_DELAYED_WORK(_work, _func, _tfl    
234         do {                                      
235                 INIT_WORK(&(_work)->work, (_fu    
236                 __setup_timer(&(_work)->timer,    
237                               (unsigned long)(    
238                               (_tflags) | TIME    
239         } while (0)                               
240                                                   
241 #define __INIT_DELAYED_WORK_ONSTACK(_work, _fu    
242         do {                                      
243                 INIT_WORK_ONSTACK(&(_work)->wo    
244                 __setup_timer_on_stack(&(_work    
245                                        delayed    
246                                        (unsign    
247                                        (_tflag    
248         } while (0)                               
249                                                   
250 #define INIT_DELAYED_WORK(_work, _func)           
251         __INIT_DELAYED_WORK(_work, _func, 0)      
252                                                   
253 #define INIT_DELAYED_WORK_ONSTACK(_work, _func    
254         __INIT_DELAYED_WORK_ONSTACK(_work, _fu    
255                                                   
256 #define INIT_DEFERRABLE_WORK(_work, _func)        
257         __INIT_DELAYED_WORK(_work, _func, TIME    
258                                                   
259 #define INIT_DEFERRABLE_WORK_ONSTACK(_work, _f    
260         __INIT_DELAYED_WORK_ONSTACK(_work, _fu    
261                                                   
262 /**                                               
263  * work_pending - Find out whether a work item    
264  * @work: The work item in question               
265  */                                               
266 #define work_pending(work) \                      
267         test_bit(WORK_STRUCT_PENDING_BIT, work    
268                                                   
269 /**                                               
270  * delayed_work_pending - Find out whether a d    
271  * pending                                        
272  * @work: The work item in question               
273  */                                               
274 #define delayed_work_pending(w) \                 
275         work_pending(&(w)->work)                  
276                                                   
277 /**                                               
278  * work_clear_pending - for internal use only,    
279  * @work: The work item in question               
280  */                                               
281 #define work_clear_pending(work) \                
282         clear_bit(WORK_STRUCT_PENDING_BIT, wor    
283                                                   
284 /*                                                
285  * Workqueue flags and constants.  For details    
286  * Documentation/workqueue.txt.                   
287  */                                               
288 enum {                                            
289         WQ_UNBOUND              = 1 << 1, /* n    
290         WQ_FREEZABLE            = 1 << 2, /* f    
291         WQ_MEM_RECLAIM          = 1 << 3, /* m    
292         WQ_HIGHPRI              = 1 << 4, /* h    
293         WQ_CPU_INTENSIVE        = 1 << 5, /* c    
294         WQ_SYSFS                = 1 << 6, /* v    
295                                                   
296         /*                                        
297          * Per-cpu workqueues are generally pr    
298          * show better performance thanks to c    
299          * workqueues exclude the scheduler fr    
300          * execute the worker threads, which h    
301          * of increasing power consumption.       
302          *                                        
303          * The scheduler considers a CPU idle     
304          * to execute and tries to keep idle c    
305          * however, for example, a per-cpu wor    
306          * interrupt handler on an idle CPU wi    
307          * excute the work item on that CPU br    
308          * turn may lead to more scheduling ch    
309          * in terms of power consumption.         
310          *                                        
311          * Workqueues marked with WQ_POWER_EFF    
312          * but become unbound if workqueue.pow    
313          * specified.  Per-cpu workqueues whic    
314          * contribute significantly to power-c    
315          * marked with this flag and enabling     
316          * leads to noticeable power saving at    
317          * performance disadvantage.              
318          *                                        
319          * http://thread.gmane.org/gmane.linux    
320          */                                       
321         WQ_POWER_EFFICIENT      = 1 << 7,         
322                                                   
323         __WQ_DRAINING           = 1 << 16, /*     
324         __WQ_ORDERED            = 1 << 17, /*     
325                                                   
326         WQ_MAX_ACTIVE           = 512,    /* I    
327         WQ_MAX_UNBOUND_PER_CPU  = 4,      /* 4    
328         WQ_DFL_ACTIVE           = WQ_MAX_ACTIV    
329 };                                                
330                                                   
331 /* unbound wq's aren't per-cpu, scale max_acti    
332 #define WQ_UNBOUND_MAX_ACTIVE   \                 
333         max_t(int, WQ_MAX_ACTIVE, num_possible    
334                                                   
335 /*                                                
336  * System-wide workqueues which are always pre    
337  *                                                
338  * system_wq is the one used by schedule[_dela    
339  * Multi-CPU multi-threaded.  There are users     
340  * short queue flush time.  Don't queue works     
341  * long.                                          
342  *                                                
343  * system_long_wq is similar to system_wq but     
344  * works.  Queue flushing might take relativel    
345  *                                                
346  * system_unbound_wq is unbound workqueue.  Wo    
347  * any specific CPU, not concurrency managed,     
348  * executed immediately as long as max_active     
349  * resources are available.                       
350  *                                                
351  * system_freezable_wq is equivalent to system    
352  * freezable.                                     
353  *                                                
354  * *_power_efficient_wq are inclined towards s    
355  * into WQ_UNBOUND variants if 'wq_power_effic    
356  * they are same as their non-power-efficient     
357  * system_power_efficient_wq is identical to s    
358  * 'wq_power_efficient' is disabled.  See WQ_P    
359  */                                               
360 extern struct workqueue_struct *system_wq;        
361 extern struct workqueue_struct *system_long_wq    
362 extern struct workqueue_struct *system_unbound    
363 extern struct workqueue_struct *system_freezab    
364 extern struct workqueue_struct *system_power_e    
365 extern struct workqueue_struct *system_freezab    
366                                                   
367 static inline struct workqueue_struct * __depr    
368 {                                                 
369         return system_wq;                         
370 }                                                 
371                                                   
372 static inline struct workqueue_struct * __depr    
373 {                                                 
374         return system_freezable_wq;               
375 }                                                 
376                                                   
377 /* equivlalent to system_wq and system_freezab    
378 #define system_nrt_wq                   __syst    
379 #define system_nrt_freezable_wq         __syst    
380                                                   
381 extern struct workqueue_struct *                  
382 __alloc_workqueue_key(const char *fmt, unsigne    
383         struct lock_class_key *key, const char    
384                                                   
385 /**                                               
386  * alloc_workqueue - allocate a workqueue         
387  * @fmt: printf format for the name of the wor    
388  * @flags: WQ_* flags                             
389  * @max_active: max in-flight work items, 0 fo    
390  * @args: args for @fmt                           
391  *                                                
392  * Allocate a workqueue with the specified par    
393  * information on WQ_* flags, please refer to     
394  *                                                
395  * The __lock_name macro dance is to guarantee    
396  * doesn't end up with different namesm, which    
397  *                                                
398  * RETURNS:                                       
399  * Pointer to the allocated workqueue on succe    
400  */                                               
401 #ifdef CONFIG_LOCKDEP                             
402 #define alloc_workqueue(fmt, flags, max_active    
403 ({                                                
404         static struct lock_class_key __key;       
405         const char *__lock_name;                  
406                                                   
407         __lock_name = #fmt#args;                  
408                                                   
409         __alloc_workqueue_key((fmt), (flags),     
410                               &__key, __lock_n    
411 })                                                
412 #else                                             
413 #define alloc_workqueue(fmt, flags, max_active    
414         __alloc_workqueue_key((fmt), (flags),     
415                               NULL, NULL, ##ar    
416 #endif                                            
417                                                   
418 /**                                               
419  * alloc_ordered_workqueue - allocate an order    
420  * @fmt: printf format for the name of the wor    
421  * @flags: WQ_* flags (only WQ_FREEZABLE and W    
422  * @args: args for @fmt                           
423  *                                                
424  * Allocate an ordered workqueue.  An ordered     
425  * most one work item at any given time in the    
426  * implemented as unbound workqueues with @max    
427  *                                                
428  * RETURNS:                                       
429  * Pointer to the allocated workqueue on succe    
430  */                                               
431 #define alloc_ordered_workqueue(fmt, flags, ar    
432         alloc_workqueue(fmt, WQ_UNBOUND | __WQ    
433                                                   
434 #define create_workqueue(name)                    
435         alloc_workqueue("%s", WQ_MEM_RECLAIM,     
436 #define create_freezable_workqueue(name)          
437         alloc_workqueue("%s", WQ_FREEZABLE | W    
438                         1, (name))                
439 #define create_singlethread_workqueue(name)       
440         alloc_workqueue("%s", WQ_UNBOUND | WQ_    
441                                                   
442 extern void destroy_workqueue(struct workqueue    
443                                                   
444 struct workqueue_attrs *alloc_workqueue_attrs(    
445 void free_workqueue_attrs(struct workqueue_att    
446 int apply_workqueue_attrs(struct workqueue_str    
447                           const struct workque    
448                                                   
449 extern bool queue_work_on(int cpu, struct work    
450                         struct work_struct *wo    
451 extern bool queue_delayed_work_on(int cpu, str    
452                         struct delayed_work *w    
453 extern bool mod_delayed_work_on(int cpu, struc    
454                         struct delayed_work *d    
455                                                   
456 extern void flush_workqueue(struct workqueue_s    
457 extern void drain_workqueue(struct workqueue_s    
458 extern void flush_scheduled_work(void);           
459                                                   
460 extern int schedule_on_each_cpu(work_func_t fu    
461                                                   
462 int execute_in_process_context(work_func_t fn,    
463                                                   
464 extern bool flush_work(struct work_struct *wor    
465 extern bool cancel_work_sync(struct work_struc    
466                                                   
467 extern bool flush_delayed_work(struct delayed_    
468 extern bool cancel_delayed_work(struct delayed    
469 extern bool cancel_delayed_work_sync(struct de    
470                                                   
471 extern void workqueue_set_max_active(struct wo    
472                                      int max_a    
473 extern bool current_is_workqueue_rescuer(void)    
474 extern bool workqueue_congested(int cpu, struc    
475 extern unsigned int work_busy(struct work_stru    
476 extern __printf(1, 2) void set_worker_desc(con    
477 extern void print_worker_info(const char *log_    
478                                                   
479 /**                                               
480  * queue_work - queue work on a workqueue         
481  * @wq: workqueue to use                          
482  * @work: work to queue                           
483  *                                                
484  * Returns %false if @work was already on a qu    
485  *                                                
486  * We queue the work to the CPU on which it wa    
487  * it can be processed by another CPU.            
488  */                                               
489 static inline bool queue_work(struct workqueue    
490                               struct work_stru    
491 {                                                 
492         return queue_work_on(WORK_CPU_UNBOUND,    
493 }                                                 
494                                                   
495 /**                                               
496  * queue_delayed_work - queue work on a workqu    
497  * @wq: workqueue to use                          
498  * @dwork: delayable work to queue                
499  * @delay: number of jiffies to wait before qu    
500  *                                                
501  * Equivalent to queue_delayed_work_on() but t    
502  */                                               
503 static inline bool queue_delayed_work(struct w    
504                                       struct d    
505                                       unsigned    
506 {                                                 
507         return queue_delayed_work_on(WORK_CPU_    
508 }                                                 
509                                                   
510 /**                                               
511  * mod_delayed_work - modify delay of or queue    
512  * @wq: workqueue to use                          
513  * @dwork: work to queue                          
514  * @delay: number of jiffies to wait before qu    
515  *                                                
516  * mod_delayed_work_on() on local CPU.            
517  */                                               
518 static inline bool mod_delayed_work(struct wor    
519                                     struct del    
520                                     unsigned l    
521 {                                                 
522         return mod_delayed_work_on(WORK_CPU_UN    
523 }                                                 
524                                                   
525 /**                                               
526  * schedule_work_on - put work task on a speci    
527  * @cpu: cpu to put the work task on              
528  * @work: job to be done                          
529  *                                                
530  * This puts a job on a specific cpu              
531  */                                               
532 static inline bool schedule_work_on(int cpu, s    
533 {                                                 
534         return queue_work_on(cpu, system_wq, w    
535 }                                                 
536                                                   
537 /**                                               
538  * schedule_work - put work task in global wor    
539  * @work: job to be done                          
540  *                                                
541  * Returns %false if @work was already on the     
542  * %true otherwise.                               
543  *                                                
544  * This puts a job in the kernel-global workqu    
545  * queued and leaves it in the same position o    
546  * workqueue otherwise.                           
547  */                                               
548 static inline bool schedule_work(struct work_s    
549 {                                                 
550         return queue_work(system_wq, work);       
551 }                                                 
552                                                   
553 /**                                               
554  * schedule_delayed_work_on - queue work in gl    
555  * @cpu: cpu to use                               
556  * @dwork: job to be done                         
557  * @delay: number of jiffies to wait              
558  *                                                
559  * After waiting for a given time this puts a     
560  * workqueue on the specified CPU.                
561  */                                               
562 static inline bool schedule_delayed_work_on(in    
563                                             un    
564 {                                                 
565         return queue_delayed_work_on(cpu, syst    
566 }                                                 
567                                                   
568 /**                                               
569  * schedule_delayed_work - put work task in gl    
570  * @dwork: job to be done                         
571  * @delay: number of jiffies to wait or 0 for     
572  *                                                
573  * After waiting for a given time this puts a     
574  * workqueue.                                     
575  */                                               
576 static inline bool schedule_delayed_work(struc    
577                                          unsig    
578 {                                                 
579         return queue_delayed_work(system_wq, d    
580 }                                                 
581                                                   
582 /**                                               
583  * keventd_up - is workqueue initialized yet?     
584  */                                               
585 static inline bool keventd_up(void)               
586 {                                                 
587         return system_wq != NULL;                 
588 }                                                 
589                                                   
590 /* used to be different but now identical to f    
591 static inline bool __deprecated flush_work_syn    
592 {                                                 
593         return flush_work(work);                  
594 }                                                 
595                                                   
596 /* used to be different but now identical to f    
597 static inline bool __deprecated flush_delayed_    
598 {                                                 
599         return flush_delayed_work(dwork);         
600 }                                                 
601                                                   
#ifndef CONFIG_SMP
/*
 * On a uniprocessor kernel there is only one CPU, so "run @fn on @cpu"
 * degenerates to a direct call; @cpu is intentionally ignored.
 */
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
610                                                   
#ifdef CONFIG_FREEZER
/* Freezer hooks; only available when CONFIG_FREEZER is enabled. */
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */
616                                                   
#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else	/* CONFIG_SYSFS */
/* Without sysfs there is nothing to register; report success. */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif	/* CONFIG_SYSFS */
623                                                   
624 #endif                                            
625                                                   

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp