/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(unsigned long __data);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one.
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	/*
	 * The last color is "no color", used for work items which don't
	 * participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* not bound to any CPU, prefer the local CPU */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/*
	 * Reserve 8 bits off of pwq pointer w/ debugobjects turned off.
	 * This makes pwqs aligned to 256 bytes and allows 15 workqueue
	 * flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,

	__WORK_OFFQ_CANCELING	= WORK_OFFQ_FLAG_BASE,
	WORK_OFFQ_CANCELING	= (1 << __WORK_OFFQ_CANCELING),

	/*
	 * When a work item is off queue, its high bits point to the last
	 * pool it was on.  Cap at 31 bits and use the highest number to
	 * indicate that no pool is associated.
	 */
	WORK_OFFQ_FLAG_BITS	= 1,
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,

	/* convenience constants */
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_POOL	= (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 24,
};
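
/*
 * Illustrative sketch (not part of the API, derived from the constants
 * above; assumes a 64-bit kernel with debugobjects off): while a work
 * item points to a pwq, its data word decomposes as
 *
 *	bits 63..8: pwq pointer (hence the 256-byte pwq alignment)
 *	bits  7..4: flush color
 *	bits  3..0: WORK_STRUCT_* flag bits
 *
 * and while off queue as
 *
 *	bits 35..5: last pool ID, capped at 31 bits; all ones
 *		    (WORK_OFFQ_POOL_NONE) means no associated pool
 *	bit      4: WORK_OFFQ_CANCELING
 *	bits  3..0: WORK_STRUCT_* flag bits
 *
 * A hypothetical helper extracting the flag/color portion might read:
 *
 *	static inline unsigned long work_struct_flags(struct work_struct *work)
 *	{
 *		return *work_data_bits(work) & WORK_STRUCT_FLAG_MASK;
 *	}
 */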

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 */
	cpumask_var_t cpumask;

	/**
	 * @no_numa: disable NUMA affinity
	 *
	 * Unlike other fields, ``no_numa`` isn't a property of a worker_pool.  It
	 * only modifies how :c:func:`apply_workqueue_attrs` selects pools and thus
	 * doesn't participate in pool hash calculations or equality comparisons.
	 */
	bool no_numa;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

struct execute_work {
	struct work_struct work;
};
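
/*
 * Example (illustrative sketch; struct my_dev and its fields are
 * hypothetical): a handler attached to a struct delayed_work can
 * recover the embedding object via to_delayed_work() plus
 * container_of():
 *
 *	struct my_dev {
 *		struct delayed_work poll_work;
 *	};
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *		struct my_dev *dev = container_of(dwork, struct my_dev,
 *						  poll_work);
 *
 *		...poll the hardware using dev here...
 *	}
 */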

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     0, (unsigned long)&(n),		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
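
/*
 * Example (illustrative sketch; the names are hypothetical): the
 * DECLARE_* macros define and statically initialize a work item in one
 * go, which suits file-scope items whose handler is fixed at compile
 * time:
 *
 *	static void my_reset_fn(struct work_struct *work)
 *	{
 *		...reset the controller here...
 *	}
 *
 *	static DECLARE_WORK(my_reset_work, my_reset_fn);
 *	static DECLARE_DELAYED_WORK(my_retry_work, my_reset_fn);
 *
 * Work items embedded in runtime-allocated objects should use
 * INIT_WORK() and friends below instead.
 */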

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__setup_timer(&(_work)->timer, delayed_work_timer_fn,	\
			      (unsigned long)(_work),			\
			      (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__setup_timer_on_stack(&(_work)->timer,			\
				       delayed_work_timer_fn,		\
				       (unsigned long)(_work),		\
				       (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work)						\
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w)						\
	work_pending(&(w)->work)
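
/*
 * Example (illustrative sketch; my_probe() and the struct my_dev
 * members are hypothetical): work items embedded in dynamically
 * allocated objects are initialized at runtime:
 *
 *	static int my_probe(struct my_dev *dev)
 *	{
 *		INIT_WORK(&dev->reset_work, my_reset_fn);
 *		INIT_DELAYED_WORK(&dev->poll_work, my_poll_fn);
 *		return 0;
 *	}
 *
 * Note that work_pending()/delayed_work_pending() only report the
 * PENDING bit; they say nothing about whether the handler is currently
 * executing.
 */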

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum {
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see wq_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU, breaking the idleness, which
	 * in turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel param
	 * is specified.  Per-cpu workqueues which are identified as
	 * contributing significantly to power consumption are marked with
	 * this flag; enabling the power_efficient mode then leads to
	 * noticeable power savings at the cost of a small performance
	 * penalty.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1480396
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached
 * and resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are the same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;
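
/*
 * Example (illustrative sketch; the work item names are hypothetical):
 * pick the system workqueue by the nature of the work:
 *
 *	queue_work(system_wq, &short_work);		short, flushed soon
 *	queue_work(system_long_wq, &slow_scan_work);	may run for a while
 *	queue_work(system_unbound_wq, &cpu_heavy_work);	scheduler picks CPU
 *
 * Work that must make progress under memory pressure cannot rely on
 * the shared system workqueues; it needs its own WQ_MEM_RECLAIM
 * workqueue (see alloc_workqueue() below).
 */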

extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
	struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @args...: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * The __lock_name macro dance is to guarantee that a single lock_class_key
 * doesn't end up with different names, which isn't allowed by lockdep.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(fmt, flags, max_active, args...)		\
({									\
	static struct lock_class_key __key;				\
	const char *__lock_name;					\
									\
	__lock_name = #fmt#args;					\
									\
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
			      &__key, __lock_name, ##args);		\
})
#else
#define alloc_workqueue(fmt, flags, max_active, args...)		\
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
			      NULL, NULL, ##args)
#endif

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args...: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)

#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);
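
/*
 * Example (illustrative sketch; the "mydrv" name is hypothetical): a
 * driver needing its own forward-progress guarantee allocates a
 * dedicated workqueue at probe time and destroys it on remove:
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("mydrv", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
 *	if (!wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(wq);	flushes, then releases the workqueue
 *
 * alloc_ordered_workqueue() is preferred over the legacy
 * create_singlethread_workqueue() when strict FIFO execution of at
 * most one item at a time is required.
 */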

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_workqueue_state(void);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}
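
/*
 * Example (illustrative sketch; struct my_dev and its members are
 * hypothetical): a common pattern is to kick work from atomic context
 * and make teardown wait for it:
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		schedule_work(&dev->event_work);	safe in hard irq
 *		return IRQ_HANDLED;
 *	}
 *
 *	static void my_remove(struct my_dev *dev)
 *	{
 *		cancel_delayed_work_sync(&dev->poll_work);
 *		cancel_work_sync(&dev->event_work);	waits if running
 *	}
 *
 * The _sync variants may sleep, so they must not be called from atomic
 * context or from the work item being cancelled.
 */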

/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
static inline void flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else /* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else /* CONFIG_WQ_WATCHDOG */
static inline void wq_watchdog_touch(int cpu) { }
#endif /* CONFIG_WQ_WATCHDOG */

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

int __init workqueue_init_early(void);
int __init workqueue_init(void);

#endif