// SPDX-License-Identifier: GPL-2.0
/*
 *  Kernel internal timers
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched/signal.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/nohz.h>
#include <linux/sched/debug.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/random.h>
#include <linux/sysctl.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#include "tick-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * The timer wheel has LVL_DEPTH array levels. Each level provides an array of
 * LVL_SIZE buckets. Each level is driven by its own clock and therefore each
 * level has a different granularity.
 *
 * The level granularity is:		LVL_CLK_DIV ^ level
 * The level clock frequency is:	HZ / (LVL_CLK_DIV ^ level)
 *
 * The array level of a newly armed timer depends on the relative expiry
 * time. The farther the expiry time is away the higher the array level and
 * therefore the granularity becomes.
 *
 * Contrary to the original timer wheel implementation, which aims for 'exact'
 * expiry of the timers, this implementation removes the need for recascading
 * the timers into the lower array levels. The previous 'classic' timer wheel
 * implementation of the kernel already violated the 'exact' expiry by adding
 * slack to the expiry time to provide batched expiration. The granularity
 * levels provide implicit batching.
 *
 * This is an optimization of the original timer wheel implementation for the
 * majority of the timer wheel use cases: timeouts. The vast majority of
 * timeout timers (networking, disk I/O ...) are canceled before expiry. If
 * the timeout expires it indicates that normal operation is disturbed, so it
 * does not matter much whether the timeout comes with a slight delay.
 *
 * The only exception to this are networking timers with a small expiry
 * time. They rely on the granularity. Those fit into the first wheel level,
 * which has HZ granularity.
 *
 * We don't have cascading anymore. Timers with an expiry time above the
 * capacity of the last wheel level are force expired at the maximum timeout
 * value of the last wheel level. From data sampling we know that the maximum
 * value observed is 5 days (network connection tracking), so this should not
 * be an issue.
 *
 * The currently chosen array constants are a good compromise between array
 * size and granularity.
 *
 * This results in the following granularity and range levels:
 *
 * HZ 1000 steps
 * Level Offset  Granularity            Range
 *  0      0         1 ms                0 ms -         63 ms
 *  1     64         8 ms               64 ms -        511 ms
 *  2    128        64 ms              512 ms -       4095 ms (512ms - ~4s)
 *  3    192       512 ms             4096 ms -      32767 ms (~4s - ~32s)
 *  4    256      4096 ms (~4s)      32768 ms -     262143 ms (~32s - ~4m)
 *  5    320     32768 ms (~32s)    262144 ms -    2097151 ms (~4m - ~34m)
 *  6    384    262144 ms (~4m)    2097152 ms -   16777215 ms (~34m - ~4h)
 *  7    448   2097152 ms (~34m)  16777216 ms -  134217727 ms (~4h - ~1d)
 *  8    512  16777216 ms (~4h)  134217728 ms - 1073741822 ms (~1d - ~12d)
 *
 * HZ  300
 * Level Offset  Granularity            Range
 *  0      0         3 ms                0 ms -        210 ms
 *  1     64        26 ms              213 ms -       1703 ms (213ms - ~1s)
 *  2    128       213 ms             1706 ms -      13650 ms (~1s - ~13s)
 *  3    192      1706 ms (~1s)      13653 ms -     109223 ms (~13s - ~1m)
 *  4    256     13653 ms (~13s)    109226 ms -     873810 ms (~1m - ~14m)
 *  5    320    109226 ms (~1m)     873813 ms -    6990503 ms (~14m - ~1h)
 *  6    384    873813 ms (~14m)   6990506 ms -   55924050 ms (~1h - ~15h)
 *  7    448   6990506 ms (~1h)   55924053 ms -  447392423 ms (~15h - ~5d)
 *  8    512  55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
 *
 * HZ  250
 * Level Offset  Granularity            Range
 *  0      0         4 ms                0 ms -        255 ms
 *  1     64        32 ms              256 ms -       2047 ms (256ms - ~2s)
 *  2    128       256 ms             2048 ms -      16383 ms (~2s - ~16s)
 *  3    192      2048 ms (~2s)      16384 ms -     131071 ms (~16s - ~2m)
 *  4    256     16384 ms (~16s)    131072 ms -    1048575 ms (~2m - ~17m)
 *  5    320    131072 ms (~2m)    1048576 ms -    8388607 ms (~17m - ~2h)
 *  6    384   1048576 ms (~17m)   8388608 ms -   67108863 ms (~2h - ~18h)
 *  7    448   8388608 ms (~2h)   67108864 ms -  536870911 ms (~18h - ~6d)
 *  8    512  67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
 *
 * HZ  100
 * Level Offset  Granularity            Range
 *  0      0        10 ms                0 ms -        630 ms
 *  1     64        80 ms              640 ms -       5110 ms (640ms - ~5s)
 *  2    128       640 ms             5120 ms -      40950 ms (~5s - ~40s)
 *  3    192      5120 ms (~5s)      40960 ms -     327670 ms (~40s - ~5m)
 *  4    256     40960 ms (~40s)    327680 ms -    2621430 ms (~5m - ~43m)
 *  5    320    327680 ms (~5m)    2621440 ms -   20971510 ms (~43m - ~5h)
 *  6    384   2621440 ms (~43m)  20971520 ms -  167772150 ms (~5h - ~1d)
 *  7    448  20971520 ms (~5h)  167772160 ms - 1342177270 ms (~1d - ~15d)
 */

/* Clock divisor for the next level */
#define LVL_CLK_SHIFT	3
#define LVL_CLK_DIV	(1UL << LVL_CLK_SHIFT)
#define LVL_CLK_MASK	(LVL_CLK_DIV - 1)
#define LVL_SHIFT(n)	((n) * LVL_CLK_SHIFT)
#define LVL_GRAN(n)	(1UL << LVL_SHIFT(n))

/*
 * The time start value for each level to select the bucket at enqueue
 * time. We start from the last possible delta of the previous level
 * so that we can later add an extra LVL_GRAN(n) to n (see calc_index()).
 */
#define LVL_START(n)	((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))
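
/*
 * Worked example (illustrative, assuming the defaults below: LVL_BITS = 6,
 * so LVL_SIZE = 64, and LVL_CLK_SHIFT = 3):
 *
 *	LVL_GRAN(0) = 1 tick,  LVL_GRAN(1) = 8,  LVL_GRAN(2) = 64, ...
 *	LVL_START(1) = 63, LVL_START(2) = 504, LVL_START(3) = 4032,
 *	LVL_START(4) = 32256, ...
 *
 * i.e. a timer armed 5000 ticks ahead (5 s at HZ 1000) is past LVL_START(3)
 * but below LVL_START(4) and therefore lands in level 3 with 512 tick
 * granularity, matching the HZ 1000 table above.
 */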

/* Size of each clock level */
#define LVL_BITS	6
#define LVL_SIZE	(1UL << LVL_BITS)
#define LVL_MASK	(LVL_SIZE - 1)
#define LVL_OFFS(n)	((n) * LVL_SIZE)

/* Level depth */
#if HZ > 100
# define LVL_DEPTH	9
#else
# define LVL_DEPTH	8
#endif

/* The cutoff (max. capacity of the wheel) */
#define WHEEL_TIMEOUT_CUTOFF	(LVL_START(LVL_DEPTH))
#define WHEEL_TIMEOUT_MAX	(WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))

/*
 * The resulting wheel size. If NOHZ is configured we allocate two
 * wheels so we have a separate storage for the deferrable timers.
 */
#define WHEEL_SIZE	(LVL_SIZE * LVL_DEPTH)

#ifdef CONFIG_NO_HZ_COMMON
# define NR_BASES	2
# define BASE_STD	0
# define BASE_DEF	1
#else
# define NR_BASES	1
# define BASE_STD	0
# define BASE_DEF	0
#endif
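
/*
 * For example, with the default LVL_BITS = 6 and LVL_DEPTH = 9 (any HZ above
 * 100), WHEEL_SIZE is 576 buckets per base and WHEEL_TIMEOUT_CUTOFF is
 * LVL_START(9) = 63 << 24 jiffies, i.e. roughly 12 days at HZ 1000 - which is
 * where the HZ 1000 table above ends.
 */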

/**
 * struct timer_base - Per CPU timer base (number of bases depends on config)
 * @lock:		Lock protecting the timer_base
 * @running_timer:	When expiring timers, the lock is dropped. To make
 *			sure not to race against deleting/modifying a
 *			currently running timer, the pointer is set to the
 *			timer, which expires at the moment. If no timer is
 *			running, the pointer is NULL.
 * @expiry_lock:	PREEMPT_RT only: Lock is taken in softirq around
 *			timer expiry callback execution and when trying to
 *			delete a running timer and it wasn't successful in
 *			the first attempt. It prevents priority inversion
 *			when the callback was preempted on a remote CPU and a
 *			caller tries to delete the running timer. It also
 *			prevents a livelock, when the task which tries to
 *			delete a timer preempted the softirq thread which
 *			is running the timer callback function.
 * @timer_waiters:	PREEMPT_RT only: Tells whether there is a waiter
 *			waiting for the end of the timer callback function
 *			execution.
 * @clk:		clock of the timer base; is updated before enqueue
 *			of a timer; during expiry, it is 1 offset ahead of
 *			jiffies to avoid endless requeuing to current
 *			jiffies
 * @next_expiry:	expiry value of the first timer; it is updated when
 *			finding the next timer and during enqueue; the
 *			value is not valid, when next_expiry_recalc is set
 * @cpu:		Number of CPU the timer base belongs to
 * @next_expiry_recalc: States whether a recalculation of next_expiry is
 *			required. Value is set true, when a timer was
 *			deleted.
 * @is_idle:		Is set, when timer_base is idle. It is triggered by NOHZ
 *			code. This state is only used in the standard
 *			base. Deferrable timers, which are enqueued remotely
 *			never wake up an idle CPU. So there is no need to
 *			support it for this base.
 * @timers_pending:	Is set, when a timer is pending in the base. It is only
 *			reliable when next_expiry_recalc is not set.
 * @pending_map:	bitmap of the timer wheel; each bit reflects a
 *			bucket of the wheel. When a bit is set, at least a
 *			single timer is enqueued in the related bucket.
 * @vectors:		Array of lists; Each array member reflects a bucket
 *			of the timer wheel. The list contains all timers
 *			which are enqueued into a specific bucket.
 */
struct timer_base {
	raw_spinlock_t		lock;
	struct timer_list	*running_timer;
#ifdef CONFIG_PREEMPT_RT
	spinlock_t		expiry_lock;
	atomic_t		timer_waiters;
#endif
	unsigned long		clk;
	unsigned long		next_expiry;
	unsigned int		cpu;
	bool			next_expiry_recalc;
	bool			is_idle;
	bool			timers_pending;
	DECLARE_BITMAP(pending_map, WHEEL_SIZE);
	struct hlist_head	vectors[WHEEL_SIZE];
} ____cacheline_aligned;

static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);

#ifdef CONFIG_NO_HZ_COMMON

static DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
static DEFINE_MUTEX(timer_keys_mutex);

static void timer_update_keys(struct work_struct *work);
static DECLARE_WORK(timer_update_work, timer_update_keys);

#ifdef CONFIG_SMP
static unsigned int sysctl_timer_migration = 1;

DEFINE_STATIC_KEY_FALSE(timers_migration_enabled);

static void timers_update_migration(void)
{
	if (sysctl_timer_migration && tick_nohz_active)
		static_branch_enable(&timers_migration_enabled);
	else
		static_branch_disable(&timers_migration_enabled);
}

#ifdef CONFIG_SYSCTL
static int timer_migration_handler(struct ctl_table *table, int write,
				   void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	mutex_lock(&timer_keys_mutex);
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (!ret && write)
		timers_update_migration();
	mutex_unlock(&timer_keys_mutex);
	return ret;
}

static struct ctl_table timer_sysctl[] = {
	{
		.procname	= "timer_migration",
		.data		= &sysctl_timer_migration,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= timer_migration_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{}
};

static int __init timer_sysctl_init(void)
{
	register_sysctl("kernel", timer_sysctl);
	return 0;
}
device_initcall(timer_sysctl_init);
#endif /* CONFIG_SYSCTL */
#else /* CONFIG_SMP */
static inline void timers_update_migration(void) { }
#endif /* !CONFIG_SMP */

static void timer_update_keys(struct work_struct *work)
{
	mutex_lock(&timer_keys_mutex);
	timers_update_migration();
	static_branch_enable(&timers_nohz_active);
	mutex_unlock(&timer_keys_mutex);
}

void timers_update_nohz(void)
{
	schedule_work(&timer_update_work);
}

static inline bool is_timers_nohz_active(void)
{
	return static_branch_unlikely(&timers_nohz_active);
}
#else
static inline bool is_timers_nohz_active(void) { return false; }
#endif /* NO_HZ_COMMON */

static unsigned long round_jiffies_common(unsigned long j, int cpu,
		bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then round, then subtract this
	 * extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffy is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	/*
	 * Make sure j is still in the future. Otherwise return the
	 * unmodified value.
	 */
	return time_is_after_jiffies(j) ? j : original;
}
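
/*
 * Illustration (assuming HZ=1000): for cpu=2 and a @j which is 80 ticks past
 * a full second, the skew first moves it to 86 past, rem=86 < HZ/4 rounds it
 * down to the full second, and removing the skew again yields a value 6 ticks
 * before that second. Each CPU thus gets its own slightly offset "full
 * second", so the rounded timers of different CPUs do not all fire on the
 * same tick.
 */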

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);
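
/*
 * Typical use is rounding a coarse periodic timer so that wakeups batch on
 * full seconds, e.g. (illustrative only):
 *
 *	mod_timer(&mything_timer, round_jiffies(jiffies + 10 * HZ));
 *
 * where mything_timer stands for any low-resolution housekeeping timer.
 */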

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);

/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);


static inline unsigned int timer_get_idx(struct timer_list *timer)
{
	return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT;
}

static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
{
	timer->flags = (timer->flags & ~TIMER_ARRAYMASK) |
			idx << TIMER_ARRAYSHIFT;
}

/*
 * Helper function to calculate the array index for a given expiry
 * time.
 */
static inline unsigned calc_index(unsigned long expires, unsigned lvl,
				  unsigned long *bucket_expiry)
{

	/*
	 * The timer wheel has to guarantee that a timer does not fire
	 * early. Early expiry can happen due to:
	 * - Timer is armed at the edge of a tick
	 * - Truncation of the expiry time in the outer wheel levels
	 *
	 * Round up with level granularity to prevent this.
	 */
	expires = (expires >> LVL_SHIFT(lvl)) + 1;
	*bucket_expiry = expires << LVL_SHIFT(lvl);
	return LVL_OFFS(lvl) + (expires & LVL_MASK);
}

static int calc_wheel_index(unsigned long expires, unsigned long clk,
			    unsigned long *bucket_expiry)
{
	unsigned long delta = expires - clk;
	unsigned int idx;

	if (delta < LVL_START(1)) {
		idx = calc_index(expires, 0, bucket_expiry);
	} else if (delta < LVL_START(2)) {
		idx = calc_index(expires, 1, bucket_expiry);
	} else if (delta < LVL_START(3)) {
		idx = calc_index(expires, 2, bucket_expiry);
	} else if (delta < LVL_START(4)) {
		idx = calc_index(expires, 3, bucket_expiry);
	} else if (delta < LVL_START(5)) {
		idx = calc_index(expires, 4, bucket_expiry);
	} else if (delta < LVL_START(6)) {
		idx = calc_index(expires, 5, bucket_expiry);
	} else if (delta < LVL_START(7)) {
		idx = calc_index(expires, 6, bucket_expiry);
	} else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
		idx = calc_index(expires, 7, bucket_expiry);
	} else if ((long) delta < 0) {
		idx = clk & LVL_MASK;
		*bucket_expiry = clk;
	} else {
		/*
		 * Force expire obscenely large timeouts to expire at the
		 * capacity limit of the wheel.
		 */
		if (delta >= WHEEL_TIMEOUT_CUTOFF)
			expires = clk + WHEEL_TIMEOUT_MAX;

		idx = calc_index(expires, LVL_DEPTH - 1, bucket_expiry);
	}
	return idx;
}
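
/*
 * Worked example (illustrative, HZ=1000): with base->clk = 1000000 and
 * expires = 1005000 the delta of 5000 selects level 3 (LVL_START(3) <= 5000 <
 * LVL_START(4)). calc_index() rounds up with the 512 tick granularity of that
 * level: 1005000 >> 9 is 1962, plus one gives bucket_expiry = 1963 << 9 =
 * 1005056 and index 192 + (1963 & 63) = 235. The timer can thus fire up to 56
 * ticks late, but never early.
 */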

static void
trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
{
	/*
	 * Deferrable timers do not prevent the CPU from entering dynticks and
	 * are not taken into account on the idle/nohz_full path. An IPI when a
	 * new deferrable timer is enqueued will wake up the remote CPU but
	 * nothing will be done with the deferrable timer base. Therefore skip
	 * the remote IPI for deferrable timers completely.
	 */
	if (!is_timers_nohz_active() || timer->flags & TIMER_DEFERRABLE)
		return;

	/*
	 * We might have to IPI the remote CPU if the base is idle and the
	 * timer is not deferrable. If the other CPU is on the way to idle
	 * then it can't set base->is_idle as we hold the base lock:
	 */
	if (base->is_idle)
		wake_up_nohz_cpu(base->cpu);
}

/*
 * Enqueue the timer into the hash bucket, mark it pending in
 * the bitmap, store the index in the timer flags then wake up
 * the target CPU if needed.
 */
static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
			  unsigned int idx, unsigned long bucket_expiry)
{

	hlist_add_head(&timer->entry, base->vectors + idx);
	__set_bit(idx, base->pending_map);
	timer_set_idx(timer, idx);

	trace_timer_start(timer, bucket_expiry);

	/*
	 * Check whether this is the new first expiring timer. The
	 * effective expiry time of the timer is required here
	 * (bucket_expiry) instead of timer->expires.
	 */
	if (time_before(bucket_expiry, base->next_expiry)) {
		/*
		 * Set the next expiry time and kick the CPU so it
		 * can reevaluate the wheel:
		 */
		base->next_expiry = bucket_expiry;
		base->timers_pending = true;
		base->next_expiry_recalc = false;
		trigger_dyntick_cpu(base, timer);
	}
}

static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
{
	unsigned long bucket_expiry;
	unsigned int idx;

	idx = calc_wheel_index(timer->expires, base->clk, &bucket_expiry);
	enqueue_timer(base, timer, idx, bucket_expiry);
}

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static const struct debug_obj_descr timer_debug_descr;

struct timer_hint {
	void	(*function)(struct timer_list *t);
	long	offset;
};

#define TIMER_HINT(fn, container, timr, hintfn)			\
	{							\
		.function = fn,					\
		.offset	  = offsetof(container, hintfn) -	\
			    offsetof(container, timr)		\
	}

static const struct timer_hint timer_hints[] = {
	TIMER_HINT(delayed_work_timer_fn,
		   struct delayed_work, timer, work.func),
	TIMER_HINT(kthread_delayed_work_timer_fn,
		   struct kthread_delayed_work, timer, work.func),
};

static void *timer_debug_hint(void *addr)
{
	struct timer_list *timer = addr;
	int i;

	for (i = 0; i < ARRAY_SIZE(timer_hints); i++) {
		if (timer_hints[i].function == timer->function) {
			void (**fn)(void) = addr + timer_hints[i].offset;

			return *fn;
		}
	}

	return timer->function;
}

static bool timer_is_static_object(void *addr)
{
	struct timer_list *timer = addr;

	return (timer->entry.pprev == NULL &&
		timer->entry.next == TIMER_ENTRY_STATIC);
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return true;
	default:
		return false;
	}
}

/* Stub timer callback for improperly used timers. */
static void stub_timer(struct timer_list *unused)
{
	WARN_ON(1);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		timer_setup(timer, stub_timer, 0);
		return true;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);
		fallthrough;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_assert_init is called when:
 * - an untracked/uninit-ed object is found
 */
static bool timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		timer_setup(timer, stub_timer, 0);
		return true;
	default:
		return false;
	}
}

static const struct debug_obj_descr timer_debug_descr = {
	.name			= "timer_list",
	.debug_hint		= timer_debug_hint,
	.is_static_object	= timer_is_static_object,
	.fixup_init		= timer_fixup_init,
	.fixup_activate		= timer_fixup_activate,
	.fixup_free		= timer_fixup_free,
	.fixup_assert_init	= timer_fixup_assert_init,
};

static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
	debug_object_assert_init(timer, &timer_debug_descr);
}

static void do_init_timer(struct timer_list *timer,
			  void (*func)(struct timer_list *),
			  unsigned int flags,
			  const char *name, struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer,
			     void (*func)(struct timer_list *),
			     unsigned int flags,
			     const char *name, struct lock_class_key *key)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	do_init_timer(timer, func, flags, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif

static inline void debug_init(struct timer_list *timer)
{
	debug_timer_init(timer);
	trace_timer_init(timer);
}

static inline void debug_deactivate(struct timer_list *timer)
{
	debug_timer_deactivate(timer);
	trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
	debug_timer_assert_init(timer);
}

static void do_init_timer(struct timer_list *timer,
			  void (*func)(struct timer_list *),
			  unsigned int flags,
			  const char *name, struct lock_class_key *key)
{
	timer->entry.pprev = NULL;
	timer->function = func;
	if (WARN_ON_ONCE(flags & ~TIMER_INIT_FLAGS))
		flags &= TIMER_INIT_FLAGS;
	timer->flags = flags | raw_smp_processor_id();
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @func: timer callback function
 * @flags: timer flags
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer,
		    void (*func)(struct timer_list *), unsigned int flags,
		    const char *name, struct lock_class_key *key)
{
	debug_init(timer);
	do_init_timer(timer, func, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);
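
/*
 * Typical usage (illustrative only; "mything" and its members are made up):
 *
 *	timer_setup(&mything->timer, mything_timeout_fn, 0);
 *	mod_timer(&mything->timer, jiffies + msecs_to_jiffies(100));
 *	...
 *	timer_delete_sync(&mything->timer);
 *
 * timer_setup() is the usual wrapper which ends up in init_timer_key() and
 * must run before the timer is armed for the first time.
 */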

static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
	struct hlist_node *entry = &timer->entry;

	debug_deactivate(timer);

	__hlist_del(entry);
	if (clear_pending)
		entry->pprev = NULL;
	entry->next = LIST_POISON2;
}

static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
			     bool clear_pending)
{
	unsigned idx = timer_get_idx(timer);

	if (!timer_pending(timer))
		return 0;

	if (hlist_is_singular_node(&timer->entry, base->vectors + idx)) {
		__clear_bit(idx, base->pending_map);
		base->next_expiry_recalc = true;
	}

	detach_timer(timer, clear_pending);
	return 1;
}

static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
{
	struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);

	/*
	 * If the timer is deferrable and NO_HZ_COMMON is set then we need
	 * to use the deferrable base.
	 */
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
		base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
	return base;
}

static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

	/*
	 * If the timer is deferrable and NO_HZ_COMMON is set then we need
	 * to use the deferrable base.
	 */
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
		base = this_cpu_ptr(&timer_bases[BASE_DEF]);
	return base;
}

static inline struct timer_base *get_timer_base(u32 tflags)
{
	return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
}

static inline struct timer_base *
get_target_base(struct timer_base *base, unsigned tflags)
{
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
	if (static_branch_likely(&timers_migration_enabled) &&
	    !(tflags & TIMER_PINNED))
		return get_timer_cpu_base(tflags, get_nohz_timer_target());
#endif
	return get_timer_this_cpu_base(tflags);
}

static inline void __forward_timer_base(struct timer_base *base,
					unsigned long basej)
{
	/*
	 * Check whether we can forward the base. We can only do that when
	 * @basej is past base->clk otherwise we might rewind base->clk.
	 */
	if (time_before_eq(basej, base->clk))
		return;

	/*
	 * If the next expiry value is > jiffies, then we fast forward to
	 * jiffies otherwise we forward to the next expiry value.
	 */
	if (time_after(base->next_expiry, basej)) {
		base->clk = basej;
	} else {
		if (WARN_ON_ONCE(time_before(base->next_expiry, base->clk)))
			return;
		base->clk = base->next_expiry;
	}

}

static inline void forward_timer_base(struct timer_base *base)
{
	__forward_timer_base(base, READ_ONCE(jiffies));
}

/*
 * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
 * that all timers which are tied to this base are locked, and the base itself
 * is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found in the base->vectors array.
 *
 * When a timer is migrating then the TIMER_MIGRATING flag is set and we need
 * to wait until the migration is done.
 */
static struct timer_base *lock_timer_base(struct timer_list *timer,
					  unsigned long *flags)
	__acquires(timer->base->lock)
{
	for (;;) {
		struct timer_base *base;
		u32 tf;

		/*
		 * We need to use READ_ONCE() here, otherwise the compiler
		 * might re-read @tf between the check for TIMER_MIGRATING
		 * and spin_lock().
		 */
		tf = READ_ONCE(timer->flags);

		if (!(tf & TIMER_MIGRATING)) {
			base = get_timer_base(tf);
			raw_spin_lock_irqsave(&base->lock, *flags);
			if (timer->flags == tf)
				return base;
			raw_spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

#define MOD_TIMER_PENDING_ONLY		0x01
#define MOD_TIMER_REDUCE		0x02
#define MOD_TIMER_NOTPENDING		0x04
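
/*
 * How the option bits above are used by the public wrappers further down
 * (descriptive summary, not new behaviour):
 *
 *  MOD_TIMER_PENDING_ONLY - only requeue an already pending timer
 *			     (mod_timer_pending())
 *  MOD_TIMER_REDUCE	   - only requeue if the new expiry is earlier than
 *			     the current one (timer_reduce())
 *  MOD_TIMER_NOTPENDING   - caller guarantees the timer is not pending, so
 *			     the pending fast path is skipped (add_timer())
 */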

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires, unsigned int options)
{
	unsigned long clk = 0, flags, bucket_expiry;
	struct timer_base *base, *new_base;
	unsigned int idx = UINT_MAX;
	int ret = 0;

	debug_assert_init(timer);

	/*
	 * This is a common optimization triggered by the networking code - if
	 * the timer is re-modified to have the same timeout or ends up in the
	 * same array bucket then just return:
	 */
	if (!(options & MOD_TIMER_NOTPENDING) && timer_pending(timer)) {
		/*
		 * The downside of this optimization is that it can result in
		 * larger granularity than you would get from adding a new
		 * timer with this expiry.
		 */
		long diff = timer->expires - expires;

		if (!diff)
			return 1;
		if (options & MOD_TIMER_REDUCE && diff <= 0)
			return 1;

		/*
		 * We lock timer base and calculate the bucket index right
		 * here. If the timer ends up in the same bucket, then we
		 * just update the expiry time and avoid the whole
		 * dequeue/enqueue dance.
		 */
		base = lock_timer_base(timer, &flags);
		/*
		 * Has @timer been shutdown? This needs to be evaluated
		 * while holding base lock to prevent a race against the
		 * shutdown code.
		 */
		if (!timer->function)
			goto out_unlock;

		forward_timer_base(base);

		if (timer_pending(timer) && (options & MOD_TIMER_REDUCE) &&
		    time_before_eq(timer->expires, expires)) {
			ret = 1;
			goto out_unlock;
		}

		clk = base->clk;
		idx = calc_wheel_index(expires, clk, &bucket_expiry);

		/*
		 * Retrieve and compare the array index of the pending
		 * timer. If it matches set the expiry to the new value so a
		 * subsequent call will exit in the expires check above.
		 */
		if (idx == timer_get_idx(timer)) {
			if (!(options & MOD_TIMER_REDUCE))
				timer->expires = expires;
			else if (time_after(timer->expires, expires))
				timer->expires = expires;
			ret = 1;
			goto out_unlock;
		}
	} else {
		base = lock_timer_base(timer, &flags);
		/*
		 * Has @timer been shutdown? This needs to be evaluated
		 * while holding base lock to prevent a race against the
		 * shutdown code.
		 */
		if (!timer->function)
			goto out_unlock;

		forward_timer_base(base);
	}

	ret = detach_if_pending(timer, base, false);
	if (!ret && (options & MOD_TIMER_PENDING_ONLY))
		goto out_unlock;

	new_base = get_target_base(base, timer->flags);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the new base.
		 * However we can't change the timer's base while it is
		 * running, otherwise timer_delete_sync() can't detect that
		 * the timer's handler has not yet finished. This also
		 * guarantees that the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer->flags |= TIMER_MIGRATING;

			raw_spin_unlock(&base->lock);
			base = new_base;
			raw_spin_lock(&base->lock);
			WRITE_ONCE(timer->flags,
				   (timer->flags & ~TIMER_BASEMASK) | base->cpu);
			forward_timer_base(base);
		}
	}

	debug_timer_activate(timer);

	timer->expires = expires;
	/*
	 * If 'idx' was calculated above and the base time did not advance
	 * between calculating 'idx' and possibly switching the base, only
	 * enqueue_timer() is required. Otherwise we need to (re)calculate
	 * the wheel index via internal_add_timer().
	 */
	if (idx != UINT_MAX && clk == base->clk)
		enqueue_timer(base, timer, idx, bucket_expiry);
	else
		internal_add_timer(base, timer);

out_unlock:
	raw_spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/**
 * mod_timer_pending - Modify a pending timer's timeout
 * @timer:	The pending timer to be modified
 * @expires:	New absolute timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(), but
 * will not activate inactive timers.
 *
 * If @timer->function == NULL then the start operation is silently
 * discarded.
 *
 * Return:
 * * %0 - The timer was inactive and not modified or was in
 *	  shutdown state and the operation was discarded
 * * %1 - The timer was active and requeued to expire at @expires
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, MOD_TIMER_PENDING_ONLY);
}
EXPORT_SYMBOL(mod_timer_pending);

/**
 * mod_timer - Modify a timer's timeout
 * @timer:	The timer to be modified
 * @expires:	New absolute timeout in jiffies
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * mod_timer() is more efficient than the above open coded sequence. In
 * case that the timer is inactive, the del_timer() part is a NOP. The
 * timer is in any case activated with the new expiry time @expires.
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * If @timer->function == NULL then the start operation is silently
 * discarded. In this case the return value is 0 and meaningless.
 *
 * Return:
 * * %0 - The timer was inactive and started or was in shutdown
 *	  state and the operation was discarded
 * * %1 - The timer was active and requeued to expire at @expires or
 *	  the timer was active and not modified because @expires did
 *	  not change the effective expiry time
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_timer);

/**
 * timer_reduce - Modify a timer's timeout if it would reduce the timeout
 * @timer:	The timer to be modified
 * @expires:	New absolute timeout in jiffies
 *
 * timer_reduce() is very similar to mod_timer(), except that it will only
 * modify an enqueued timer if that would reduce the expiration time. If
 * @timer is not enqueued it starts the timer.
 *
 * If @timer->function == NULL then the start operation is silently
 * discarded.
 *
 * Return:
 * * %0 - The timer was inactive and started or was in shutdown
 *	  state and the operation was discarded
 * * %1 - The timer was active and requeued to expire at @expires or
 *	  the timer was active and not modified because @expires
 *	  did not change the effective expiry time such that the
 *	  timer would expire earlier than already scheduled
 */
int timer_reduce(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, MOD_TIMER_REDUCE);
}
EXPORT_SYMBOL(timer_reduce);

/**
 * add_timer - Start a timer
 * @timer:	The timer to be started
 *
 * Start @timer to expire at @timer->expires in the future. @timer->expires
 * is the absolute expiry time measured in 'jiffies'. When the timer expires
 * timer->function(timer) will be invoked from soft interrupt context.
 *
 * The @timer->expires and @timer->function fields must be set prior
 * to calling this function.
 *
 * If @timer->function == NULL then the start operation is silently
 * discarded.
 *
 * If @timer->expires is already in the past @timer will be queued to
 * expire at the next timer tick.
 *
 * This can only operate on an inactive timer. Attempts to invoke this on
 * an active timer are rejected with a warning.
 */
void add_timer(struct timer_list *timer)
{
	if (WARN_ON_ONCE(timer_pending(timer)))
		return;
	__mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING);
}
EXPORT_SYMBOL(add_timer);
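
/*
 * Illustration of the difference (hypothetical watchdog timer, not part of
 * this file): assume the watchdog is currently queued for jiffies + 100.
 *
 *	mod_timer(&watchdog, jiffies + 200);	 moves the expiry out to +200
 *	timer_reduce(&watchdog, jiffies + 200);	 no-op, +100 is already earlier
 *	timer_reduce(&watchdog, jiffies + 50);	 pulls the expiry in to +50
 */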

/**
 * add_timer_on - Start a timer on a particular CPU
 * @timer:	The timer to be started
 * @cpu:	The CPU to start it on
 *
 * Same as add_timer() except that it starts the timer on the given CPU.
 *
 * See add_timer() for further details.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct timer_base *new_base, *base;
	unsigned long flags;

	debug_assert_init(timer);

	if (WARN_ON_ONCE(timer_pending(timer)))
		return;

	new_base = get_timer_cpu_base(timer->flags, cpu);

	/*
	 * If @timer was on a different CPU, it should be migrated with the
	 * old base locked to prevent other operations proceeding with the
	 * wrong base locked. See lock_timer_base().
	 */
	base = lock_timer_base(timer, &flags);
	/*
	 * Has @timer been shutdown? This needs to be evaluated while
	 * holding base lock to prevent a race against the shutdown code.
	 */
	if (!timer->function)
		goto out_unlock;

	if (base != new_base) {
		timer->flags |= TIMER_MIGRATING;

		raw_spin_unlock(&base->lock);
		base = new_base;
		raw_spin_lock(&base->lock);
		WRITE_ONCE(timer->flags,
			   (timer->flags & ~TIMER_BASEMASK) | cpu);
	}
	forward_timer_base(base);

	debug_timer_activate(timer);
	internal_add_timer(base, timer);
out_unlock:
	raw_spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);

/**
 * __timer_delete - Internal function: Deactivate a timer
 * @timer:	The timer to be deactivated
 * @shutdown:	If true, this indicates that the timer is about to be
 *		shutdown permanently.
 *
 * If @shutdown is true then @timer->function is set to NULL under the
 * timer base lock which prevents further rearming of the timer. In that
 * case any attempt to rearm @timer after this function returns will be
 * silently ignored.
 *
 * Return:
 * * %0 - The timer was not pending
 * * %1 - The timer was pending and deactivated
 */
static int __timer_delete(struct timer_list *timer, bool shutdown)
{
	struct timer_base *base;
	unsigned long flags;
	int ret = 0;

	debug_assert_init(timer);

	/*
	 * If @shutdown is set then the lock has to be taken whether the
	 * timer is pending or not to protect against a concurrent rearm
	 * which might hit between the lockless pending check and the lock
	 * acquisition. By taking the lock it is ensured that such a newly
	 * enqueued timer is dequeued and cannot end up with
	 * timer->function == NULL in the expiry code.
	 *
	 * If timer->function is currently executed, then this makes sure
	 * that the callback cannot requeue the timer.
	 */
	if (timer_pending(timer) || shutdown) {
		base = lock_timer_base(timer, &flags);
		ret = detach_if_pending(timer, base, true);
		if (shutdown)
			timer->function = NULL;
		raw_spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}

/**
 * timer_delete - Deactivate a timer
 * @timer:	The timer to be deactivated
 *
 * The function only deactivates a pending timer, but contrary to
 * timer_delete_sync() it does not take into account whether the timer's
 * callback function is concurrently executed on a different CPU or not.
 * Neither does it prevent rearming of the timer. If @timer can be rearmed
 * concurrently then the return value of this function is meaningless.
 *
 * Return:
 * * %0 - The timer was not pending
 * * %1 - The timer was pending and deactivated
 */
int timer_delete(struct timer_list *timer)
{
	return __timer_delete(timer, false);
}
EXPORT_SYMBOL(timer_delete);

/**
 * timer_shutdown - Deactivate a timer and prevent rearming
 * @timer:	The timer to be deactivated
 *
 * The function does not wait for a possibly running timer callback on a
 * different CPU, but it prevents rearming of the timer. Any attempt to arm
 * @timer after this function returns will be silently ignored.
 *
 * This function is useful for teardown code and should only be used when
 * timer_shutdown_sync() cannot be invoked due to locking or context constraints.
 *
 * Return:
 * * %0 - The timer was not pending
 * * %1 - The timer was pending
 */
int timer_shutdown(struct timer_list *timer)
{
	return __timer_delete(timer, true);
}
EXPORT_SYMBOL_GPL(timer_shutdown);

/**
 * __try_to_del_timer_sync - Internal function: Try to deactivate a timer
 * @timer:	Timer to deactivate
 * @shutdown:	If true, this indicates that the timer is about to be
 *		shutdown permanently.
 *
 * If @shutdown is true then @timer->function is set to NULL under the
 * timer base lock which prevents further rearming of the timer. Any
 * attempt to rearm @timer after this function returns will be silently
 * ignored.
 *
 * This function cannot guarantee that the timer cannot be rearmed
 * right after dropping the base lock if @shutdown is false. That
 * needs to be prevented by the calling code if necessary.
 *
 * Return:
 * * %0 - The timer was not pending
 * * %1 - The timer was pending and deactivated
 * * %-1 - The timer callback function is running on a different CPU
 */
static int __try_to_del_timer_sync(struct timer_list *timer, bool shutdown)
{
	struct timer_base *base;
	unsigned long flags;
	int ret = -1;

	debug_assert_init(timer);

	base = lock_timer_base(timer, &flags);

	if (base->running_timer != timer)
		ret = detach_if_pending(timer, base, true);
	if (shutdown)
		timer->function = NULL;

	raw_spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer:	Timer to deactivate
 *
 * This function tries to deactivate a timer. On success the timer is not
 * queued and the timer callback function is not running on any CPU.
 *
 * This function does not guarantee that the timer cannot be rearmed right
 * after dropping the base lock.
 * That needs to be prevented by the calling code if necessary.
 *
 * Return:
 * * %0 - The timer was not pending
 * * %1 - The timer was pending and deactivated
 * * %-1 - The timer callback function is running on a different CPU
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	return __try_to_del_timer_sync(timer, false);
}
EXPORT_SYMBOL(try_to_del_timer_sync);

#ifdef CONFIG_PREEMPT_RT
static __init void timer_base_init_expiry_lock(struct timer_base *base)
{
	spin_lock_init(&base->expiry_lock);
}

static inline void timer_base_lock_expiry(struct timer_base *base)
{
	spin_lock(&base->expiry_lock);
}

static inline void timer_base_unlock_expiry(struct timer_base *base)
{
	spin_unlock(&base->expiry_lock);
}

/*
 * The counterpart to del_timer_wait_running().
 *
 * If there is a waiter for base->expiry_lock, then it was waiting for the
 * timer callback to finish. Drop expiry_lock and reacquire it. That allows
 * the waiter to acquire the lock and make progress.
 */
static void timer_sync_wait_running(struct timer_base *base)
{
	if (atomic_read(&base->timer_waiters)) {
		raw_spin_unlock_irq(&base->lock);
		spin_unlock(&base->expiry_lock);
		spin_lock(&base->expiry_lock);
		raw_spin_lock_irq(&base->lock);
	}
}

/*
 * This function is called on PREEMPT_RT kernels when the fast path
 * deletion of a timer failed because the timer callback function was
 * running.
 *
 * This prevents priority inversion, if the softirq thread on a remote CPU
 * got preempted, and it prevents a livelock when the task which tries to
 * delete a timer preempted the softirq thread running the timer callback
 * function.
 */
static void del_timer_wait_running(struct timer_list *timer)
{
	u32 tf;

	tf = READ_ONCE(timer->flags);
	if (!(tf & (TIMER_MIGRATING | TIMER_IRQSAFE))) {
		struct timer_base *base = get_timer_base(tf);

		/*
		 * Mark the base as contended and grab the expiry lock,
		 * which is held by the softirq across the timer
		 * callback. Drop the lock immediately so the softirq can
		 * expire the next timer. In theory the timer could already
		 * be running again, but that's more than unlikely and just
		 * causes another wait loop.
		 */
		atomic_inc(&base->timer_waiters);
		spin_lock_bh(&base->expiry_lock);
		atomic_dec(&base->timer_waiters);
		spin_unlock_bh(&base->expiry_lock);
	}
}
#else
static inline void timer_base_init_expiry_lock(struct timer_base *base) { }
static inline void timer_base_lock_expiry(struct timer_base *base) { }
static inline void timer_base_unlock_expiry(struct timer_base *base) { }
static inline void timer_sync_wait_running(struct timer_base *base) { }
static inline void del_timer_wait_running(struct timer_list *timer) { }
#endif

/**
 * __timer_delete_sync - Internal function: Deactivate a timer and wait
 *			 for the handler to finish.
 * @timer:	The timer to be deactivated
 * @shutdown:	If true, @timer->function will be set to NULL under the
 *		timer base lock which prevents rearming of @timer
 *
 * If @shutdown is not set the timer can be rearmed later. If the timer can
 * be rearmed concurrently, i.e. after dropping the base lock then the
 * return value is meaningless.
 *
 * If @shutdown is set then @timer->function is set to NULL under timer
 * base lock which prevents rearming of the timer. Any attempt to rearm
 * a shutdown timer is silently ignored.
 *
 * If the timer should be reused after shutdown it has to be initialized
 * again.
 *
 * Return:
 * * %0 - The timer was not pending
 * * %1 - The timer was pending and deactivated
 */
static int __timer_delete_sync(struct timer_list *timer, bool shutdown)
{
	int ret;

#ifdef CONFIG_LOCKDEP
	unsigned long flags;

	/*
	 * If lockdep gives a backtrace here, please reference
	 * the synchronization rules above.
	 */
	local_irq_save(flags);
	lock_map_acquire(&timer->lockdep_map);
	lock_map_release(&timer->lockdep_map);
	local_irq_restore(flags);
#endif
	/*
	 * don't use it in hardirq context, because it
	 * could lead to deadlock.
	 */
	WARN_ON(in_hardirq() && !(timer->flags & TIMER_IRQSAFE));

	/*
	 * Must be able to sleep on PREEMPT_RT because of the slowpath in
	 * del_timer_wait_running().
	 */
	if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(timer->flags & TIMER_IRQSAFE))
		lockdep_assert_preemption_enabled();

	do {
		ret = __try_to_del_timer_sync(timer, shutdown);

		if (unlikely(ret < 0)) {
			del_timer_wait_running(timer);
			cpu_relax();
		}
	} while (ret < 0);

	return ret;
}

/**
 * timer_delete_sync - Deactivate a timer and wait for the handler to finish.
 * @timer:	The timer to be deactivated
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts unless the timer is an irqsafe one. The caller must
 * not hold locks which would prevent completion of the timer's callback
 * function. The timer's handler must not call add_timer_on(). Upon exit
 * the timer is not queued and the handler is not running on any CPU.
 *
 * For !irqsafe timers, the caller must not hold locks that are held in
 * interrupt context. Even if the lock has nothing to do with the timer in
 * question. Here's why::
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                     <SOFTIRQ>
 *                                       call_timer_fn();
 *                                       base->running_timer = mytimer;
 *    spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                        spin_lock(somelock);
 *    timer_delete_sync(mytimer);
 *    while (base->running_timer == mytimer);
 *
 * Now timer_delete_sync() will never return and never release somelock.
 * The interrupt on the other CPU is waiting to grab somelock but it has
 * interrupted the softirq that CPU0 is waiting to finish.
 *
 * This function cannot guarantee that the timer is not rearmed again by
 * some concurrent or preempting code, right after it dropped the base
 * lock. If there is the possibility of a concurrent rearm then the return
 * value of the function is meaningless.
 *
 * If such a guarantee is needed, e.g. for teardown situations then use
 * timer_shutdown_sync() instead.
 *
 * Return:
 * * %0 - The timer was not pending
 * * %1 - The timer was pending and deactivated
 */
int timer_delete_sync(struct timer_list *timer)
{
	return __timer_delete_sync(timer, false);
}
EXPORT_SYMBOL(timer_delete_sync);

/**
 * timer_shutdown_sync - Shutdown a timer and prevent rearming
 * @timer:	The timer to be shutdown
 *
 * When the function returns it is guaranteed that:
 *   - @timer is not queued
 *   - The callback function of @timer is not running
 *   - @timer cannot be enqueued again. Any attempt to rearm
 *     @timer is silently ignored.
 *
 * See timer_delete_sync() for synchronization rules.
 *
 * This function is useful for final teardown of an infrastructure where
 * the timer is subject to a circular dependency problem.
 *
 * A common pattern for this is a timer and a workqueue where the timer can
 * schedule work and work can arm the timer. On shutdown the workqueue must
 * be destroyed and the timer must be prevented from rearming. Unless the
 * code has conditionals like 'if (mything->in_shutdown)' to prevent that,
 * there is no way to get this correct with timer_delete_sync().
 *
 * timer_shutdown_sync() solves this problem. The correct ordering of calls
 * in this case is:
 *
 *	timer_shutdown_sync(&mything->timer);
 *	workqueue_destroy(&mything->workqueue);
 *
 * After this 'mything' can be safely freed.
 *
 * This obviously implies that the timer is not required to be functional
 * for the rest of the shutdown operation.
 *
 * Return:
 * * %0 - The timer was not pending
 * * %1 - The timer was pending
 */
int timer_shutdown_sync(struct timer_list *timer)
{
	return __timer_delete_sync(timer, true);
}
EXPORT_SYMBOL_GPL(timer_shutdown_sync);
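
/*
 * Sketch of the circular dependency described above (hypothetical names, for
 * illustration only):
 *
 *	struct mything {
 *		struct timer_list timer;	 timer_fn() queues the work
 *		struct work_struct work;	 work_fn() may mod_timer()
 *	};
 *
 * Only the shutdown ordering above - timer_shutdown_sync() first, then
 * destroying the workqueue - guarantees that neither side can re-arm the
 * other while 'mything' is being torn down.
 */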
1666 * 1667 * Return: 1668 * * %0 - The timer was not pending 1669 * * %1 - The timer was pending and deactivated 1670 */ 1671 int timer_delete_sync(struct timer_list *timer) 1672 { 1673 return __timer_delete_sync(timer, false); 1674 } 1675 EXPORT_SYMBOL(timer_delete_sync); 1676 1677 /** 1678 * timer_shutdown_sync - Shutdown a timer and prevent rearming 1679 * @timer: The timer to be shutdown 1680 * 1681 * When the function returns it is guaranteed that: 1682 * - @timer is not queued 1683 * - The callback function of @timer is not running 1684 * - @timer cannot be enqueued again. Any attempt to rearm 1685 * @timer is silently ignored. 1686 * 1687 * See timer_delete_sync() for synchronization rules. 1688 * 1689 * This function is useful for final teardown of an infrastructure where 1690 * the timer is subject to a circular dependency problem. 1691 * 1692 * A common pattern for this is a timer and a workqueue where the timer can 1693 * schedule work and work can arm the timer. On shutdown the workqueue must 1694 * be destroyed and the timer must be prevented from rearming. Unless the 1695 * code has conditionals like 'if (mything->in_shutdown)' to prevent that 1696 * there is no way to get this correct with timer_delete_sync(). 1697 * 1698 * timer_shutdown_sync() is solving the problem. The correct ordering of 1699 * calls in this case is: 1700 * 1701 * timer_shutdown_sync(&mything->timer); 1702 * workqueue_destroy(&mything->workqueue); 1703 * 1704 * After this 'mything' can be safely freed. 1705 * 1706 * This obviously implies that the timer is not required to be functional 1707 * for the rest of the shutdown operation. 1708 * 1709 * Return: 1710 * * %0 - The timer was not pending 1711 * * %1 - The timer was pending 1712 */ 1713 int timer_shutdown_sync(struct timer_list *timer) 1714 { 1715 return __timer_delete_sync(timer, true); 1716 } 1717 EXPORT_SYMBOL_GPL(timer_shutdown_sync); 1718 1719 static void call_timer_fn(struct timer_list *timer, 1720 void (*fn)(struct timer_list *), 1721 unsigned long baseclk) 1722 { 1723 int count = preempt_count(); 1724 1725 #ifdef CONFIG_LOCKDEP 1726 /* 1727 * It is permissible to free the timer from inside the 1728 * function that is called from it, this we need to take into 1729 * account for lockdep too. To avoid bogus "held lock freed" 1730 * warnings as well as problems when looking into 1731 * timer->lockdep_map, make a copy and use that here. 1732 */ 1733 struct lockdep_map lockdep_map; 1734 1735 lockdep_copy_map(&lockdep_map, &timer->lockdep_map); 1736 #endif 1737 /* 1738 * Couple the lock chain with the lock chain at 1739 * timer_delete_sync() by acquiring the lock_map around the fn() 1740 * call here and in timer_delete_sync(). 1741 */ 1742 lock_map_acquire(&lockdep_map); 1743 1744 trace_timer_expire_entry(timer, baseclk); 1745 fn(timer); 1746 trace_timer_expire_exit(timer); 1747 1748 lock_map_release(&lockdep_map); 1749 1750 if (count != preempt_count()) { 1751 WARN_ONCE(1, "timer: %pS preempt leak: %08x -> %08x\n", 1752 fn, count, preempt_count()); 1753 /* 1754 * Restore the preempt count. That gives us a decent 1755 * chance to survive and extract information. If the 1756 * callback kept a lock held, bad luck, but not worse 1757 * than the BUG() we had. 1758 */ 1759 preempt_count_set(count); 1760 } 1761 } 1762 1763 static void expire_timers(struct timer_base *base, struct hlist_head *head) 1764 { 1765 /* 1766 * This value is required only for tracing. base->clk was 1767 * incremented directly before expire_timers was called. 
But expiry 1768 * is related to the old base->clk value. 1769 */ 1770 unsigned long baseclk = base->clk - 1; 1771 1772 while (!hlist_empty(head)) { 1773 struct timer_list *timer; 1774 void (*fn)(struct timer_list *); 1775 1776 timer = hlist_entry(head->first, struct timer_list, entry); 1777 1778 base->running_timer = timer; 1779 detach_timer(timer, true); 1780 1781 fn = timer->function; 1782 1783 if (WARN_ON_ONCE(!fn)) { 1784 /* Should never happen. Emphasis on should! */ 1785 base->running_timer = NULL; 1786 continue; 1787 } 1788 1789 if (timer->flags & TIMER_IRQSAFE) { 1790 raw_spin_unlock(&base->lock); 1791 call_timer_fn(timer, fn, baseclk); 1792 raw_spin_lock(&base->lock); 1793 base->running_timer = NULL; 1794 } else { 1795 raw_spin_unlock_irq(&base->lock); 1796 call_timer_fn(timer, fn, baseclk); 1797 raw_spin_lock_irq(&base->lock); 1798 base->running_timer = NULL; 1799 timer_sync_wait_running(base); 1800 } 1801 } 1802 } 1803 1804 static int collect_expired_timers(struct timer_base *base, 1805 struct hlist_head *heads) 1806 { 1807 unsigned long clk = base->clk = base->next_expiry; 1808 struct hlist_head *vec; 1809 int i, levels = 0; 1810 unsigned int idx; 1811 1812 for (i = 0; i < LVL_DEPTH; i++) { 1813 idx = (clk & LVL_MASK) + i * LVL_SIZE; 1814 1815 if (__test_and_clear_bit(idx, base->pending_map)) { 1816 vec = base->vectors + idx; 1817 hlist_move_list(vec, heads++); 1818 levels++; 1819 } 1820 /* Is it time to look at the next level? */ 1821 if (clk & LVL_CLK_MASK) 1822 break; 1823 /* Shift clock for the next level granularity */ 1824 clk >>= LVL_CLK_SHIFT; 1825 } 1826 return levels; 1827 } 1828 1829 /* 1830 * Find the next pending bucket of a level. Search from level start (@offset) 1831 * + @clk upwards and if nothing there, search from start of the level 1832 * (@offset) up to @offset + clk. 1833 */ 1834 static int next_pending_bucket(struct timer_base *base, unsigned offset, 1835 unsigned clk) 1836 { 1837 unsigned pos, start = offset + clk; 1838 unsigned end = offset + LVL_SIZE; 1839 1840 pos = find_next_bit(base->pending_map, end, start); 1841 if (pos < end) 1842 return pos - start; 1843 1844 pos = find_next_bit(base->pending_map, start, offset); 1845 return pos < start ? pos + LVL_SIZE - start : -1; 1846 } 1847 1848 /* 1849 * Search the first expiring timer in the various clock levels. Caller must 1850 * hold base->lock. 1851 * 1852 * Store next expiry time in base->next_expiry. 1853 */ 1854 static void next_expiry_recalc(struct timer_base *base) 1855 { 1856 unsigned long clk, next, adj; 1857 unsigned lvl, offset = 0; 1858 1859 next = base->clk + NEXT_TIMER_MAX_DELTA; 1860 clk = base->clk; 1861 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { 1862 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); 1863 unsigned long lvl_clk = clk & LVL_CLK_MASK; 1864 1865 if (pos >= 0) { 1866 unsigned long tmp = clk + (unsigned long) pos; 1867 1868 tmp <<= LVL_SHIFT(lvl); 1869 if (time_before(tmp, next)) 1870 next = tmp; 1871 1872 /* 1873 * If the next expiration happens before we reach 1874 * the next level, no need to check further. 1875 */ 1876 if (pos <= ((LVL_CLK_DIV - lvl_clk) & LVL_CLK_MASK)) 1877 break; 1878 } 1879 /* 1880 * Clock for the next level. If the current level clock lower 1881 * bits are zero, we look at the next level as is. If not we 1882 * need to advance it by one because that's going to be the 1883 * next expiring bucket in that level. base->clk is the next 1884 * expiring jiffie. 
So in case of: 1885 * 1886 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0 1887 * 0 0 0 0 0 0 1888 * 1889 * we have to look at all levels @index 0. With 1890 * 1891 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0 1892 * 0 0 0 0 0 2 1893 * 1894 * LVL0 has the next expiring bucket @index 2. The upper 1895 * levels have the next expiring bucket @index 1. 1896 * 1897 * In case that the propagation wraps the next level the same 1898 * rules apply: 1899 * 1900 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0 1901 * 0 0 0 0 F 2 1902 * 1903 * So after looking at LVL0 we get: 1904 * 1905 * LVL5 LVL4 LVL3 LVL2 LVL1 1906 * 0 0 0 1 0 1907 * 1908 * So no propagation from LVL1 to LVL2 because that happened 1909 * with the add already, but then we need to propagate further 1910 * from LVL2 to LVL3. 1911 * 1912 * So the simple check whether the lower bits of the current 1913 * level are 0 or not is sufficient for all cases. 1914 */ 1915 adj = lvl_clk ? 1 : 0; 1916 clk >>= LVL_CLK_SHIFT; 1917 clk += adj; 1918 } 1919 1920 base->next_expiry = next; 1921 base->next_expiry_recalc = false; 1922 base->timers_pending = !(next == base->clk + NEXT_TIMER_MAX_DELTA); 1923 } 1924 1925 #ifdef CONFIG_NO_HZ_COMMON 1926 /* 1927 * Check, if the next hrtimer event is before the next timer wheel 1928 * event: 1929 */ 1930 static u64 cmp_next_hrtimer_event(u64 basem, u64 expires) 1931 { 1932 u64 nextevt = hrtimer_get_next_event(); 1933 1934 /* 1935 * If high resolution timers are enabled 1936 * hrtimer_get_next_event() returns KTIME_MAX. 1937 */ 1938 if (expires <= nextevt) 1939 return expires; 1940 1941 /* 1942 * If the next timer is already expired, return the tick base 1943 * time so the tick is fired immediately. 1944 */ 1945 if (nextevt <= basem) 1946 return basem; 1947 1948 /* 1949 * Round up to the next jiffie. High resolution timers are 1950 * off, so the hrtimers are expired in the tick and we need to 1951 * make sure that this tick really expires the timer to avoid 1952 * a ping pong of the nohz stop code. 1953 * 1954 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3 1955 */ 1956 return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC; 1957 } 1958 1959 /** 1960 * get_next_timer_interrupt - return the time (clock mono) of the next timer 1961 * @basej: base time jiffies 1962 * @basem: base time clock monotonic 1963 * 1964 * Returns the tick aligned clock monotonic time of the next pending 1965 * timer or KTIME_MAX if no timer is pending. 1966 */ 1967 u64 get_next_timer_interrupt(unsigned long basej, u64 basem) 1968 { 1969 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); 1970 unsigned long nextevt = basej + NEXT_TIMER_MAX_DELTA; 1971 u64 expires = KTIME_MAX; 1972 bool was_idle; 1973 1974 /* 1975 * Pretend that there is no timer pending if the cpu is offline. 1976 * Possible pending timers will be migrated later to an active cpu. 1977 */ 1978 if (cpu_is_offline(smp_processor_id())) 1979 return expires; 1980 1981 raw_spin_lock(&base->lock); 1982 if (base->next_expiry_recalc) 1983 next_expiry_recalc(base); 1984 1985 /* 1986 * We have a fresh next event. Check whether we can forward the 1987 * base. 
1988 */ 1989 __forward_timer_base(base, basej); 1990 1991 if (base->timers_pending) { 1992 nextevt = base->next_expiry; 1993 1994 /* If we missed a tick already, force 0 delta */ 1995 if (time_before(nextevt, basej)) 1996 nextevt = basej; 1997 expires = basem + (u64)(nextevt - basej) * TICK_NSEC; 1998 } else { 1999 /* 2000 * Move next_expiry for the empty base into the future to 2001 * prevent a unnecessary raise of the timer softirq when the 2002 * next_expiry value will be reached even if there is no timer 2003 * pending. 2004 */ 2005 base->next_expiry = nextevt; 2006 } 2007 2008 /* 2009 * Base is idle if the next event is more than a tick away. 2010 * 2011 * If the base is marked idle then any timer add operation must forward 2012 * the base clk itself to keep granularity small. This idle logic is 2013 * only maintained for the BASE_STD base, deferrable timers may still 2014 * see large granularity skew (by design). 2015 */ 2016 was_idle = base->is_idle; 2017 base->is_idle = time_after(nextevt, basej + 1); 2018 if (was_idle != base->is_idle) 2019 trace_timer_base_idle(base->is_idle, base->cpu); 2020 2021 raw_spin_unlock(&base->lock); 2022 2023 return cmp_next_hrtimer_event(basem, expires); 2024 } 2025 2026 /** 2027 * timer_clear_idle - Clear the idle state of the timer base 2028 * 2029 * Called with interrupts disabled 2030 */ 2031 void timer_clear_idle(void) 2032 { 2033 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); 2034 2035 /* 2036 * We do this unlocked. The worst outcome is a remote enqueue sending 2037 * a pointless IPI, but taking the lock would just make the window for 2038 * sending the IPI a few instructions smaller for the cost of taking 2039 * the lock in the exit from idle path. 2040 */ 2041 if (base->is_idle) { 2042 base->is_idle = false; 2043 trace_timer_base_idle(false, smp_processor_id()); 2044 } 2045 } 2046 #endif 2047 2048 /** 2049 * __run_timers - run all expired timers (if any) on this CPU. 2050 * @base: the timer vector to be processed. 2051 */ 2052 static inline void __run_timers(struct timer_base *base) 2053 { 2054 struct hlist_head heads[LVL_DEPTH]; 2055 int levels; 2056 2057 if (time_before(jiffies, base->next_expiry)) 2058 return; 2059 2060 timer_base_lock_expiry(base); 2061 raw_spin_lock_irq(&base->lock); 2062 2063 while (time_after_eq(jiffies, base->clk) && 2064 time_after_eq(jiffies, base->next_expiry)) { 2065 levels = collect_expired_timers(base, heads); 2066 /* 2067 * The two possible reasons for not finding any expired 2068 * timer at this clk are that all matching timers have been 2069 * dequeued or no timer has been queued since 2070 * base::next_expiry was set to base::clk + 2071 * NEXT_TIMER_MAX_DELTA. 2072 */ 2073 WARN_ON_ONCE(!levels && !base->next_expiry_recalc 2074 && base->timers_pending); 2075 /* 2076 * While executing timers, base->clk is set 1 offset ahead of 2077 * jiffies to avoid endless requeuing to current jiffies. 2078 */ 2079 base->clk++; 2080 next_expiry_recalc(base); 2081 2082 while (levels--) 2083 expire_timers(base, heads + levels); 2084 } 2085 raw_spin_unlock_irq(&base->lock); 2086 timer_base_unlock_expiry(base); 2087 } 2088 2089 /* 2090 * This function runs timers and the timer-tq in bottom half context. 
2091 */ 2092 static __latent_entropy void run_timer_softirq(struct softirq_action *h) 2093 { 2094 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); 2095 2096 __run_timers(base); 2097 if (IS_ENABLED(CONFIG_NO_HZ_COMMON)) 2098 __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF])); 2099 } 2100 2101 /* 2102 * Called by the local, per-CPU timer interrupt on SMP. 2103 */ 2104 static void run_local_timers(void) 2105 { 2106 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); 2107 2108 hrtimer_run_queues(); 2109 /* Raise the softirq only if required. */ 2110 if (time_before(jiffies, base->next_expiry)) { 2111 if (!IS_ENABLED(CONFIG_NO_HZ_COMMON)) 2112 return; 2113 /* CPU is awake, so check the deferrable base. */ 2114 base++; 2115 if (time_before(jiffies, base->next_expiry)) 2116 return; 2117 } 2118 raise_softirq(TIMER_SOFTIRQ); 2119 } 2120 2121 /* 2122 * Called from the timer interrupt handler to charge one tick to the current 2123 * process. user_tick is 1 if the tick is user time, 0 for system. 2124 */ 2125 void update_process_times(int user_tick) 2126 { 2127 struct task_struct *p = current; 2128 2129 /* Note: this timer irq context must be accounted for as well. */ 2130 account_process_tick(p, user_tick); 2131 run_local_timers(); 2132 rcu_sched_clock_irq(user_tick); 2133 #ifdef CONFIG_IRQ_WORK 2134 if (in_irq()) 2135 irq_work_tick(); 2136 #endif 2137 scheduler_tick(); 2138 if (IS_ENABLED(CONFIG_POSIX_TIMERS)) 2139 run_posix_cpu_timers(); 2140 } 2141 2142 /* 2143 * Since schedule_timeout()'s timer is defined on the stack, it must store 2144 * the target task on the stack as well. 2145 */ 2146 struct process_timer { 2147 struct timer_list timer; 2148 struct task_struct *task; 2149 }; 2150 2151 static void process_timeout(struct timer_list *t) 2152 { 2153 struct process_timer *timeout = from_timer(timeout, t, timer); 2154 2155 wake_up_process(timeout->task); 2156 } 2157 2158 /** 2159 * schedule_timeout - sleep until timeout 2160 * @timeout: timeout value in jiffies 2161 * 2162 * Make the current task sleep until @timeout jiffies have elapsed. 2163 * The function behavior depends on the current task state 2164 * (see also set_current_state() description): 2165 * 2166 * %TASK_RUNNING - the scheduler is called, but the task does not sleep 2167 * at all. That happens because sched_submit_work() does nothing for 2168 * tasks in %TASK_RUNNING state. 2169 * 2170 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to 2171 * pass before the routine returns unless the current task is explicitly 2172 * woken up, (e.g. by wake_up_process()). 2173 * 2174 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is 2175 * delivered to the current task or the current task is explicitly woken 2176 * up. 2177 * 2178 * The current task state is guaranteed to be %TASK_RUNNING when this 2179 * routine returns. 2180 * 2181 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule 2182 * the CPU away without a bound on the timeout. In this case the return 2183 * value will be %MAX_SCHEDULE_TIMEOUT. 2184 * 2185 * Returns 0 when the timer has expired otherwise the remaining time in 2186 * jiffies will be returned. In all cases the return value is guaranteed 2187 * to be non-negative. 
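 *
 * As an illustrative sketch only (not a pattern mandated by this file),
 * sleeping for up to 100 ms while remaining wakeable could look like::
 *
 *      signed long remaining;
 *
 *      set_current_state(TASK_INTERRUPTIBLE);
 *      remaining = schedule_timeout(msecs_to_jiffies(100));
 *      if (remaining)
 *              pr_debug("woken %ld jiffies early\n", remaining);
 *
 * The schedule_timeout_interruptible()/_killable()/_uninterruptible()
 * wrappers below fold the set_current_state() step into the call for the
 * common cases.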
2188 */
2189 signed long __sched schedule_timeout(signed long timeout)
2190 {
2191 struct process_timer timer;
2192 unsigned long expire;
2193
2194 switch (timeout)
2195 {
2196 case MAX_SCHEDULE_TIMEOUT:
2197 /*
2198 * These two special cases exist purely for the caller's
2199 * convenience. Nothing more. We could take
2200 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
2201 * but I'd like to return a valid offset (>=0) to allow
2202 * the caller to do everything it wants with the retval.
2203 */
2204 schedule();
2205 goto out;
2206 default:
2207 /*
2208 * Another bit of paranoia. Note that the retval will be
2209 * 0 since no piece of the kernel is supposed to check
2210 * for a negative retval of schedule_timeout() (since it
2211 * should never happen anyway). You just have the printk()
2212 * that will tell you if something has gone wrong and where.
2213 */
2214 if (timeout < 0) {
2215 printk(KERN_ERR "schedule_timeout: wrong timeout "
2216 "value %lx\n", timeout);
2217 dump_stack();
2218 __set_current_state(TASK_RUNNING);
2219 goto out;
2220 }
2221 }
2222
2223 expire = timeout + jiffies;
2224
2225 timer.task = current;
2226 timer_setup_on_stack(&timer.timer, process_timeout, 0);
2227 __mod_timer(&timer.timer, expire, MOD_TIMER_NOTPENDING);
2228 schedule();
2229 del_timer_sync(&timer.timer);
2230
2231 /* Remove the timer from the object tracker */
2232 destroy_timer_on_stack(&timer.timer);
2233
2234 timeout = expire - jiffies;
2235
2236 out:
2237 return timeout < 0 ? 0 : timeout;
2238 }
2239 EXPORT_SYMBOL(schedule_timeout);
2240
2241 /*
2242 * We can use __set_current_state() here because schedule_timeout() calls
2243 * schedule() unconditionally.
2244 */
2245 signed long __sched schedule_timeout_interruptible(signed long timeout)
2246 {
2247 __set_current_state(TASK_INTERRUPTIBLE);
2248 return schedule_timeout(timeout);
2249 }
2250 EXPORT_SYMBOL(schedule_timeout_interruptible);
2251
2252 signed long __sched schedule_timeout_killable(signed long timeout)
2253 {
2254 __set_current_state(TASK_KILLABLE);
2255 return schedule_timeout(timeout);
2256 }
2257 EXPORT_SYMBOL(schedule_timeout_killable);
2258
2259 signed long __sched schedule_timeout_uninterruptible(signed long timeout)
2260 {
2261 __set_current_state(TASK_UNINTERRUPTIBLE);
2262 return schedule_timeout(timeout);
2263 }
2264 EXPORT_SYMBOL(schedule_timeout_uninterruptible);
2265
2266 /*
2267 * Like schedule_timeout_uninterruptible(), except this task will not contribute
2268 * to load average.
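 *
 * As an illustrative sketch only: a housekeeping kthread which polls once
 * per second as part of normal operation might use::
 *
 *      schedule_timeout_idle(HZ);
 *
 * so that its routine sleep is not accounted as uninterruptible sleep in
 * the load average.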
2269 */ 2270 signed long __sched schedule_timeout_idle(signed long timeout) 2271 { 2272 __set_current_state(TASK_IDLE); 2273 return schedule_timeout(timeout); 2274 } 2275 EXPORT_SYMBOL(schedule_timeout_idle); 2276 2277 #ifdef CONFIG_HOTPLUG_CPU 2278 static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head) 2279 { 2280 struct timer_list *timer; 2281 int cpu = new_base->cpu; 2282 2283 while (!hlist_empty(head)) { 2284 timer = hlist_entry(head->first, struct timer_list, entry); 2285 detach_timer(timer, false); 2286 timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu; 2287 internal_add_timer(new_base, timer); 2288 } 2289 } 2290 2291 int timers_prepare_cpu(unsigned int cpu) 2292 { 2293 struct timer_base *base; 2294 int b; 2295 2296 for (b = 0; b < NR_BASES; b++) { 2297 base = per_cpu_ptr(&timer_bases[b], cpu); 2298 base->clk = jiffies; 2299 base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA; 2300 base->next_expiry_recalc = false; 2301 base->timers_pending = false; 2302 base->is_idle = false; 2303 } 2304 return 0; 2305 } 2306 2307 int timers_dead_cpu(unsigned int cpu) 2308 { 2309 struct timer_base *old_base; 2310 struct timer_base *new_base; 2311 int b, i; 2312 2313 for (b = 0; b < NR_BASES; b++) { 2314 old_base = per_cpu_ptr(&timer_bases[b], cpu); 2315 new_base = get_cpu_ptr(&timer_bases[b]); 2316 /* 2317 * The caller is globally serialized and nobody else 2318 * takes two locks at once, deadlock is not possible. 2319 */ 2320 raw_spin_lock_irq(&new_base->lock); 2321 raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); 2322 2323 /* 2324 * The current CPUs base clock might be stale. Update it 2325 * before moving the timers over. 2326 */ 2327 forward_timer_base(new_base); 2328 2329 WARN_ON_ONCE(old_base->running_timer); 2330 old_base->running_timer = NULL; 2331 2332 for (i = 0; i < WHEEL_SIZE; i++) 2333 migrate_timer_list(new_base, old_base->vectors + i); 2334 2335 raw_spin_unlock(&old_base->lock); 2336 raw_spin_unlock_irq(&new_base->lock); 2337 put_cpu_ptr(&timer_bases); 2338 } 2339 return 0; 2340 } 2341 2342 #endif /* CONFIG_HOTPLUG_CPU */ 2343 2344 static void __init init_timer_cpu(int cpu) 2345 { 2346 struct timer_base *base; 2347 int i; 2348 2349 for (i = 0; i < NR_BASES; i++) { 2350 base = per_cpu_ptr(&timer_bases[i], cpu); 2351 base->cpu = cpu; 2352 raw_spin_lock_init(&base->lock); 2353 base->clk = jiffies; 2354 base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA; 2355 timer_base_init_expiry_lock(base); 2356 } 2357 } 2358 2359 static void __init init_timer_cpus(void) 2360 { 2361 int cpu; 2362 2363 for_each_possible_cpu(cpu) 2364 init_timer_cpu(cpu); 2365 } 2366 2367 void __init init_timers(void) 2368 { 2369 init_timer_cpus(); 2370 posix_cputimers_init_work(); 2371 open_softirq(TIMER_SOFTIRQ, run_timer_softirq); 2372 } 2373 2374 /** 2375 * msleep - sleep safely even with waitqueue interruptions 2376 * @msecs: Time in milliseconds to sleep for 2377 */ 2378 void msleep(unsigned int msecs) 2379 { 2380 unsigned long timeout = msecs_to_jiffies(msecs) + 1; 2381 2382 while (timeout) 2383 timeout = schedule_timeout_uninterruptible(timeout); 2384 } 2385 2386 EXPORT_SYMBOL(msleep); 2387 2388 /** 2389 * msleep_interruptible - sleep waiting for signals 2390 * @msecs: Time in milliseconds to sleep for 2391 */ 2392 unsigned long msleep_interruptible(unsigned int msecs) 2393 { 2394 unsigned long timeout = msecs_to_jiffies(msecs) + 1; 2395 2396 while (timeout && !signal_pending(current)) 2397 timeout = schedule_timeout_interruptible(timeout); 2398 return 
jiffies_to_msecs(timeout);
2399 }
2400
2401 EXPORT_SYMBOL(msleep_interruptible);
2402
2403 /**
2404 * usleep_range_state - Sleep for an approximate time in a given state
2405 * @min: Minimum time in usecs to sleep
2406 * @max: Maximum time in usecs to sleep
2407 * @state: State the current task will be in while sleeping
2408 *
2409 * In non-atomic context where the exact wakeup time is flexible, use
2410 * usleep_range_state() instead of udelay(). The sleep improves responsiveness
2411 * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
2412 * power usage by allowing hrtimers to take advantage of an already-
2413 * scheduled interrupt instead of scheduling a new one just for this sleep.
2414 */
2415 void __sched usleep_range_state(unsigned long min, unsigned long max,
2416 unsigned int state)
2417 {
2418 ktime_t exp = ktime_add_us(ktime_get(), min);
2419 u64 delta = (u64)(max - min) * NSEC_PER_USEC;
2420
2421 for (;;) {
2422 __set_current_state(state);
2423 /* Do not return before the requested sleep time has elapsed */
2424 if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
2425 break;
2426 }
2427 }
2428 EXPORT_SYMBOL(usleep_range_state);
2429
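/*
 * Illustrative sketch only: the teardown ordering described in the
 * timer_shutdown_sync() kernel-doc above, spelled out with a hypothetical
 * structure. 'struct mything', its members and mything_teardown() are
 * made-up names for this example and do not exist in this file; the
 * workqueue is torn down with the regular destroy_workqueue() API.
 *
 *      struct mything {
 *              struct timer_list timer;
 *              struct workqueue_struct *wq;
 *              struct work_struct work;
 *      };
 *
 *      static void mything_teardown(struct mything *m)
 *      {
 *              // After this the timer is neither pending nor running and
 *              // any later attempt to rearm it is silently ignored.
 *              timer_shutdown_sync(&m->timer);
 *
 *              // Work items may still run and try to rearm the timer,
 *              // which is now harmless. Drain and destroy the workqueue,
 *              // then free the object.
 *              destroy_workqueue(m->wq);
 *              kfree(m);
 *      }
 */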