/*
 * linux/kernel/time/clocksource.c
 *
 * This file contains the functions which manage clocksource drivers.
 *
 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * TODO WishList:
 *   o Allow clocksource drivers to be unregistered
 */

#include <linux/clocksource.h>
#include <linux/sysdev.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>
#include <linux/kthread.h>

void timecounter_init(struct timecounter *tc,
		      const struct cyclecounter *cc,
		      u64 start_tstamp)
{
	tc->cc = cc;
	tc->cycle_last = cc->read(cc);
	tc->nsec = start_tstamp;
}
EXPORT_SYMBOL_GPL(timecounter_init);

/**
 * timecounter_read_delta - get nanoseconds since last call of this function
 * @tc:		Pointer to time counter
 *
 * When the underlying cycle counter runs over, this will be handled
 * correctly as long as it does not run over more than once between
 * calls.
 *
 * The first call to this function for a new time counter initializes
 * the time tracking and returns an undefined result.
 */
static u64 timecounter_read_delta(struct timecounter *tc)
{
	cycle_t cycle_now, cycle_delta;
	u64 ns_offset;

	/* read cycle counter: */
	cycle_now = tc->cc->read(tc->cc);

	/* calculate the delta since the last timecounter_read_delta(): */
	cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;

	/* convert to nanoseconds: */
	ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta);

	/* update time stamp of timecounter_read_delta() call: */
	tc->cycle_last = cycle_now;

	return ns_offset;
}

u64 timecounter_read(struct timecounter *tc)
{
	u64 nsec;

	/* increment time by nanoseconds since last call */
	nsec = timecounter_read_delta(tc);
	nsec += tc->nsec;
	tc->nsec = nsec;

	return nsec;
}
EXPORT_SYMBOL_GPL(timecounter_read);

u64 timecounter_cyc2time(struct timecounter *tc,
			 cycle_t cycle_tstamp)
{
	u64 cycle_delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
	u64 nsec;

	/*
	 * Instead of always treating cycle_tstamp as more recent
	 * than tc->cycle_last, detect when it is too far in the
	 * future and treat it as an old time stamp instead.
	 */
	if (cycle_delta > tc->cc->mask / 2) {
		cycle_delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
		nsec = tc->nsec - cyclecounter_cyc2ns(tc->cc, cycle_delta);
	} else {
		nsec = cyclecounter_cyc2ns(tc->cc, cycle_delta) + tc->nsec;
	}

	return nsec;
}
EXPORT_SYMBOL_GPL(timecounter_cyc2time);
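
/*
 * Illustrative usage sketch (editorial note, not part of the original
 * file): a driver wrapping a free-running 32-bit hardware counter would
 * set up a cyclecounter/timecounter pair roughly like below. foo_read()
 * and foo_hw_count() are hypothetical; mult/shift would typically come
 * from clocks_calc_mult_shift().
 *
 *	static cycle_t foo_read(const struct cyclecounter *cc)
 *	{
 *		return foo_hw_count();
 *	}
 *
 *	static struct cyclecounter foo_cc = {
 *		.read	= foo_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *	};
 *	static struct timecounter foo_tc;
 *
 *	timecounter_init(&foo_tc, &foo_cc, start_ns);
 *
 * After that, timecounter_read(&foo_tc) returns start_ns plus the
 * nanoseconds elapsed since the timecounter_init() call.
 */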

/**
 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
 * @mult:	pointer to mult variable
 * @shift:	pointer to shift variable
 * @from:	frequency to convert from
 * @to:		frequency to convert to
 * @maxsec:	guaranteed runtime conversion range in seconds
 *
 * The function evaluates the shift/mult pair for the scaled math
 * operations of clocksources and clockevents.
 *
 * @to and @from are frequency values in HZ. For clock sources @to is
 * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
 * events @to is the counter frequency and @from is NSEC_PER_SEC.
 *
 * The @maxsec conversion range argument controls the time frame in
 * seconds which must be covered by the runtime conversion with the
 * calculated mult and shift factors. This guarantees that no 64bit
 * overflow happens when the input value of the conversion is
 * multiplied with the calculated mult factor. Larger ranges may
 * reduce the conversion accuracy by choosing smaller mult and shift
 * factors.
 */
void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
{
	u64 tmp;
	u32 sft, sftacc = 32;

	/*
	 * Calculate the shift factor which is limiting the conversion
	 * range:
	 */
	tmp = ((u64)maxsec * from) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}

	/*
	 * Find the conversion shift/mult pair which has the best
	 * accuracy and fits the maxsec conversion range:
	 */
	for (sft = 32; sft > 0; sft--) {
		tmp = (u64) to << sft;
		tmp += from / 2;
		do_div(tmp, from);
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = tmp;
	*shift = sft;
}
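
/*
 * Worked example (illustrative, not from the original source): converting
 * a 32768 Hz counter to nanoseconds with a guaranteed range of 600 s:
 *
 *	u32 mult, shift;
 *
 *	clocks_calc_mult_shift(&mult, &shift, 32768, NSEC_PER_SEC, 600);
 *
 * yields mult = 4000000000 and shift = 17, so one counter cycle converts
 * to (1 * mult) >> shift = 30517 ns, matching 1 s / 32768 = 30517.58 ns.
 */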

/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[32];
static int finished_booting;

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);

static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static int watchdog_running;

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)

static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * If kthread_run fails, the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}

static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;
	if (finished_booting)
		schedule_work(&watchdog_work);
}

static void clocksource_unstable(struct clocksource *cs, int64_t delta)
{
	printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
	       cs->name, delta);
	__clocksource_unstable(cs);
}

/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs:		clocksource to be marked unstable
 *
 * This function is called instead of clocksource_change_rating from
 * cpu hotplug code to avoid a deadlock between the clocksource mutex
 * and the cpu hotplug mutex. It defers the update of the clocksource
 * to the watchdog thread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		if (list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_watchdog(unsigned long data)
{
	struct clocksource *cs;
	cycle_t csnow, wdnow;
	int64_t wd_nsec, cs_nsec;
	int next_cpu;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			if (finished_booting)
				schedule_work(&watchdog_work);
			continue;
		}

		local_irq_disable();
		csnow = cs->read(cs);
		wdnow = watchdog->read(watchdog);
		local_irq_enable();

		/* Clocksource initialized? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = wdnow;
			cs->cs_last = csnow;
			continue;
		}

		wd_nsec = clocksource_cyc2ns((wdnow - cs->wd_last) & watchdog->mask,
					     watchdog->mult, watchdog->shift);

		cs_nsec = clocksource_cyc2ns((csnow - cs->cs_last) &
					     cs->mask, cs->mult, cs->shift);
		cs->cs_last = csnow;
		cs->wd_last = wdnow;

		/* Check the deviation from the watchdog clocksource. */
		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
			clocksource_unstable(cs, cs_nsec - wd_nsec);
			continue;
		}

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
			/*
			 * We just marked the clocksource as highres-capable,
			 * notify the rest of the system as well so that we
			 * transition into high-res mode:
			 */
			tick_clock_notify();
		}
	}

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);
	watchdog_timer.expires += WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, next_cpu);
out:
	spin_unlock(&watchdog_lock);
}
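
/*
 * Note on the masked subtraction used above (illustrative): computing
 * (now - last) & mask keeps the delta correct across a single counter
 * wrap. For example, for a hypothetical 8-bit counter (mask = 0xff) with
 * last = 0xf0 that has wrapped once to now = 0x10:
 *
 *	(0x10 - 0xf0) & 0xff = 0x20
 *
 * i.e. 0x20 cycles elapsed, which is the correct distance.
 */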

static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	init_timer(&watchdog_timer);
	watchdog_timer.function = clocksource_watchdog;
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}

static inline void clocksource_stop_watchdog(void)
{
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}

static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}

static void clocksource_resume_watchdog(void)
{
	unsigned long flags;

	/*
	 * We use trylock here to avoid a potential deadlock when
	 * kgdb calls this code after the kernel has been stopped with
	 * watchdog_lock held. When watchdog_lock is held we just
	 * return and accept that the watchdog might trigger and mark
	 * the monitored clock source (usually the TSC) unstable.
	 *
	 * This does not affect the other caller clocksource_resume()
	 * because at this point the kernel is UP, interrupts are
	 * disabled and nothing can hold watchdog_lock.
	 */
	if (!spin_trylock_irqsave(&watchdog_lock, flags))
		return;
	clocksource_reset_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating) {
			watchdog = cs;
			/* Reset watchdog cycles */
			clocksource_reset_watchdog();
		}
	}
	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	struct clocksource *tmp;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a watched clocksource. */
		list_del_init(&cs->wd_list);
	} else if (cs == watchdog) {
		/* Reset watchdog cycles */
		clocksource_reset_watchdog();
		/* Current watchdog is removed. Find an alternative. */
		watchdog = NULL;
		list_for_each_entry(tmp, &clocksource_list, list) {
			if (tmp == cs || tmp->flags & CLOCK_SOURCE_MUST_VERIFY)
				continue;
			if (!watchdog || tmp->rating > watchdog->rating)
				watchdog = tmp;
		}
	}
	cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static int clocksource_watchdog_kthread(void *data)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	LIST_HEAD(unstable);

	mutex_lock(&clocksource_mutex);
	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list)
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			list_add(&cs->wd_list, &unstable);
		}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	/* Needs to be done outside of watchdog lock */
	list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
		list_del_init(&cs->wd_list);
		__clocksource_change_rating(cs, 0);
	}
	mutex_unlock(&clocksource_mutex);
	return 0;
}

#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int clocksource_watchdog_kthread(void *data) { return 0; }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
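
/*
 * Summary of the watchdog flow above (editorial note): a clocksource
 * registered with CLOCK_SOURCE_MUST_VERIFY (e.g. the TSC) is put on
 * watchdog_list and compared every 0.5 s (WATCHDOG_INTERVAL) against the
 * highest rated clocksource that does not need verification. If one
 * interval's deviation exceeds WATCHDOG_THRESHOLD (62.5 ms), the
 * clocksource is marked unstable and the watchdog kthread demotes it to
 * rating 0 via __clocksource_change_rating().
 */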

/**
 * clocksource_suspend - suspend the clocksource(s)
 */
void clocksource_suspend(void)
{
	struct clocksource *cs;

	list_for_each_entry_reverse(cs, &clocksource_list, list)
		if (cs->suspend)
			cs->suspend(cs);
}

/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume(cs);

	clocksource_resume_watchdog();
}

/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog. This might fail when the kernel
 * was stopped in code which holds watchdog_lock.
 */
void clocksource_touch_watchdog(void)
{
	clocksource_resume_watchdog();
}

/**
 * clocksource_max_deferment - Returns max time the clocksource can be deferred
 * @cs:		Pointer to clocksource
 */
static u64 clocksource_max_deferment(struct clocksource *cs)
{
	u64 max_nsecs, max_cycles;

	/*
	 * Calculate the maximum number of cycles that we can pass to the
	 * cyc2ns function without overflowing a 64-bit signed result. The
	 * maximum number of cycles is equal to ULLONG_MAX/cs->mult, which
	 * is equivalent to the below.
	 * max_cycles < (2^63)/cs->mult
	 * max_cycles < 2^(log2((2^63)/cs->mult))
	 * max_cycles < 2^(log2(2^63) - log2(cs->mult))
	 * max_cycles < 2^(63 - log2(cs->mult))
	 * max_cycles < 1 << (63 - log2(cs->mult))
	 * Please note that we add 1 to the result of the log2 to account for
	 * any rounding errors, ensure the above inequality is satisfied and
	 * no overflow will occur.
	 */
	max_cycles = 1ULL << (63 - (ilog2(cs->mult) + 1));

	/*
	 * The actual maximum number of cycles we can defer the clocksource is
	 * determined by the minimum of max_cycles and cs->mask.
	 */
	max_cycles = min_t(u64, max_cycles, (u64) cs->mask);
	max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult, cs->shift);

	/*
	 * To ensure that the clocksource does not wrap whilst we are idle,
	 * limit the time the clocksource can be deferred by a safety margin
	 * of 1/32 (~3.1%). Please note that a power-of-two margin is used
	 * because it can be computed with a shift, versus say 10% which
	 * would require a division.
	 */
	return max_nsecs - (max_nsecs >> 5);
}
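
/*
 * Worked example (illustrative): for the 32768 Hz clocksource computed
 * earlier (mult = 4000000000, shift = 17, mask = CLOCKSOURCE_MASK(32)),
 * ilog2(mult) = 31, so max_cycles = 1ULL << (63 - 32) = 2^31, which is
 * smaller than the mask. That converts to (2^31 * mult) >> shift =
 * 65536 seconds, and max_idle_ns is returned as that minus 1/32 of it.
 */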

#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET

/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource
 * which is selected by the userspace override.
 */
static void clocksource_select(void)
{
	struct clocksource *best, *cs;

	if (!finished_booting || list_empty(&clocksource_list))
		return;
	/* First clocksource on the list has the best rating. */
	best = list_first_entry(&clocksource_list, struct clocksource, list);
	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz).
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    tick_oneshot_mode_active()) {
			/* Override clocksource cannot be used. */
			printk(KERN_WARNING "Override clocksource %s is not "
			       "HRT compatible. Cannot switch while in "
			       "HRT/NOHZ mode\n", cs->name);
			override_name[0] = 0;
		} else
			/* Override clocksource can be used. */
			best = cs;
		break;
	}
	if (curr_clocksource != best) {
		printk(KERN_INFO "Switching to clocksource %s\n", best->name);
		curr_clocksource = best;
		timekeeping_notify(curr_clocksource);
	}
}

#else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */

static inline void clocksource_select(void) { }

#endif

/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	mutex_lock(&clocksource_mutex);
	curr_clocksource = clocksource_default_clock();
	mutex_unlock(&clocksource_mutex);

	finished_booting = 1;

	/*
	 * Run the watchdog first to eliminate unstable clock sources
	 */
	clocksource_watchdog_kthread(NULL);

	mutex_lock(&clocksource_mutex);
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
fs_initcall(clocksource_done_booting);

/*
 * Enqueue the clocksource sorted by rating
 */
static void clocksource_enqueue(struct clocksource *cs)
{
	struct list_head *entry = &clocksource_list;
	struct clocksource *tmp;

	list_for_each_entry(tmp, &clocksource_list, list)
		/* Keep track of the place where to insert */
		if (tmp->rating >= cs->rating)
			entry = &tmp->list;
	list_add(&cs->list, entry);
}
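
/*
 * Illustrative example: with registered ratings { 400, 300, 100 }, a new
 * clocksource rated 250 is inserted after the rating-300 entry, giving
 * { 400, 300, 250, 100 }. The list stays sorted in descending rating
 * order, so list_first_entry() in clocksource_select() is always the
 * best rated clocksource.
 */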

/**
 * __clocksource_updatefreq_scale - Used to update a clocksource with a new freq
 * @cs:		clocksource to be updated
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * This should only be called from the clocksource->enable() method.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * clocksource_updatefreq_hz() or clocksource_updatefreq_khz() helper
 * functions.
 */
void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	u64 sec;

	/*
	 * Calc the maximum number of seconds which we can run before
	 * wrapping around. For clocksources which have a mask > 32bit
	 * we need to limit the max sleep time to have a good
	 * conversion precision. 10 minutes is still a reasonable
	 * amount. That results in a shift value of 24 for a
	 * clocksource with mask >= 40bit and f >= 4GHz. That maps to
	 * ~ 0.06ppm granularity for NTP. We apply the same 1/32
	 * safety margin as we do in clocksource_max_deferment().
	 */
	sec = (cs->mask - (cs->mask >> 5));
	do_div(sec, freq);
	do_div(sec, scale);
	if (!sec)
		sec = 1;
	else if (sec > 600 && cs->mask > UINT_MAX)
		sec = 600;

	clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
			       NSEC_PER_SEC / scale, sec * scale);
	cs->max_idle_ns = clocksource_max_deferment(cs);
}
EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
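
/*
 * Example (illustrative sketch): a clocksource whose rate is only known
 * once the clock framework is up might update its frequency from its
 * ->enable() callback using the hz helper named above; foo_clk is
 * hypothetical:
 *
 *	static int foo_cs_enable(struct clocksource *cs)
 *	{
 *		clk_enable(foo_clk);
 *		clocksource_updatefreq_hz(cs, clk_get_rate(foo_clk));
 *		return 0;
 *	}
 */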

/**
 * __clocksource_register_scale - Used to install new clocksources
 * @cs:		clocksource to be registered
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * clocksource_register_hz() or clocksource_register_khz() helper
 * functions.
 */
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	/* Initialize mult/shift and max_idle_ns */
	__clocksource_updatefreq_scale(cs, scale, freq);

	/* Add clocksource to the clocksource list */
	mutex_lock(&clocksource_mutex);
	clocksource_enqueue(cs);
	clocksource_enqueue_watchdog(cs);
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(__clocksource_register_scale);

/**
 * clocksource_register - Used to install new clocksources
 * @cs:		clocksource to be registered
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 */
int clocksource_register(struct clocksource *cs)
{
	/* calculate max idle time permitted for this clocksource */
	cs->max_idle_ns = clocksource_max_deferment(cs);

	mutex_lock(&clocksource_mutex);
	clocksource_enqueue(cs);
	clocksource_enqueue_watchdog(cs);
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL(clocksource_register);
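
/*
 * Example (illustrative sketch): a typical fixed-frequency registration;
 * foo_cs_read() and the 13 MHz rate are hypothetical:
 *
 *	static struct clocksource foo_cs = {
 *		.name	= "foo",
 *		.rating	= 200,
 *		.read	= foo_cs_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 *	};
 *
 *	clocksource_register_hz(&foo_cs, 13000000);
 *
 * clocksource_register_hz() wraps __clocksource_register_scale() above,
 * which computes mult/shift for the given frequency before enqueueing.
 */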

static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
	clocksource_select();
}

/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	mutex_lock(&clocksource_mutex);
	__clocksource_change_rating(cs, rating);
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);

/**
 * clocksource_unregister - remove a registered clocksource
 */
void clocksource_unregister(struct clocksource *cs)
{
	mutex_lock(&clocksource_mutex);
	clocksource_dequeue_watchdog(cs);
	list_del(&cs->list);
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_unregister);

#ifdef CONFIG_SYSFS
/**
 * sysfs_show_current_clocksources - sysfs interface for current clocksource
 * @dev:	unused
 * @buf:	char buffer to be filled with the clocksource name
 *
 * Provides sysfs interface for showing the current clocksource.
 */
static ssize_t
sysfs_show_current_clocksources(struct sys_device *dev,
				struct sysdev_attribute *attr, char *buf)
{
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
	mutex_unlock(&clocksource_mutex);

	return count;
}

/**
 * sysfs_override_clocksource - interface for manually overriding clocksource
 * @dev:	unused
 * @buf:	name of override clocksource
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t sysfs_override_clocksource(struct sys_device *dev,
					  struct sysdev_attribute *attr,
					  const char *buf, size_t count)
{
	size_t ret = count;

	/* strings from sysfs write are not 0 terminated! */
	if (count >= sizeof(override_name))
		return -EINVAL;

	/* strip off \n: */
	if (buf[count-1] == '\n')
		count--;

	mutex_lock(&clocksource_mutex);

	if (count > 0)
		memcpy(override_name, buf, count);
	override_name[count] = 0;
	clocksource_select();

	mutex_unlock(&clocksource_mutex);

	return ret;
}

/**
 * sysfs_show_available_clocksources - sysfs interface for listing clocksources
 * @dev:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t
sysfs_show_available_clocksources(struct sys_device *dev,
				  struct sysdev_attribute *attr,
				  char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	list_for_each_entry(src, &clocksource_list, list) {
		/*
		 * Don't show non-HRES clocksources if the tick code is
		 * in one shot mode (highres=on or nohz=on)
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			count += snprintf(buf + count,
				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
				  "%s ", src->name);
	}
	mutex_unlock(&clocksource_mutex);

	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}

/*
 * Sysfs setup bits:
 */
static SYSDEV_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources,
		   sysfs_override_clocksource);

static SYSDEV_ATTR(available_clocksource, 0444,
		   sysfs_show_available_clocksources, NULL);

static struct sysdev_class clocksource_sysclass = {
	.name = "clocksource",
};

static struct sys_device device_clocksource = {
	.id	= 0,
	.cls	= &clocksource_sysclass,
};

static int __init init_clocksource_sysfs(void)
{
	int error = sysdev_class_register(&clocksource_sysclass);

	if (!error)
		error = sysdev_register(&device_clocksource);
	if (!error)
		error = sysdev_create_file(
				&device_clocksource,
				&attr_current_clocksource);
	if (!error)
		error = sysdev_create_file(
				&device_clocksource,
				&attr_available_clocksource);
	return error;
}

device_initcall(init_clocksource_sysfs);
#endif /* CONFIG_SYSFS */

/**
 * boot_override_clocksource - boot clock override
 * @str:	override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char *str)
{
	mutex_lock(&clocksource_mutex);
	if (str)
		strlcpy(override_name, str, sizeof(override_name));
	mutex_unlock(&clocksource_mutex);
	return 1;
}

__setup("clocksource=", boot_override_clocksource);

/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str:	override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clock(char *str)
{
	if (!strcmp(str, "pmtmr")) {
		printk("Warning: clock=pmtmr is deprecated. "
		       "Use clocksource=acpi_pm.\n");
		return boot_override_clocksource("acpi_pm");
	}
	printk("Warning! clock= boot option is deprecated. "
	       "Use clocksource=xyz\n");
	return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);
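
/*
 * Usage examples (illustrative): with the sysfs files created above, the
 * clocksource can be inspected and overridden at runtime:
 *
 *	cat /sys/devices/system/clocksource/clocksource0/available_clocksource
 *	echo acpi_pm > /sys/devices/system/clocksource/clocksource0/current_clocksource
 *
 * or selected at boot time with the clocksource= parameter, e.g.
 * "clocksource=acpi_pm".
 */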