#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <linux/debugfs.h>

/*
 * TLB flushing, formerly SMP-only
 *        c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway.)
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 * More scalable flush, from Andi Kleen
 *
 * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);

static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
                            u16 *new_asid, bool *need_flush)
{
        u16 asid;

        if (!static_cpu_has(X86_FEATURE_PCID)) {
                *new_asid = 0;
                *need_flush = true;
                return;
        }

        for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
                if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) !=
                    next->context.ctx_id)
                        continue;

                *new_asid = asid;
                *need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) <
                               next_tlb_gen);
                return;
        }

        /*
         * We don't currently own an ASID slot on this CPU.
         * Allocate a slot.
         */
        *new_asid = this_cpu_add_return(cpu_tlbstate.next_asid, 1) - 1;
        if (*new_asid >= TLB_NR_DYN_ASIDS) {
                *new_asid = 0;
                this_cpu_write(cpu_tlbstate.next_asid, 1);
        }
        *need_flush = true;
}
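/*
 * A worked example of the round-robin above, assuming TLB_NR_DYN_ASIDS
 * is 6: if none of the six slots belongs to 'next' and next_asid is
 * already 6, this_cpu_add_return() yields 7, so *new_asid would be 6;
 * that is out of range, so we recycle slot 0 and reset next_asid to 1.
 * Stale translations left by the slot's previous owner are handled by
 * *need_flush, and that owner will no longer find its ctx_id here, so
 * it too will take the flush path when it next runs on this CPU.
 */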
void leave_mm(int cpu)
{
        struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);

        /*
         * It's plausible that we're in lazy TLB mode while our mm is init_mm.
         * If so, our callers still expect us to flush the TLB, but there
         * aren't any user TLB entries in init_mm to worry about.
         *
         * This needs to happen before any other sanity checks due to
         * intel_idle's shenanigans.
         */
        if (loaded_mm == &init_mm)
                return;

        /* Warn if we're not lazy. */
        WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy));

        switch_mm(NULL, &init_mm, NULL);
}

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
               struct task_struct *tsk)
{
        unsigned long flags;

        local_irq_save(flags);
        switch_mm_irqs_off(prev, next, tsk);
        local_irq_restore(flags);
}

void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                        struct task_struct *tsk)
{
        struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
        u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
        unsigned cpu = smp_processor_id();
        u64 next_tlb_gen;

        /*
         * NB: The scheduler will call us with prev == next when switching
         * from lazy TLB mode to normal mode if active_mm isn't changing.
         * When this happens, we don't assume that CR3 (and hence
         * cpu_tlbstate.loaded_mm) matches next.
         *
         * NB: leave_mm() calls us with prev == NULL and tsk == NULL.
         */

        /* We don't want flush_tlb_func_* to run concurrently with us. */
        if (IS_ENABLED(CONFIG_PROVE_LOCKING))
                WARN_ON_ONCE(!irqs_disabled());

        /*
         * Verify that CR3 is what we think it is.  This will catch
         * hypothetical buggy code that directly switches to swapper_pg_dir
         * without going through leave_mm() / switch_mm_irqs_off() or that
         * does something like write_cr3(read_cr3_pa()).
         *
         * Only do this check if CONFIG_DEBUG_VM=y because __read_cr3()
         * isn't free.
         */
#ifdef CONFIG_DEBUG_VM
        if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev, prev_asid))) {
                /*
                 * If we were to BUG here, we'd be very likely to kill
                 * the system so hard that we don't see the call trace.
                 * Try to recover instead by ignoring the error and doing
                 * a global flush to minimize the chance of corruption.
                 *
                 * (This is far from being a fully correct recovery.
                 *  Architecturally, the CPU could prefetch something
                 *  back into an incorrect ASID slot and leave it there
                 *  to cause trouble down the road.  It's better than
                 *  nothing, though.)
                 */
                __flush_tlb_all();
        }
#endif
        this_cpu_write(cpu_tlbstate.is_lazy, false);

        if (real_prev == next) {
                VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
                           next->context.ctx_id);

                /*
                 * We don't currently support having a real mm loaded without
                 * our cpu set in mm_cpumask().  We have all the bookkeeping
                 * in place to figure out whether we would need to flush
                 * if our cpu were cleared in mm_cpumask(), but we don't
                 * currently use it.
                 */
                if (WARN_ON_ONCE(real_prev != &init_mm &&
                                 !cpumask_test_cpu(cpu, mm_cpumask(next))))
                        cpumask_set_cpu(cpu, mm_cpumask(next));

                return;
        } else {
                u16 new_asid;
                bool need_flush;

                if (IS_ENABLED(CONFIG_VMAP_STACK)) {
                        /*
                         * If our current stack is in vmalloc space and isn't
                         * mapped in the new pgd, we'll double-fault.  Forcibly
                         * map it.
                         */
                        unsigned int index = pgd_index(current_stack_pointer);
                        pgd_t *pgd = next->pgd + index;

                        if (unlikely(pgd_none(*pgd)))
                                set_pgd(pgd, init_mm.pgd[index]);
                }

                /* Stop remote flushes for the previous mm */
                VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) &&
                                real_prev != &init_mm);
                cpumask_clear_cpu(cpu, mm_cpumask(real_prev));

                /*
                 * Start remote flushes and then read tlb_gen.  Setting our
                 * bit in mm_cpumask() before the read means that a flusher
                 * who bumps next's tlb_gen after we sample it is guaranteed
                 * to see this CPU in the mask and target it, so we can't
                 * miss a flush.
                 */
                cpumask_set_cpu(cpu, mm_cpumask(next));
                next_tlb_gen = atomic64_read(&next->context.tlb_gen);

                choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);

                if (need_flush) {
                        this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
                        this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
                        write_cr3(build_cr3(next, new_asid));
                        trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
                                        TLB_FLUSH_ALL);
                } else {
                        /* The new ASID is already up to date. */
                        write_cr3(build_cr3_noflush(next, new_asid));
                        trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
                }

                this_cpu_write(cpu_tlbstate.loaded_mm, next);
                this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
        }

        load_mm_cr4(next);
        switch_ldt(real_prev, next);
}
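/*
 * A note on the two write_cr3() flavors used above: with CR4.PCIDE set,
 * a CR3 write with bit 63 clear (build_cr3()) invalidates the TLB
 * entries tagged with the incoming ASID, while build_cr3_noflush() sets
 * bit 63 so the hardware keeps whatever valid entries it still holds
 * for that ASID.  This is what makes the need_flush distinction cheap.
 */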
/*
 * Please ignore the name of this function.  It should be called
 * switch_to_kernel_thread().
 *
 * enter_lazy_tlb() is a hint from the scheduler that we are entering a
 * kernel thread or other context without an mm.  Acceptable implementations
 * include doing nothing whatsoever, switching to init_mm, or various clever
 * lazy tricks to try to minimize TLB flushes.
 *
 * The scheduler reserves the right to call enter_lazy_tlb() several times
 * in a row.  It will notify us that we're going back to a real mm by
 * calling switch_mm_irqs_off().
 */
void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
        if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
                return;

        if (tlb_defer_switch_to_init_mm()) {
                /*
                 * There's a significant optimization that may be possible
                 * here.  We have accurate enough TLB flush tracking that we
                 * don't need to maintain coherence of TLB per se when we're
                 * lazy.  We do, however, need to maintain coherence of
                 * paging-structure caches.  We could, in principle, leave our
                 * old mm loaded and only switch to init_mm when
                 * tlb_remove_page() happens.
                 */
                this_cpu_write(cpu_tlbstate.is_lazy, true);
        } else {
                switch_mm(NULL, &init_mm, NULL);
        }
}

/*
 * Call this when reinitializing a CPU.  It fixes the following potential
 * problems:
 *
 * - The ASID changed from what cpu_tlbstate thinks it is (most likely
 *   because the CPU was taken down and came back up with CR3's PCID
 *   bits clear.  CPU hotplug can do this.)
 *
 * - The TLB contains junk in slots corresponding to inactive ASIDs.
 *
 * - The CPU went so far out to lunch that it may have missed a TLB
 *   flush.
 */
void initialize_tlbstate_and_flush(void)
{
        int i;
        struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
        u64 tlb_gen = atomic64_read(&init_mm.context.tlb_gen);
        unsigned long cr3 = __read_cr3();

        /* Assert that CR3 already references the right mm. */
        WARN_ON((cr3 & CR3_ADDR_MASK) != __pa(mm->pgd));

        /*
         * Assert that CR4.PCIDE is set if needed.  (CR4.PCIDE initialization
         * doesn't work like other CR4 bits because it can only be set from
         * long mode.)
         */
        WARN_ON(boot_cpu_has(X86_FEATURE_PCID) &&
                !(cr4_read_shadow() & X86_CR4_PCIDE));

        /* Force ASID 0 and force a TLB flush. */
        write_cr3(build_cr3(mm, 0));

        /* Reinitialize tlbstate. */
        this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
        this_cpu_write(cpu_tlbstate.next_asid, 1);
        this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
        this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, tlb_gen);

        for (i = 1; i < TLB_NR_DYN_ASIDS; i++)
                this_cpu_write(cpu_tlbstate.ctxs[i].ctx_id, 0);
}
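/*
 * The final loop in initialize_tlbstate_and_flush() marks the remaining
 * slots invalid by writing a ctx_id of 0: last_mm_ctx_id starts at 1
 * and is only ever incremented, so no real mm can carry ctx_id 0 and
 * those slots can never match in choose_new_asid().
 */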
/*
 * flush_tlb_func_common()'s memory ordering requirement is that any
 * TLB fills that happen after we flush the TLB are ordered after we
 * read active_mm's tlb_gen.  We don't need any explicit barriers
 * because all x86 flush operations are serializing and the
 * atomic64_read operation won't be reordered by the compiler.
 */
static void flush_tlb_func_common(const struct flush_tlb_info *f,
                                  bool local, enum tlb_flush_reason reason)
{
        /*
         * We have three different tlb_gen values in here.  They are:
         *
         * - mm_tlb_gen:     the latest generation.
         * - local_tlb_gen:  the generation that this CPU has already caught
         *                   up to.
         * - f->new_tlb_gen: the generation that the requester of the flush
         *                   wants us to catch up to.
         */
        struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
        u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
        u64 mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
        u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);

        /* This code cannot presently handle being reentered. */
        VM_WARN_ON(!irqs_disabled());

        if (unlikely(loaded_mm == &init_mm))
                return;

        VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
                   loaded_mm->context.ctx_id);

        if (this_cpu_read(cpu_tlbstate.is_lazy)) {
                /*
                 * We're in lazy mode.  We need to at least flush our
                 * paging-structure cache to avoid speculatively reading
                 * garbage into our TLB.  Since switching to init_mm is barely
                 * slower than a minimal flush, just switch to init_mm.
                 */
                switch_mm_irqs_off(NULL, &init_mm, NULL);
                return;
        }

        if (unlikely(local_tlb_gen == mm_tlb_gen)) {
                /*
                 * There's nothing to do: we're already up to date.  This can
                 * happen if two concurrent flushes happen -- the first flush to
                 * be handled can catch us all the way up, leaving no work for
                 * the second flush.
                 */
                trace_tlb_flush(reason, 0);
                return;
        }

        WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen);
        WARN_ON_ONCE(f->new_tlb_gen > mm_tlb_gen);

        /*
         * If we get to this point, we know that our TLB is out of date.
         * This does not strictly imply that we need to flush (it's
         * possible that f->new_tlb_gen <= local_tlb_gen), but we're
         * going to need to flush in the very near future, so we might
         * as well get it over with.
         *
         * The only question is whether to do a full or partial flush.
         *
         * We do a partial flush if requested and two extra conditions
         * are met:
         *
         * 1. f->new_tlb_gen == local_tlb_gen + 1.  We have an invariant that
         *    we've always done all needed flushes to catch up to
         *    local_tlb_gen.  If, for example, local_tlb_gen == 2 and
         *    f->new_tlb_gen == 3, then we know that the flush needed to bring
         *    us up to date for tlb_gen 3 is the partial flush we're
         *    processing.
         *
         *    As an example of why this check is needed, suppose that there
         *    are two concurrent flushes.  The first is a full flush that
         *    changes context.tlb_gen from 1 to 2.  The second is a partial
         *    flush that changes context.tlb_gen from 2 to 3.  If they get
         *    processed on this CPU in reverse order, we'll see
         *    local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL.
         *    If we were to use __flush_tlb_single() and set local_tlb_gen to
         *    3, we'd break the invariant: we'd update local_tlb_gen above
         *    1 without the full flush that's needed for tlb_gen 2.
         *
         * 2. f->new_tlb_gen == mm_tlb_gen.  This is purely an optimization.
         *    Partial TLB flushes are not all that much cheaper than full TLB
         *    flushes, so it seems unlikely that it would be a performance win
         *    to do a partial flush if that won't bring our TLB fully up to
         *    date.  By doing a full flush instead, we can increase
         *    local_tlb_gen all the way to mm_tlb_gen and we can probably
         *    avoid another flush in the very near future.
         */
        if (f->end != TLB_FLUSH_ALL &&
            f->new_tlb_gen == local_tlb_gen + 1 &&
            f->new_tlb_gen == mm_tlb_gen) {
                /* Partial flush */
                unsigned long addr;
                unsigned long nr_pages = (f->end - f->start) >> PAGE_SHIFT;

                addr = f->start;
                while (addr < f->end) {
                        __flush_tlb_single(addr);
                        addr += PAGE_SIZE;
                }
                if (local)
                        count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_pages);
                trace_tlb_flush(reason, nr_pages);
        } else {
                /* Full flush. */
                local_flush_tlb();
                if (local)
                        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
                trace_tlb_flush(reason, TLB_FLUSH_ALL);
        }

        /* Both paths above update our state to mm_tlb_gen. */
        this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
}
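/*
 * A concrete example where the partial-flush conditions in
 * flush_tlb_func_common() do hold: this CPU has local_tlb_gen == 1 and
 * receives a one-page flush with f->new_tlb_gen == 2 while
 * mm_tlb_gen == 2.  Then 2 == 1 + 1 and 2 == mm_tlb_gen, so a single
 * __flush_tlb_single() suffices and local_tlb_gen advances straight to
 * mm_tlb_gen.
 */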
static void flush_tlb_func_local(void *info, enum tlb_flush_reason reason)
{
        const struct flush_tlb_info *f = info;

        flush_tlb_func_common(f, true, reason);
}

static void flush_tlb_func_remote(void *info)
{
        const struct flush_tlb_info *f = info;

        inc_irq_stat(irq_tlb_count);

        if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
                return;

        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
        flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
                             const struct flush_tlb_info *info)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
        if (info->end == TLB_FLUSH_ALL)
                trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
        else
                trace_tlb_flush(TLB_REMOTE_SEND_IPI,
                                (info->end - info->start) >> PAGE_SHIFT);

        if (is_uv_system()) {
                /*
                 * This whole special case is confused.  UV has a "Broadcast
                 * Assist Unit", which seems to be a fancy way to send IPIs.
                 * Back when x86 used an explicit TLB flush IPI, UV was
                 * optimized to use its own mechanism.  These days, x86 uses
                 * smp_call_function_many(), but UV still uses a manual IPI,
                 * and that IPI's action is out of date -- it does a manual
                 * flush instead of calling flush_tlb_func_remote().  This
                 * means that the percpu tlb_gen variables won't be updated
                 * and we'll do pointless flushes on future context switches.
                 *
                 * Rather than hooking native_flush_tlb_others() here, I think
                 * that UV should be updated so that smp_call_function_many(),
                 * etc, are optimal on UV.
                 */
                unsigned int cpu;

                cpu = smp_processor_id();
                cpumask = uv_flush_tlb_others(cpumask, info);
                if (cpumask)
                        smp_call_function_many(cpumask, flush_tlb_func_remote,
                                               (void *)info, 1);
                return;
        }
        smp_call_function_many(cpumask, flush_tlb_func_remote,
                               (void *)info, 1);
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,300 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                        unsigned long end, unsigned long vmflag)
{
        int cpu;

        struct flush_tlb_info info = {
                .mm = mm,
        };

        cpu = get_cpu();

        /* This is also a barrier that synchronizes with switch_mm(). */
        info.new_tlb_gen = inc_mm_tlb_gen(mm);

        /* Should we flush just the requested range? */
        if ((end != TLB_FLUSH_ALL) &&
            !(vmflag & VM_HUGETLB) &&
            ((end - start) >> PAGE_SHIFT) <= tlb_single_page_flush_ceiling) {
                info.start = start;
                info.end = end;
        } else {
                info.start = 0UL;
                info.end = TLB_FLUSH_ALL;
        }

        if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
                VM_WARN_ON(irqs_disabled());
                local_irq_disable();
                flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);
                local_irq_enable();
        }

        if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), &info);

        put_cpu();
}
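/*
 * For illustration: flush_tlb_mm_range() is the funnel point for the
 * generic ranged-flush API.  In this era of the kernel, flush_tlb_range()
 * in asm/tlbflush.h expands to roughly
 *
 *        flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags);
 *
 * which is why vmflag is tested against VM_HUGETLB above.
 */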
static void do_flush_tlb_all(void *info)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
        __flush_tlb_all();
}

void flush_tlb_all(void)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
        on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
        struct flush_tlb_info *f = info;
        unsigned long addr;

        /* Flush the range one page at a time with 'invlpg'. */
        for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
                __flush_tlb_single(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        /* Match the heuristic used for user-space flushes; a bit conservative. */
        if (end == TLB_FLUSH_ALL ||
            (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
                on_each_cpu(do_flush_tlb_all, NULL, 1);
        } else {
                struct flush_tlb_info info;
                info.start = start;
                info.end = end;
                on_each_cpu(do_kernel_range_flush, &info, 1);
        }
}

void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
        struct flush_tlb_info info = {
                .mm = NULL,
                .start = 0UL,
                .end = TLB_FLUSH_ALL,
        };

        int cpu = get_cpu();

        if (cpumask_test_cpu(cpu, &batch->cpumask)) {
                VM_WARN_ON(irqs_disabled());
                local_irq_disable();
                flush_tlb_func_local(&info, TLB_LOCAL_SHOOTDOWN);
                local_irq_enable();
        }

        if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
                flush_tlb_others(&batch->cpumask, &info);

        cpumask_clear(&batch->cpumask);

        put_cpu();
}

static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
                                  size_t count, loff_t *ppos)
{
        char buf[32];
        unsigned int len;

        len = sprintf(buf, "%lu\n", tlb_single_page_flush_ceiling);
        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
                 const char __user *user_buf, size_t count, loff_t *ppos)
{
        char buf[32];
        ssize_t len;
        int ceiling;

        len = min(count, sizeof(buf) - 1);
        if (copy_from_user(buf, user_buf, len))
                return -EFAULT;

        buf[len] = '\0';
        if (kstrtoint(buf, 0, &ceiling))
                return -EINVAL;

        if (ceiling < 0)
                return -EINVAL;

        tlb_single_page_flush_ceiling = ceiling;
        return count;
}

static const struct file_operations fops_tlbflush = {
        .read = tlbflush_read_file,
        .write = tlbflush_write_file,
        .llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
        debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
                            arch_debugfs_dir, NULL, &fops_tlbflush);
        return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);
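/*
 * The ceiling registered above can be inspected and tuned at runtime
 * via debugfs (see Documentation/x86/tlb.txt), e.g.:
 *
 *        echo 50 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *
 * A ceiling of 0 forces a full flush for every ranged request; very
 * large values make most flushes page-by-page.
 */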