// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * KVM Xen emulation
 */

#include "x86.h"
#include "xen.h"
#include "hyperv.h"
#include "lapic.h"

#include <linux/eventfd.h>
#include <linux/kvm_host.h>
#include <linux/sched/stat.h>

#include <trace/events/kvm.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/version.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/sched.h>

#include "trace.h"

static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm);
static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r);

DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ);

static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
{
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	struct pvclock_wall_clock *wc;
	gpa_t gpa = gfn_to_gpa(gfn);
	u32 *wc_sec_hi;
	u32 wc_version;
	u64 wall_nsec;
	int ret = 0;
	int idx = srcu_read_lock(&kvm->srcu);

	if (gfn == KVM_XEN_INVALID_GFN) {
		kvm_gpc_deactivate(gpc);
		goto out;
	}

	do {
		ret = kvm_gpc_activate(gpc, gpa, PAGE_SIZE);
		if (ret)
			goto out;

		/*
		 * This code mirrors kvm_write_wall_clock() except that it writes
		 * directly through the pfn cache and doesn't mark the page dirty.
		 */
		wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm);

		/* It could be invalid again already, so we need to check */
		read_lock_irq(&gpc->lock);

		if (gpc->valid)
			break;

		read_unlock_irq(&gpc->lock);
	} while (1);

	/* Paranoia checks on the 32-bit struct layout */
	BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900);
	BUILD_BUG_ON(offsetof(struct compat_shared_info, arch.wc_sec_hi) != 0x924);
	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);

#ifdef CONFIG_X86_64
	/* Paranoia checks on the 64-bit struct layout */
	BUILD_BUG_ON(offsetof(struct shared_info, wc) != 0xc00);
	BUILD_BUG_ON(offsetof(struct shared_info, wc_sec_hi) != 0xc0c);

	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
		struct shared_info *shinfo = gpc->khva;

		wc_sec_hi = &shinfo->wc_sec_hi;
		wc = &shinfo->wc;
	} else
#endif
	{
		struct compat_shared_info *shinfo = gpc->khva;

		wc_sec_hi = &shinfo->arch.wc_sec_hi;
		wc = &shinfo->wc;
	}

	/* Increment and ensure an odd value */
	wc_version = wc->version = (wc->version + 1) | 1;
	smp_wmb();

	wc->nsec = do_div(wall_nsec, 1000000000);
	wc->sec = (u32)wall_nsec;
	*wc_sec_hi = wall_nsec >> 32;
	smp_wmb();

	wc->version = wc_version + 1;
	read_unlock_irq(&gpc->lock);

	kvm_make_all_cpus_request(kvm, KVM_REQ_MASTERCLOCK_UPDATE);

out:
	srcu_read_unlock(&kvm->srcu, idx);
	return ret;
}

void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu)
{
	if (atomic_read(&vcpu->arch.xen.timer_pending) > 0) {
		struct kvm_xen_evtchn e;

		e.vcpu_id = vcpu->vcpu_id;
		e.vcpu_idx = vcpu->vcpu_idx;
		e.port = vcpu->arch.xen.timer_virq;
		e.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;

		kvm_xen_set_evtchn(&e, vcpu->kvm);

		vcpu->arch.xen.timer_expires = 0;
		atomic_set(&vcpu->arch.xen.timer_pending, 0);
	}
}

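/*
 * The hrtimer runs in hard interrupt context, so it only marks the timer
 * event as pending and kicks the vCPU out of the guest; the actual event
 * channel delivery happens later via kvm_xen_inject_timer_irqs() above,
 * in a context where it is safe to sleep to bring the shared pages in.
 */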
static enum hrtimer_restart xen_timer_callback(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu = container_of(timer, struct kvm_vcpu,
					     arch.xen.timer);
	if (atomic_read(&vcpu->arch.xen.timer_pending))
		return HRTIMER_NORESTART;

	atomic_inc(&vcpu->arch.xen.timer_pending);
	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
	kvm_vcpu_kick(vcpu);

	return HRTIMER_NORESTART;
}

static void kvm_xen_start_timer(struct kvm_vcpu *vcpu, u64 guest_abs, s64 delta_ns)
{
	atomic_set(&vcpu->arch.xen.timer_pending, 0);
	vcpu->arch.xen.timer_expires = guest_abs;

	if (delta_ns <= 0) {
		xen_timer_callback(&vcpu->arch.xen.timer);
	} else {
		ktime_t ktime_now = ktime_get();
		hrtimer_start(&vcpu->arch.xen.timer,
			      ktime_add_ns(ktime_now, delta_ns),
			      HRTIMER_MODE_ABS_HARD);
	}
}

static void kvm_xen_stop_timer(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.xen.timer);
	vcpu->arch.xen.timer_expires = 0;
	atomic_set(&vcpu->arch.xen.timer_pending, 0);
}

static void kvm_xen_init_timer(struct kvm_vcpu *vcpu)
{
	hrtimer_init(&vcpu->arch.xen.timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_HARD);
	vcpu->arch.xen.timer.function = xen_timer_callback;
}

static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
{
	struct kvm_vcpu_xen *vx = &v->arch.xen;
	struct gfn_to_pfn_cache *gpc1 = &vx->runstate_cache;
	struct gfn_to_pfn_cache *gpc2 = &vx->runstate2_cache;
	size_t user_len, user_len1, user_len2;
	struct vcpu_runstate_info rs;
	unsigned long flags;
	size_t times_ofs;
	uint8_t *update_bit = NULL;
	uint64_t entry_time;
	uint64_t *rs_times;
	int *rs_state;

	/*
	 * The only difference between 32-bit and 64-bit versions of the
	 * runstate struct is the alignment of uint64_t in 32-bit, which
	 * means that the 64-bit version has an additional 4 bytes of
	 * padding after the first field 'state'. Let's be really really
	 * paranoid about that, and match it against the internal data
	 * structures that we memcpy into it...
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != 0);
	BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state) != 0);
	BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
#ifdef CONFIG_X86_64
	/*
	 * The 64-bit structure has 4 bytes of padding before 'state_entry_time'
	 * so each subsequent field is shifted by 4, and it's 4 bytes longer.
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4);
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
		     offsetof(struct compat_vcpu_runstate_info, time) + 4);
	BUILD_BUG_ON(sizeof(struct vcpu_runstate_info) != 0x2c + 4);
#endif
	/*
	 * The state field is in the same place at the start of both structs,
	 * and is the same size (int) as vx->current_runstate.
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) !=
		     offsetof(struct compat_vcpu_runstate_info, state));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state) !=
		     sizeof(vx->current_runstate));
	BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) !=
		     sizeof(vx->current_runstate));

	/*
	 * The state_entry_time field is 64 bits in both versions, and the
	 * XEN_RUNSTATE_UPDATE flag is in the top bit, which given that x86
	 * is little-endian means that it's in the last *byte* of the word.
	 * That detail is important later.
	 */
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state_entry_time) !=
		     sizeof(uint64_t));
	BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) !=
		     sizeof(uint64_t));
	BUILD_BUG_ON((XEN_RUNSTATE_UPDATE >> 56) != 0x80);

	/*
	 * The time array is four 64-bit quantities in both versions, matching
	 * the vx->runstate_times and immediately following state_entry_time.
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct vcpu_runstate_info, time) - sizeof(uint64_t));
	BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct compat_vcpu_runstate_info, time) - sizeof(uint64_t));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
		     sizeof_field(struct compat_vcpu_runstate_info, time));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
		     sizeof(vx->runstate_times));

	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) {
		user_len = sizeof(struct vcpu_runstate_info);
		times_ofs = offsetof(struct vcpu_runstate_info,
				     state_entry_time);
	} else {
		user_len = sizeof(struct compat_vcpu_runstate_info);
		times_ofs = offsetof(struct compat_vcpu_runstate_info,
				     state_entry_time);
	}

	/*
	 * There are basically no alignment constraints. The guest can set it
	 * up so it crosses from one page to the next, and at arbitrary byte
	 * alignment (and the 32-bit ABI doesn't align the 64-bit integers
	 * anyway, even if the overall struct had been 64-bit aligned).
	 */
	if ((gpc1->gpa & ~PAGE_MASK) + user_len >= PAGE_SIZE) {
		user_len1 = PAGE_SIZE - (gpc1->gpa & ~PAGE_MASK);
		user_len2 = user_len - user_len1;
	} else {
		user_len1 = user_len;
		user_len2 = 0;
	}
	BUG_ON(user_len1 + user_len2 != user_len);

 retry:
	/*
	 * Attempt to obtain the GPC lock on *both* (if there are two)
	 * gfn_to_pfn caches that cover the region.
	 */
	read_lock_irqsave(&gpc1->lock, flags);
	while (!kvm_gpc_check(gpc1, user_len1)) {
		read_unlock_irqrestore(&gpc1->lock, flags);

		/* When invoked from kvm_sched_out() we cannot sleep */
		if (atomic)
			return;

		if (kvm_gpc_refresh(gpc1, user_len1))
			return;

		read_lock_irqsave(&gpc1->lock, flags);
	}

	if (likely(!user_len2)) {
		/*
		 * Set up three pointers directly to the runstate_info
		 * struct in the guest (via the GPC).
		 *
		 *  • @rs_state   → state field
		 *  • @rs_times   → state_entry_time field.
		 *  • @update_bit → last byte of state_entry_time, which
		 *                  contains the XEN_RUNSTATE_UPDATE bit.
		 */
		rs_state = gpc1->khva;
		rs_times = gpc1->khva + times_ofs;
		if (v->kvm->arch.xen.runstate_update_flag)
			update_bit = ((void *)(&rs_times[1])) - 1;
	} else {
		/*
		 * The guest's runstate_info is split across two pages and we
		 * need to hold and validate both GPCs simultaneously. We can
		 * declare a lock ordering GPC1 > GPC2 because nothing else
		 * takes them more than one at a time.
		 */
		read_lock(&gpc2->lock);

		if (!kvm_gpc_check(gpc2, user_len2)) {
			read_unlock(&gpc2->lock);
			read_unlock_irqrestore(&gpc1->lock, flags);

			/* When invoked from kvm_sched_out() we cannot sleep */
			if (atomic)
				return;

			/*
			 * Use kvm_gpc_activate() here because if the runstate
			 * area was configured in 32-bit mode and only extends
			 * to the second page now because the guest changed to
			 * 64-bit mode, the second GPC won't have been set up.
			 */
			if (kvm_gpc_activate(gpc2, gpc1->gpa + user_len1,
					     user_len2))
				return;

			/*
			 * We dropped the lock on GPC1 so we have to go all the
			 * way back and revalidate that too.
			 */
			goto retry;
		}

		/*
		 * In this case, the runstate_info struct will be assembled on
		 * the kernel stack (compat or not as appropriate) and will
		 * be copied to GPC1/GPC2 with a dual memcpy. Set up the three
		 * rs pointers accordingly.
		 */
		rs_times = &rs.state_entry_time;

		/*
		 * The rs_state pointer points to the start of what we'll
		 * copy to the guest, which in the case of a compat guest
		 * is the 32-bit field that the compiler thinks is padding.
		 */
		rs_state = ((void *)rs_times) - times_ofs;

		/*
		 * The update_bit is still directly in the guest memory,
		 * via one GPC or the other.
		 */
		if (v->kvm->arch.xen.runstate_update_flag) {
			if (user_len1 >= times_ofs + sizeof(uint64_t))
				update_bit = gpc1->khva + times_ofs +
					sizeof(uint64_t) - 1;
			else
				update_bit = gpc2->khva + times_ofs +
					sizeof(uint64_t) - 1 - user_len1;
		}

#ifdef CONFIG_X86_64
		/*
		 * Don't leak kernel memory through the padding in the 64-bit
		 * version of the struct.
		 */
		memset(&rs, 0, offsetof(struct vcpu_runstate_info, state_entry_time));
#endif
	}

	/*
	 * First, set the XEN_RUNSTATE_UPDATE bit in the top bit of the
	 * state_entry_time field, directly in the guest. We need to set
	 * that (and write-barrier) before writing to the rest of the
	 * structure, and clear it last. Just as Xen does, we address the
	 * single *byte* in which it resides because it might be in a
	 * different cache line to the rest of the 64-bit word, due to
	 * the (lack of) alignment constraints.
	 */
	entry_time = vx->runstate_entry_time;
	if (update_bit) {
		entry_time |= XEN_RUNSTATE_UPDATE;
		*update_bit = (vx->runstate_entry_time | XEN_RUNSTATE_UPDATE) >> 56;
		smp_wmb();
	}

	/*
	 * Now assemble the actual structure, either on our kernel stack
	 * or directly in the guest according to how the rs_state and
	 * rs_times pointers were set up above.
	 */
	*rs_state = vx->current_runstate;
	rs_times[0] = entry_time;
	memcpy(rs_times + 1, vx->runstate_times, sizeof(vx->runstate_times));

	/* For the split case, we have to then copy it to the guest. */
	if (user_len2) {
		memcpy(gpc1->khva, rs_state, user_len1);
		memcpy(gpc2->khva, ((void *)rs_state) + user_len1, user_len2);
	}
	smp_wmb();

	/* Finally, clear the XEN_RUNSTATE_UPDATE bit. */
	if (update_bit) {
		entry_time &= ~XEN_RUNSTATE_UPDATE;
		*update_bit = entry_time >> 56;
		smp_wmb();
	}

	if (user_len2)
		read_unlock(&gpc2->lock);

	read_unlock_irqrestore(&gpc1->lock, flags);

	mark_page_dirty_in_slot(v->kvm, gpc1->memslot, gpc1->gpa >> PAGE_SHIFT);
	if (user_len2)
		mark_page_dirty_in_slot(v->kvm, gpc2->memslot, gpc2->gpa >> PAGE_SHIFT);
}

void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
{
	struct kvm_vcpu_xen *vx = &v->arch.xen;
	u64 now = get_kvmclock_ns(v->kvm);
	u64 delta_ns = now - vx->runstate_entry_time;
	u64 run_delay = current->sched_info.run_delay;

	if (unlikely(!vx->runstate_entry_time))
		vx->current_runstate = RUNSTATE_offline;

	/*
	 * Time waiting for the scheduler isn't "stolen" if the
	 * vCPU wasn't running anyway.
	 */
	if (vx->current_runstate == RUNSTATE_running) {
		u64 steal_ns = run_delay - vx->last_steal;

		delta_ns -= steal_ns;

		vx->runstate_times[RUNSTATE_runnable] += steal_ns;
	}
	vx->last_steal = run_delay;

	vx->runstate_times[vx->current_runstate] += delta_ns;
	vx->current_runstate = state;
	vx->runstate_entry_time = now;

	if (vx->runstate_cache.active)
		kvm_xen_update_runstate_guest(v, state == RUNSTATE_runnable);
}

static void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
{
	struct kvm_lapic_irq irq = { };
	int r;

	irq.dest_id = v->vcpu_id;
	irq.vector = v->arch.xen.upcall_vector;
	irq.dest_mode = APIC_DEST_PHYSICAL;
	irq.shorthand = APIC_DEST_NOSHORT;
	irq.delivery_mode = APIC_DM_FIXED;
	irq.level = 1;

	/* The fast version will always work for physical unicast */
	WARN_ON_ONCE(!kvm_irq_delivery_to_apic_fast(v->kvm, NULL, &irq, &r, NULL));
}

/*
 * On event channel delivery, the vcpu_info may not have been accessible.
 * In that case, there are bits in vcpu->arch.xen.evtchn_pending_sel which
 * need to be marked into the vcpu_info (and evtchn_upcall_pending set).
 * Do so now that we can sleep in the context of the vCPU to bring the
 * page in, and refresh the pfn cache for it.
 */
void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
{
	unsigned long evtchn_pending_sel = READ_ONCE(v->arch.xen.evtchn_pending_sel);
	struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
	unsigned long flags;

	if (!evtchn_pending_sel)
		return;

	/*
	 * Yes, this is an open-coded loop. But that's just what put_user()
	 * does anyway. Page it in and retry the instruction. We're just a
	 * little more honest about it.
	 */
	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info)))
			return;

		read_lock_irqsave(&gpc->lock, flags);
	}

	/* Now gpc->khva is a valid kernel address for the vcpu_info */
	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) {
		struct vcpu_info *vi = gpc->khva;

		asm volatile(LOCK_PREFIX "orq %0, %1\n"
			     "notq %0\n"
			     LOCK_PREFIX "andq %0, %2\n"
			     : "=r" (evtchn_pending_sel),
			       "+m" (vi->evtchn_pending_sel),
			       "+m" (v->arch.xen.evtchn_pending_sel)
			     : "0" (evtchn_pending_sel));
		WRITE_ONCE(vi->evtchn_upcall_pending, 1);
	} else {
		u32 evtchn_pending_sel32 = evtchn_pending_sel;
		struct compat_vcpu_info *vi = gpc->khva;

		asm volatile(LOCK_PREFIX "orl %0, %1\n"
			     "notl %0\n"
			     LOCK_PREFIX "andl %0, %2\n"
			     : "=r" (evtchn_pending_sel32),
			       "+m" (vi->evtchn_pending_sel),
			       "+m" (v->arch.xen.evtchn_pending_sel)
			     : "0" (evtchn_pending_sel32));
		WRITE_ONCE(vi->evtchn_upcall_pending, 1);
	}
	read_unlock_irqrestore(&gpc->lock, flags);

	/* For the per-vCPU lapic vector, deliver it as MSI. */
	if (v->arch.xen.upcall_vector)
		kvm_xen_inject_vcpu_vector(v);

	mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
}

int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
{
	struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
	unsigned long flags;
	u8 rc = 0;

	/*
	 * If the global upcall vector (HVMIRQ_callback_vector) is set and
	 * the vCPU's evtchn_upcall_pending flag is set, the IRQ is pending.
	 */

	/* No need for compat handling here */
	BUILD_BUG_ON(offsetof(struct vcpu_info, evtchn_upcall_pending) !=
		     offsetof(struct compat_vcpu_info, evtchn_upcall_pending));
	BUILD_BUG_ON(sizeof(rc) !=
		     sizeof_field(struct vcpu_info, evtchn_upcall_pending));
	BUILD_BUG_ON(sizeof(rc) !=
		     sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/*
		 * This function gets called from kvm_vcpu_block() after setting the
		 * task to TASK_INTERRUPTIBLE, to see if it needs to wake immediately
		 * from a HLT. So we really mustn't sleep. If the page ended up absent
		 * at that point, just return 1 in order to trigger an immediate wake,
		 * and we'll end up getting called again from a context where we *can*
		 * fault in the page and wait for it.
		 */
		if (in_atomic() || !task_is_running(current))
			return 1;

		if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info))) {
			/*
			 * If this failed, userspace has screwed up the
			 * vcpu_info mapping. No interrupts for you.
			 */
			return 0;
		}
		read_lock_irqsave(&gpc->lock, flags);
	}

	rc = ((struct vcpu_info *)gpc->khva)->evtchn_upcall_pending;
	read_unlock_irqrestore(&gpc->lock, flags);
	return rc;
}

int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	int r = -ENOENT;

	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) {
			r = -EINVAL;
		} else {
			mutex_lock(&kvm->lock);
			kvm->arch.xen.long_mode = !!data->u.long_mode;
			mutex_unlock(&kvm->lock);
			r = 0;
		}
		break;

	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		mutex_lock(&kvm->lock);
		r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
		mutex_unlock(&kvm->lock);
		break;

	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
		if (data->u.vector && data->u.vector < 0x10)
			r = -EINVAL;
		else {
			mutex_lock(&kvm->lock);
			kvm->arch.xen.upcall_vector = data->u.vector;
			mutex_unlock(&kvm->lock);
			r = 0;
		}
		break;

	case KVM_XEN_ATTR_TYPE_EVTCHN:
		r = kvm_xen_setattr_evtchn(kvm, data);
		break;

	case KVM_XEN_ATTR_TYPE_XEN_VERSION:
		mutex_lock(&kvm->lock);
		kvm->arch.xen.xen_version = data->u.xen_version;
		mutex_unlock(&kvm->lock);
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		mutex_lock(&kvm->lock);
		kvm->arch.xen.runstate_update_flag = !!data->u.runstate_update_flag;
		mutex_unlock(&kvm->lock);
		r = 0;
		break;

	default:
		break;
	}

	return r;
}

int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	int r = -ENOENT;

	mutex_lock(&kvm->lock);

	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		data->u.long_mode = kvm->arch.xen.long_mode;
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		if (kvm->arch.xen.shinfo_cache.active)
			data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa);
		else
			data->u.shared_info.gfn = KVM_XEN_INVALID_GFN;
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
		data->u.vector = kvm->arch.xen.upcall_vector;
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_XEN_VERSION:
		data->u.xen_version = kvm->arch.xen.xen_version;
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		data->u.runstate_update_flag = kvm->arch.xen.runstate_update_flag;
		r = 0;
		break;

	default:
		break;
	}

	mutex_unlock(&kvm->lock);
	return r;
}

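/*
 * Per-vCPU attributes. These are serialized by kvm->lock and take the
 * SRCU read lock, since activating the various pfn caches below can
 * touch guest memory.
 */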
int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
	int idx, r = -ENOENT;

	mutex_lock(&vcpu->kvm->lock);
	idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (data->type) {
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
		/* No compat necessary here. */
		BUILD_BUG_ON(sizeof(struct vcpu_info) !=
			     sizeof(struct compat_vcpu_info));
		BUILD_BUG_ON(offsetof(struct vcpu_info, time) !=
			     offsetof(struct compat_vcpu_info, time));

		if (data->u.gpa == KVM_XEN_INVALID_GPA) {
			kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
			r = 0;
			break;
		}

		r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_info_cache,
				     data->u.gpa, sizeof(struct vcpu_info));
		if (!r)
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);

		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
		if (data->u.gpa == KVM_XEN_INVALID_GPA) {
			kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache);
			r = 0;
			break;
		}

		r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_time_info_cache,
				     data->u.gpa,
				     sizeof(struct pvclock_vcpu_time_info));
		if (!r)
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR: {
		size_t sz, sz1, sz2;

		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.gpa == KVM_XEN_INVALID_GPA) {
			r = 0;
	deactivate_out:
			kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache);
			kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);
			break;
		}

		/*
		 * If the guest switches to 64-bit mode after setting the runstate
		 * address, that's actually OK. kvm_xen_update_runstate_guest()
		 * will cope.
		 */
		if (IS_ENABLED(CONFIG_64BIT) && vcpu->kvm->arch.xen.long_mode)
			sz = sizeof(struct vcpu_runstate_info);
		else
			sz = sizeof(struct compat_vcpu_runstate_info);

		/* How much fits in the (first) page? */
		sz1 = PAGE_SIZE - (data->u.gpa & ~PAGE_MASK);
		r = kvm_gpc_activate(&vcpu->arch.xen.runstate_cache,
				     data->u.gpa, sz1);
		if (r)
			goto deactivate_out;

		/* Either map the second page, or deactivate the second GPC */
		if (sz1 >= sz) {
			kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);
		} else {
			sz2 = sz - sz1;
			BUG_ON((data->u.gpa + sz1) & ~PAGE_MASK);
			r = kvm_gpc_activate(&vcpu->arch.xen.runstate2_cache,
					     data->u.gpa + sz1, sz2);
			if (r)
				goto deactivate_out;
		}

		kvm_xen_update_runstate_guest(vcpu, false);
		break;
	}
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline) {
			r = -EINVAL;
			break;
		}

		kvm_xen_update_runstate(vcpu, data->u.runstate.state);
		r = 0;
		break;

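	/*
	 * RUNSTATE_DATA sets the absolute runstate times (e.g. on live
	 * migration), while RUNSTATE_ADJUST below applies deltas. In both
	 * cases the four per-state times must sum to state_entry_time.
	 */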
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline) {
			r = -EINVAL;
			break;
		}
		if (data->u.runstate.state_entry_time !=
		    (data->u.runstate.time_running +
		     data->u.runstate.time_runnable +
		     data->u.runstate.time_blocked +
		     data->u.runstate.time_offline)) {
			r = -EINVAL;
			break;
		}
		if (get_kvmclock_ns(vcpu->kvm) <
		    data->u.runstate.state_entry_time) {
			r = -EINVAL;
			break;
		}

		vcpu->arch.xen.current_runstate = data->u.runstate.state;
		vcpu->arch.xen.runstate_entry_time =
			data->u.runstate.state_entry_time;
		vcpu->arch.xen.runstate_times[RUNSTATE_running] =
			data->u.runstate.time_running;
		vcpu->arch.xen.runstate_times[RUNSTATE_runnable] =
			data->u.runstate.time_runnable;
		vcpu->arch.xen.runstate_times[RUNSTATE_blocked] =
			data->u.runstate.time_blocked;
		vcpu->arch.xen.runstate_times[RUNSTATE_offline] =
			data->u.runstate.time_offline;
		vcpu->arch.xen.last_steal = current->sched_info.run_delay;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline &&
		    data->u.runstate.state != (u64)-1) {
			r = -EINVAL;
			break;
		}
		/* The adjustment must add up */
		if (data->u.runstate.state_entry_time !=
		    (data->u.runstate.time_running +
		     data->u.runstate.time_runnable +
		     data->u.runstate.time_blocked +
		     data->u.runstate.time_offline)) {
			r = -EINVAL;
			break;
		}

		if (get_kvmclock_ns(vcpu->kvm) <
		    (vcpu->arch.xen.runstate_entry_time +
		     data->u.runstate.state_entry_time)) {
			r = -EINVAL;
			break;
		}

		vcpu->arch.xen.runstate_entry_time +=
			data->u.runstate.state_entry_time;
		vcpu->arch.xen.runstate_times[RUNSTATE_running] +=
			data->u.runstate.time_running;
		vcpu->arch.xen.runstate_times[RUNSTATE_runnable] +=
			data->u.runstate.time_runnable;
		vcpu->arch.xen.runstate_times[RUNSTATE_blocked] +=
			data->u.runstate.time_blocked;
		vcpu->arch.xen.runstate_times[RUNSTATE_offline] +=
			data->u.runstate.time_offline;

		if (data->u.runstate.state <= RUNSTATE_offline)
			kvm_xen_update_runstate(vcpu, data->u.runstate.state);
		else if (vcpu->arch.xen.runstate_cache.active)
			kvm_xen_update_runstate_guest(vcpu, false);
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID:
		if (data->u.vcpu_id >= KVM_MAX_VCPUS)
			r = -EINVAL;
		else {
			vcpu->arch.xen.vcpu_id = data->u.vcpu_id;
			r = 0;
		}
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_TIMER:
		if (data->u.timer.port &&
		    data->u.timer.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) {
			r = -EINVAL;
			break;
		}

		if (!vcpu->arch.xen.timer.function)
			kvm_xen_init_timer(vcpu);

		/* Stop the timer (if it's running) before changing the vector */
		kvm_xen_stop_timer(vcpu);
		vcpu->arch.xen.timer_virq = data->u.timer.port;

		/* Start the timer if the new value has a valid vector+expiry. */
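		/*
		 * A non-zero expires_ns restores a timer that was live when
		 * the vCPU state was saved; if the deadline has already
		 * passed, kvm_xen_start_timer() fires the callback at once.
		 */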
		if (data->u.timer.port && data->u.timer.expires_ns)
			kvm_xen_start_timer(vcpu, data->u.timer.expires_ns,
					    data->u.timer.expires_ns -
					    get_kvmclock_ns(vcpu->kvm));

		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR:
		if (data->u.vector && data->u.vector < 0x10)
			r = -EINVAL;
		else {
			vcpu->arch.xen.upcall_vector = data->u.vector;
			r = 0;
		}
		break;

	default:
		break;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	mutex_unlock(&vcpu->kvm->lock);
	return r;
}

int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
	int r = -ENOENT;

	mutex_lock(&vcpu->kvm->lock);

	switch (data->type) {
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
		if (vcpu->arch.xen.vcpu_info_cache.active)
			data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa;
		else
			data->u.gpa = KVM_XEN_INVALID_GPA;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
		if (vcpu->arch.xen.vcpu_time_info_cache.active)
			data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa;
		else
			data->u.gpa = KVM_XEN_INVALID_GPA;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (vcpu->arch.xen.runstate_cache.active) {
			data->u.gpa = vcpu->arch.xen.runstate_cache.gpa;
			r = 0;
		}
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		data->u.runstate.state = vcpu->arch.xen.current_runstate;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		data->u.runstate.state = vcpu->arch.xen.current_runstate;
		data->u.runstate.state_entry_time =
			vcpu->arch.xen.runstate_entry_time;
		data->u.runstate.time_running =
			vcpu->arch.xen.runstate_times[RUNSTATE_running];
		data->u.runstate.time_runnable =
			vcpu->arch.xen.runstate_times[RUNSTATE_runnable];
		data->u.runstate.time_blocked =
			vcpu->arch.xen.runstate_times[RUNSTATE_blocked];
		data->u.runstate.time_offline =
			vcpu->arch.xen.runstate_times[RUNSTATE_offline];
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
		r = -EINVAL;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID:
		data->u.vcpu_id = vcpu->arch.xen.vcpu_id;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_TIMER:
		data->u.timer.port = vcpu->arch.xen.timer_virq;
		data->u.timer.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
		data->u.timer.expires_ns = vcpu->arch.xen.timer_expires;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR:
		data->u.vector = vcpu->arch.xen.upcall_vector;
		r = 0;
		break;

	default:
		break;
	}

	mutex_unlock(&vcpu->kvm->lock);
	return r;
}

int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	u32 page_num = data & ~PAGE_MASK;
	u64 page_addr = data & PAGE_MASK;
	bool lm = is_long_mode(vcpu);

	/* Latch long_mode for shared_info pages etc. */
	vcpu->kvm->arch.xen.long_mode = lm;

	/*
	 * If Xen hypercall intercept is enabled, fill the hypercall
	 * page with VMCALL/VMMCALL instructions since that's what
	 * we catch. Else the VMM has provided the hypercall pages
	 * with instructions of its own choosing, so use those.
	 */
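	/*
	 * Each of the 128 slots in the generated page is 32 bytes; what
	 * ends up in slot NN is, roughly:
	 *
	 *	b8 NN 00 00 00	mov	$NN, %eax	; hypercall number
	 *	0f 01 c1	vmcall	(0f 01 d9, vmmcall, on AMD)
	 *	c3		ret
	 *	cc cc cc ...	int3 padding to 32 bytes
	 */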
	if (kvm_xen_hypercall_enabled(kvm)) {
		u8 instructions[32];
		int i;

		if (page_num)
			return 1;

		/* mov imm32, %eax */
		instructions[0] = 0xb8;

		/* vmcall / vmmcall */
		static_call(kvm_x86_patch_hypercall)(vcpu, instructions + 5);

		/* ret */
		instructions[8] = 0xc3;

		/* int3 to pad */
		memset(instructions + 9, 0xcc, sizeof(instructions) - 9);

		for (i = 0; i < PAGE_SIZE / sizeof(instructions); i++) {
			*(u32 *)&instructions[1] = i;
			if (kvm_vcpu_write_guest(vcpu,
						 page_addr + (i * sizeof(instructions)),
						 instructions, sizeof(instructions)))
				return 1;
		}
	} else {
		/*
		 * Note, truncation is a non-issue as 'lm' is guaranteed to be
		 * false for a 32-bit kernel, i.e. when hva_t is only 4 bytes.
		 */
		hva_t blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64
				     : kvm->arch.xen_hvm_config.blob_addr_32;
		u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
				  : kvm->arch.xen_hvm_config.blob_size_32;
		u8 *page;
		int ret;

		if (page_num >= blob_size)
			return 1;

		blob_addr += page_num * PAGE_SIZE;

		page = memdup_user((u8 __user *)blob_addr, PAGE_SIZE);
		if (IS_ERR(page))
			return PTR_ERR(page);

		ret = kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE);
		kfree(page);
		if (ret)
			return 1;
	}
	return 0;
}

int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
{
	/* Only some feature flags need to be *enabled* by userspace */
	u32 permitted_flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL |
			      KVM_XEN_HVM_CONFIG_EVTCHN_SEND;

	if (xhc->flags & ~permitted_flags)
		return -EINVAL;

	/*
	 * With hypercall interception the kernel generates its own
	 * hypercall page so it must not be provided.
	 */
	if ((xhc->flags & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) &&
	    (xhc->blob_addr_32 || xhc->blob_addr_64 ||
	     xhc->blob_size_32 || xhc->blob_size_64))
		return -EINVAL;

	mutex_lock(&kvm->lock);

	if (xhc->msr && !kvm->arch.xen_hvm_config.msr)
		static_branch_inc(&kvm_xen_enabled.key);
	else if (!xhc->msr && kvm->arch.xen_hvm_config.msr)
		static_branch_slow_dec_deferred(&kvm_xen_enabled);

	memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));

	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_xen_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
	kvm_rax_write(vcpu, result);
	return kvm_skip_emulated_instruction(vcpu);
}

static int kvm_xen_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.xen.hypercall_rip)))
		return 1;

	return kvm_xen_hypercall_set_result(vcpu, run->xen.u.hcall.result);
}

static inline int max_evtchn_port(struct kvm *kvm)
{
	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode)
		return EVTCHN_2L_NR_CHANNELS;
	else
		return COMPAT_EVTCHN_2L_NR_CHANNELS;
}

static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports,
			       evtchn_port_t *ports)
{
	struct kvm *kvm = vcpu->kvm;
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	unsigned long *pending_bits;
	unsigned long flags;
	bool ret = true;
	int idx, i;

	idx = srcu_read_lock(&kvm->srcu);
	read_lock_irqsave(&gpc->lock, flags);
	if (!kvm_gpc_check(gpc, PAGE_SIZE))
		goto out_rcu;

	ret = false;
	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
		struct shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
	} else {
		struct compat_shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
	}

	for (i = 0; i < nr_ports; i++) {
		if (test_bit(ports[i], pending_bits)) {
			ret = true;
			break;
		}
	}

 out_rcu:
	read_unlock_irqrestore(&gpc->lock, flags);
	srcu_read_unlock(&kvm->srcu, idx);

	return ret;
}

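/*
 * SCHEDOP_poll: check the given event channel ports, and if none is
 * already pending, block the vCPU until one of them is raised or the
 * (optional) timeout expires. Returns true if handled here, with the
 * Xen return code in *r; false makes the caller fall through to the
 * plain SCHEDOP_yield handling.
 */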
static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
				 u64 param, u64 *r)
{
	struct sched_poll sched_poll;
	evtchn_port_t port, *ports;
	struct x86_exception e;
	int i;

	if (!lapic_in_kernel(vcpu) ||
	    !(vcpu->kvm->arch.xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_EVTCHN_SEND))
		return false;

	if (IS_ENABLED(CONFIG_64BIT) && !longmode) {
		struct compat_sched_poll sp32;

		/* Sanity check that the compat struct definition is correct */
		BUILD_BUG_ON(sizeof(sp32) != 16);

		if (kvm_read_guest_virt(vcpu, param, &sp32, sizeof(sp32), &e)) {
			*r = -EFAULT;
			return true;
		}

		/*
		 * This is a 32-bit pointer to an array of evtchn_port_t which
		 * are uint32_t, so once it's converted no further compat
		 * handling is needed.
		 */
		sched_poll.ports = (void *)(unsigned long)(sp32.ports);
		sched_poll.nr_ports = sp32.nr_ports;
		sched_poll.timeout = sp32.timeout;
	} else {
		if (kvm_read_guest_virt(vcpu, param, &sched_poll,
					sizeof(sched_poll), &e)) {
			*r = -EFAULT;
			return true;
		}
	}

	if (unlikely(sched_poll.nr_ports > 1)) {
		/* Xen (unofficially) limits number of pollers to 128 */
		if (sched_poll.nr_ports > 128) {
			*r = -EINVAL;
			return true;
		}

		ports = kmalloc_array(sched_poll.nr_ports,
				      sizeof(*ports), GFP_KERNEL);
		if (!ports) {
			*r = -ENOMEM;
			return true;
		}
	} else
		ports = &port;

	if (kvm_read_guest_virt(vcpu, (gva_t)sched_poll.ports, ports,
				sched_poll.nr_ports * sizeof(*ports), &e)) {
		*r = -EFAULT;
		return true;
	}

	for (i = 0; i < sched_poll.nr_ports; i++) {
		if (ports[i] >= max_evtchn_port(vcpu->kvm)) {
			*r = -EINVAL;
			goto out;
		}
	}

	if (sched_poll.nr_ports == 1)
		vcpu->arch.xen.poll_evtchn = port;
	else
		vcpu->arch.xen.poll_evtchn = -1;

	set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask);

	if (!wait_pending_event(vcpu, sched_poll.nr_ports, ports)) {
		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;

		if (sched_poll.timeout)
			mod_timer(&vcpu->arch.xen.poll_timer,
				  jiffies + nsecs_to_jiffies(sched_poll.timeout));

		kvm_vcpu_halt(vcpu);

		if (sched_poll.timeout)
			del_timer(&vcpu->arch.xen.poll_timer);

		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	}

	vcpu->arch.xen.poll_evtchn = 0;
	*r = 0;
out:
	/* Really, this is only needed in case of timeout */
	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask);

	if (unlikely(sched_poll.nr_ports > 1))
		kfree(ports);
	return true;
}

static void cancel_evtchn_poll(struct timer_list *t)
{
	struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.xen.poll_timer);

	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
	kvm_vcpu_kick(vcpu);
}

static bool kvm_xen_hcall_sched_op(struct kvm_vcpu *vcpu, bool longmode,
				   int cmd, u64 param, u64 *r)
{
	switch (cmd) {
	case SCHEDOP_poll:
		if (kvm_xen_schedop_poll(vcpu, longmode, param, r))
			return true;
		fallthrough;
	case SCHEDOP_yield:
		kvm_vcpu_on_spin(vcpu, true);
		*r = 0;
		return true;
	default:
		break;
	}

	return false;
}

struct compat_vcpu_set_singleshot_timer {
	uint64_t timeout_abs_ns;
	uint32_t flags;
} __attribute__((packed));

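/*
 * Handle VCPUOP_set_singleshot_timer and VCPUOP_stop_singleshot_timer.
 * Returns true if the hypercall was handled in-kernel, with the Xen
 * return code in *r; false punts it to the userspace VMM.
 */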
static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd,
				  int vcpu_id, u64 param, u64 *r)
{
	struct vcpu_set_singleshot_timer oneshot;
	struct x86_exception e;
	s64 delta;

	if (!kvm_xen_timer_enabled(vcpu))
		return false;

	switch (cmd) {
	case VCPUOP_set_singleshot_timer:
		if (vcpu->arch.xen.vcpu_id != vcpu_id) {
			*r = -EINVAL;
			return true;
		}

		/*
		 * The only difference for 32-bit compat is the 4 bytes of
		 * padding after the interesting part of the structure. So
		 * for a faithful emulation of Xen we have to *try* to copy
		 * the padding and return -EFAULT if we can't. Otherwise we
		 * might as well just have copied the 12-byte 32-bit struct.
		 */
		BUILD_BUG_ON(offsetof(struct compat_vcpu_set_singleshot_timer, timeout_abs_ns) !=
			     offsetof(struct vcpu_set_singleshot_timer, timeout_abs_ns));
		BUILD_BUG_ON(sizeof_field(struct compat_vcpu_set_singleshot_timer, timeout_abs_ns) !=
			     sizeof_field(struct vcpu_set_singleshot_timer, timeout_abs_ns));
		BUILD_BUG_ON(offsetof(struct compat_vcpu_set_singleshot_timer, flags) !=
			     offsetof(struct vcpu_set_singleshot_timer, flags));
		BUILD_BUG_ON(sizeof_field(struct compat_vcpu_set_singleshot_timer, flags) !=
			     sizeof_field(struct vcpu_set_singleshot_timer, flags));

		if (kvm_read_guest_virt(vcpu, param, &oneshot, longmode ? sizeof(oneshot) :
					sizeof(struct compat_vcpu_set_singleshot_timer), &e)) {
			*r = -EFAULT;
			return true;
		}

		delta = oneshot.timeout_abs_ns - get_kvmclock_ns(vcpu->kvm);
		if ((oneshot.flags & VCPU_SSHOTTMR_future) && delta < 0) {
			*r = -ETIME;
			return true;
		}

		kvm_xen_start_timer(vcpu, oneshot.timeout_abs_ns, delta);
		*r = 0;
		return true;

	case VCPUOP_stop_singleshot_timer:
		if (vcpu->arch.xen.vcpu_id != vcpu_id) {
			*r = -EINVAL;
			return true;
		}
		kvm_xen_stop_timer(vcpu);
		*r = 0;
		return true;
	}

	return false;
}

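/*
 * __HYPERVISOR_set_timer_op takes a single absolute timeout in guest
 * (kvmclock) nanoseconds; a zero timeout cancels any pending timer.
 */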
static bool kvm_xen_hcall_set_timer_op(struct kvm_vcpu *vcpu, uint64_t timeout,
				       u64 *r)
{
	if (!kvm_xen_timer_enabled(vcpu))
		return false;

	if (timeout) {
		uint64_t guest_now = get_kvmclock_ns(vcpu->kvm);
		int64_t delta = timeout - guest_now;

		/* Xen has a 'Linux workaround' in do_set_timer_op() which
		 * checks for negative absolute timeout values (caused by
		 * integer overflow), and for values about 13 days in the
		 * future (2^50ns) which would be caused by jiffies
		 * overflow. For those cases, it sets the timeout 100ms in
		 * the future (not *too* soon, since if a guest really did
		 * set a long timeout on purpose we don't want to keep
		 * churning CPU time by waking it up).
		 */
		if (unlikely((int64_t)timeout < 0 ||
			     (delta > 0 && (uint32_t) (delta >> 50) != 0))) {
			delta = 100 * NSEC_PER_MSEC;
			timeout = guest_now + delta;
		}

		kvm_xen_start_timer(vcpu, timeout, delta);
	} else {
		kvm_xen_stop_timer(vcpu);
	}

	*r = 0;
	return true;
}

int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
{
	bool longmode;
	u64 input, params[6], r = -ENOSYS;
	bool handled = false;
	u8 cpl;

	input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);

	/* Hyper-V hypercalls get bit 31 set in EAX */
	if ((input & 0x80000000) &&
	    kvm_hv_hypercall_enabled(vcpu))
		return kvm_hv_hypercall(vcpu);

	longmode = is_64_bit_hypercall(vcpu);
	if (!longmode) {
		params[0] = (u32)kvm_rbx_read(vcpu);
		params[1] = (u32)kvm_rcx_read(vcpu);
		params[2] = (u32)kvm_rdx_read(vcpu);
		params[3] = (u32)kvm_rsi_read(vcpu);
		params[4] = (u32)kvm_rdi_read(vcpu);
		params[5] = (u32)kvm_rbp_read(vcpu);
	}
#ifdef CONFIG_X86_64
	else {
		params[0] = (u64)kvm_rdi_read(vcpu);
		params[1] = (u64)kvm_rsi_read(vcpu);
		params[2] = (u64)kvm_rdx_read(vcpu);
		params[3] = (u64)kvm_r10_read(vcpu);
		params[4] = (u64)kvm_r8_read(vcpu);
		params[5] = (u64)kvm_r9_read(vcpu);
	}
#endif
	cpl = static_call(kvm_x86_get_cpl)(vcpu);
	trace_kvm_xen_hypercall(cpl, input, params[0], params[1], params[2],
				params[3], params[4], params[5]);

	/*
	 * Only allow hypercall acceleration for CPL0. The rare hypercalls that
	 * are permitted in guest userspace can be handled by the VMM.
	 */
	if (unlikely(cpl > 0))
		goto handle_in_userspace;

	switch (input) {
	case __HYPERVISOR_xen_version:
		if (params[0] == XENVER_version && vcpu->kvm->arch.xen.xen_version) {
			r = vcpu->kvm->arch.xen.xen_version;
			handled = true;
		}
		break;
	case __HYPERVISOR_event_channel_op:
		if (params[0] == EVTCHNOP_send)
			handled = kvm_xen_hcall_evtchn_send(vcpu, params[1], &r);
		break;
	case __HYPERVISOR_sched_op:
		handled = kvm_xen_hcall_sched_op(vcpu, longmode, params[0],
						 params[1], &r);
		break;
	case __HYPERVISOR_vcpu_op:
		handled = kvm_xen_hcall_vcpu_op(vcpu, longmode, params[0], params[1],
						params[2], &r);
		break;
	case __HYPERVISOR_set_timer_op: {
		u64 timeout = params[0];
		/* In 32-bit mode, the 64-bit timeout is in two 32-bit params. */
		if (!longmode)
			timeout |= params[1] << 32;
		handled = kvm_xen_hcall_set_timer_op(vcpu, timeout, &r);
		break;
	}
	default:
		break;
	}

	if (handled)
		return kvm_xen_hypercall_set_result(vcpu, r);

handle_in_userspace:
	vcpu->run->exit_reason = KVM_EXIT_XEN;
	vcpu->run->xen.type = KVM_EXIT_XEN_HCALL;
	vcpu->run->xen.u.hcall.longmode = longmode;
	vcpu->run->xen.u.hcall.cpl = cpl;
	vcpu->run->xen.u.hcall.input = input;
	vcpu->run->xen.u.hcall.params[0] = params[0];
	vcpu->run->xen.u.hcall.params[1] = params[1];
	vcpu->run->xen.u.hcall.params[2] = params[2];
	vcpu->run->xen.u.hcall.params[3] = params[3];
	vcpu->run->xen.u.hcall.params[4] = params[4];
	vcpu->run->xen.u.hcall.params[5] = params[5];
	vcpu->arch.xen.hypercall_rip = kvm_get_linear_rip(vcpu);
	vcpu->arch.complete_userspace_io =
		kvm_xen_hypercall_complete_userspace;

	return 0;
}

static void kvm_xen_check_poller(struct kvm_vcpu *vcpu, int port)
{
	int poll_evtchn = vcpu->arch.xen.poll_evtchn;

	if ((poll_evtchn == port || poll_evtchn == -1) &&
	    test_and_clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask)) {
		kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
		kvm_vcpu_kick(vcpu);
	}
}

/*
 * The return value from this function is propagated to kvm_set_irq() API,
 * so it returns:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 *
 * It is also called directly from kvm_arch_set_irq_inatomic(), where the
 * only check on its return value is a comparison with -EWOULDBLOCK.
 */
int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
{
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	struct kvm_vcpu *vcpu;
	unsigned long *pending_bits, *mask_bits;
	unsigned long flags;
	int port_word_bit;
	bool kick_vcpu = false;
	int vcpu_idx, idx, rc;

	vcpu_idx = READ_ONCE(xe->vcpu_idx);
	if (vcpu_idx >= 0)
		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
	else {
		vcpu = kvm_get_vcpu_by_id(kvm, xe->vcpu_id);
		if (!vcpu)
			return -EINVAL;
		WRITE_ONCE(xe->vcpu_idx, vcpu->vcpu_idx);
	}

	if (!vcpu->arch.xen.vcpu_info_cache.active)
		return -EINVAL;

	if (xe->port >= max_evtchn_port(kvm))
		return -EINVAL;

	rc = -EWOULDBLOCK;

	idx = srcu_read_lock(&kvm->srcu);

	read_lock_irqsave(&gpc->lock, flags);
	if (!kvm_gpc_check(gpc, PAGE_SIZE))
		goto out_rcu;

	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
		struct shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
		mask_bits = (unsigned long *)&shinfo->evtchn_mask;
		port_word_bit = xe->port / 64;
	} else {
		struct compat_shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
		mask_bits = (unsigned long *)&shinfo->evtchn_mask;
		port_word_bit = xe->port / 32;
	}

	/*
	 * If this port wasn't already set, and if it isn't masked, then
	 * we try to set the corresponding bit in the in-kernel shadow of
	 * evtchn_pending_sel for the target vCPU. And if *that* wasn't
	 * already set, then we kick the vCPU in question to write to the
	 * *real* evtchn_pending_sel in its own guest vcpu_info struct.
	 */
	if (test_and_set_bit(xe->port, pending_bits)) {
		rc = 0; /* It was already raised */
	} else if (test_bit(xe->port, mask_bits)) {
		rc = -ENOTCONN; /* Masked */
		kvm_xen_check_poller(vcpu, xe->port);
	} else {
		rc = 1; /* Delivered to the bitmap in shared_info. */
		/* Now switch to the vCPU's vcpu_info to set the index and pending_sel */
		read_unlock_irqrestore(&gpc->lock, flags);
		gpc = &vcpu->arch.xen.vcpu_info_cache;

		read_lock_irqsave(&gpc->lock, flags);
		if (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
			/*
			 * Could not access the vcpu_info. Set the bit in-kernel
			 * and prod the vCPU to deliver it for itself.
			 */
			if (!test_and_set_bit(port_word_bit, &vcpu->arch.xen.evtchn_pending_sel))
				kick_vcpu = true;
			goto out_rcu;
		}

		if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
			struct vcpu_info *vcpu_info = gpc->khva;
			if (!test_and_set_bit(port_word_bit, &vcpu_info->evtchn_pending_sel)) {
				WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
				kick_vcpu = true;
			}
		} else {
			struct compat_vcpu_info *vcpu_info = gpc->khva;
			if (!test_and_set_bit(port_word_bit,
					      (unsigned long *)&vcpu_info->evtchn_pending_sel)) {
				WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
				kick_vcpu = true;
			}
		}

		/* For the per-vCPU lapic vector, deliver it as MSI. */
		if (kick_vcpu && vcpu->arch.xen.upcall_vector) {
			kvm_xen_inject_vcpu_vector(vcpu);
			kick_vcpu = false;
		}
	}

 out_rcu:
	read_unlock_irqrestore(&gpc->lock, flags);
	srcu_read_unlock(&kvm->srcu, idx);

	if (kick_vcpu) {
		kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	return rc;
}

static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
{
	bool mm_borrowed = false;
	int rc;

	rc = kvm_xen_set_evtchn_fast(xe, kvm);
	if (rc != -EWOULDBLOCK)
		return rc;

	if (current->mm != kvm->mm) {
		/*
		 * If not on a thread which already belongs to this KVM,
		 * we'd better be in the irqfd workqueue.
		 */
		if (WARN_ON_ONCE(current->mm))
			return -EINVAL;

		kthread_use_mm(kvm->mm);
		mm_borrowed = true;
	}

	/*
	 * For the irqfd workqueue, using the main kvm->lock mutex is
	 * fine since this function is invoked from kvm_set_irq() with
	 * no other lock held, no srcu. In the future, if it is called
	 * directly from a vCPU thread (e.g. on hypercall for an IPI)
	 * then it may need to switch to using a leaf-node mutex for
	 * serializing the shared_info mapping.
	 */
	mutex_lock(&kvm->lock);

	/*
	 * It is theoretically possible for the page to be unmapped
	 * and the MMU notifier to invalidate the shared_info before
	 * we even get to use it. In that case, this looks like an
	 * infinite loop. It was tempting to do it via the userspace
	 * HVA instead... but that just *hides* the fact that it's
	 * an infinite loop, because if a fault occurs and it waits
	 * for the page to come back, it can *still* immediately
	 * fault and have to wait again, repeatedly.
	 *
	 * Conversely, the page could also have been reinstated by
	 * another thread before we even obtain the mutex above, so
	 * check again *first* before remapping it.
	 */
	do {
		struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
		int idx;

		rc = kvm_xen_set_evtchn_fast(xe, kvm);
		if (rc != -EWOULDBLOCK)
			break;

		idx = srcu_read_lock(&kvm->srcu);
		rc = kvm_gpc_refresh(gpc, PAGE_SIZE);
		srcu_read_unlock(&kvm->srcu, idx);
	} while (!rc);

	mutex_unlock(&kvm->lock);

	if (mm_borrowed)
		kthread_unuse_mm(kvm->mm);

	return rc;
}

/* This is the version called from kvm_set_irq() as the .set function */
static int evtchn_set_fn(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
			 int irq_source_id, int level, bool line_status)
{
	if (!level)
		return -EINVAL;

	return kvm_xen_set_evtchn(&e->xen_evtchn, kvm);
}

/*
 * Set up an event channel interrupt from the KVM IRQ routing table.
 * Used for e.g. PIRQ from passed through physical devices.
 */
int kvm_xen_setup_evtchn(struct kvm *kvm,
			 struct kvm_kernel_irq_routing_entry *e,
			 const struct kvm_irq_routing_entry *ue)
{
	struct kvm_vcpu *vcpu;

	if (ue->u.xen_evtchn.port >= max_evtchn_port(kvm))
		return -EINVAL;

	/* We only support 2 level event channels for now */
	if (ue->u.xen_evtchn.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		return -EINVAL;

	/*
	 * Xen gives us interesting mappings from vCPU index to APIC ID,
	 * which means kvm_get_vcpu_by_id() has to iterate over all vCPUs
	 * to find it. Do that once at setup time, instead of every time.
	 * But beware that on live update / live migration, the routing
	 * table might be reinstated before the vCPU threads have finished
	 * recreating their vCPUs.
	 */
	vcpu = kvm_get_vcpu_by_id(kvm, ue->u.xen_evtchn.vcpu);
	if (vcpu)
		e->xen_evtchn.vcpu_idx = vcpu->vcpu_idx;
	else
		e->xen_evtchn.vcpu_idx = -1;

	e->xen_evtchn.port = ue->u.xen_evtchn.port;
	e->xen_evtchn.vcpu_id = ue->u.xen_evtchn.vcpu;
	e->xen_evtchn.priority = ue->u.xen_evtchn.priority;
	e->set = evtchn_set_fn;

	return 0;
}

/*
 * Explicit event sending from userspace with KVM_XEN_HVM_EVTCHN_SEND ioctl.
 */
int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *uxe)
{
	struct kvm_xen_evtchn e;
	int ret;

	if (!uxe->port || uxe->port >= max_evtchn_port(kvm))
		return -EINVAL;

	/* We only support 2 level event channels for now */
	if (uxe->priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		return -EINVAL;

	e.port = uxe->port;
	e.vcpu_id = uxe->vcpu;
	e.vcpu_idx = -1;
	e.priority = uxe->priority;

	ret = kvm_xen_set_evtchn(&e, kvm);

	/*
	 * None of that 'return 1 if it actually got delivered' nonsense.
	 * We don't care if it was masked (-ENOTCONN) either.
	 */
	if (ret > 0 || ret == -ENOTCONN)
		ret = 0;

	return ret;
}

/*
 * Support for *outbound* event channel events via the EVTCHNOP_send hypercall.
 */
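/*
 * A zero 'deliver.port.port' is what marks an eventfd-backed channel;
 * the 'deliver' union is discriminated by that field rather than by
 * 'type'.
 */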
struct evtchnfd {
	u32 send_port;
	u32 type;
	union {
		struct kvm_xen_evtchn port;
		struct {
			u32 port; /* zero */
			struct eventfd_ctx *ctx;
		} eventfd;
	} deliver;
};

/*
 * Update target vCPU or priority for a registered sending channel.
 */
static int kvm_xen_eventfd_update(struct kvm *kvm,
				  struct kvm_xen_hvm_attr *data)
{
	u32 port = data->u.evtchn.send_port;
	struct evtchnfd *evtchnfd;
	int ret;

	/* Protect writes to evtchnfd as well as the idr lookup. */
	mutex_lock(&kvm->lock);
	evtchnfd = idr_find(&kvm->arch.xen.evtchn_ports, port);

	ret = -ENOENT;
	if (!evtchnfd)
		goto out_unlock;

	/* For an UPDATE, nothing may change except the priority/vcpu */
	ret = -EINVAL;
	if (evtchnfd->type != data->u.evtchn.type)
		goto out_unlock;

	/*
	 * Port cannot change, and if it's zero that was an eventfd
	 * which can't be changed either.
	 */
	if (!evtchnfd->deliver.port.port ||
	    evtchnfd->deliver.port.port != data->u.evtchn.deliver.port.port)
		goto out_unlock;

	/* We only support 2 level event channels for now */
	if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		goto out_unlock;

	evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
	if (evtchnfd->deliver.port.vcpu_id != data->u.evtchn.deliver.port.vcpu) {
		evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu;
		evtchnfd->deliver.port.vcpu_idx = -1;
	}
	ret = 0;
out_unlock:
	mutex_unlock(&kvm->lock);
	return ret;
}

/*
 * Configure the target (eventfd or local port delivery) for sending on
 * a given event channel.
 */
static int kvm_xen_eventfd_assign(struct kvm *kvm,
				  struct kvm_xen_hvm_attr *data)
{
	u32 port = data->u.evtchn.send_port;
	struct eventfd_ctx *eventfd = NULL;
	struct evtchnfd *evtchnfd;
	int ret = -EINVAL;

	evtchnfd = kzalloc(sizeof(struct evtchnfd), GFP_KERNEL);
	if (!evtchnfd)
		return -ENOMEM;

	switch (data->u.evtchn.type) {
	case EVTCHNSTAT_ipi:
		/* IPI must map back to the same port# */
		if (data->u.evtchn.deliver.port.port != data->u.evtchn.send_port)
			goto out_noeventfd; /* -EINVAL */
		break;

	case EVTCHNSTAT_interdomain:
		if (data->u.evtchn.deliver.port.port) {
			if (data->u.evtchn.deliver.port.port >= max_evtchn_port(kvm))
				goto out_noeventfd; /* -EINVAL */
		} else {
			eventfd = eventfd_ctx_fdget(data->u.evtchn.deliver.eventfd.fd);
			if (IS_ERR(eventfd)) {
				ret = PTR_ERR(eventfd);
				goto out_noeventfd;
			}
		}
		break;

	case EVTCHNSTAT_virq:
	case EVTCHNSTAT_closed:
	case EVTCHNSTAT_unbound:
	case EVTCHNSTAT_pirq:
	default: /* Unknown event channel type */
		goto out; /* -EINVAL */
	}

	evtchnfd->send_port = data->u.evtchn.send_port;
	evtchnfd->type = data->u.evtchn.type;
	if (eventfd) {
		evtchnfd->deliver.eventfd.ctx = eventfd;
	} else {
		/* We only support 2 level event channels for now */
		if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
			goto out; /* -EINVAL */

		evtchnfd->deliver.port.port = data->u.evtchn.deliver.port.port;
		evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu;
		evtchnfd->deliver.port.vcpu_idx = -1;
		evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
	}

	mutex_lock(&kvm->lock);
	ret = idr_alloc(&kvm->arch.xen.evtchn_ports, evtchnfd, port, port + 1,
			GFP_KERNEL);
	mutex_unlock(&kvm->lock);
	if (ret >= 0)
		return 0;

	if (ret == -ENOSPC)
		ret = -EEXIST;
out:
	if (eventfd)
		eventfd_ctx_put(eventfd);
out_noeventfd:
	kfree(evtchnfd);
	return ret;
}

static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port)
{
	struct evtchnfd *evtchnfd;

	mutex_lock(&kvm->lock);
	evtchnfd = idr_remove(&kvm->arch.xen.evtchn_ports, port);
	mutex_unlock(&kvm->lock);

	if (!evtchnfd)
		return -ENOENT;

	synchronize_srcu(&kvm->srcu);
	if (!evtchnfd->deliver.port.port)
		eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
	kfree(evtchnfd);
	return 0;
}

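/*
 * KVM_XEN_EVTCHN_RESET: drop all outbound event channels at once,
 * e.g. when the guest is rebooted.
 */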
static int kvm_xen_eventfd_reset(struct kvm *kvm)
{
	struct evtchnfd *evtchnfd, **all_evtchnfds;
	int i;
	int n = 0;

	mutex_lock(&kvm->lock);

	/*
	 * Because synchronize_srcu() cannot be called inside the
	 * critical section, first collect all the evtchnfd objects
	 * in an array as they are removed from evtchn_ports.
	 */
	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i)
		n++;

	all_evtchnfds = kmalloc_array(n, sizeof(struct evtchnfd *), GFP_KERNEL);
	if (!all_evtchnfds) {
		mutex_unlock(&kvm->lock);
		return -ENOMEM;
	}

	n = 0;
	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
		all_evtchnfds[n++] = evtchnfd;
		idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port);
	}
	mutex_unlock(&kvm->lock);

	synchronize_srcu(&kvm->srcu);

	while (n--) {
		evtchnfd = all_evtchnfds[n];
		if (!evtchnfd->deliver.port.port)
			eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
		kfree(evtchnfd);
	}
	kfree(all_evtchnfds);

	return 0;
}

static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	u32 port = data->u.evtchn.send_port;

	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_RESET)
		return kvm_xen_eventfd_reset(kvm);

	if (!port || port >= max_evtchn_port(kvm))
		return -EINVAL;

	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_DEASSIGN)
		return kvm_xen_eventfd_deassign(kvm, port);
	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_UPDATE)
		return kvm_xen_eventfd_update(kvm, data);
	if (data->u.evtchn.flags)
		return -EINVAL;

	return kvm_xen_eventfd_assign(kvm, data);
}

static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r)
{
	struct evtchnfd *evtchnfd;
	struct evtchn_send send;
	struct x86_exception e;

	/* Sanity check: this structure is the same for 32-bit and 64-bit */
	BUILD_BUG_ON(sizeof(send) != 4);
	if (kvm_read_guest_virt(vcpu, param, &send, sizeof(send), &e)) {
		*r = -EFAULT;
		return true;
	}

	/*
	 * evtchnfd is protected by kvm->srcu; the idr lookup instead
	 * is protected by RCU.
	 */
	rcu_read_lock();
	evtchnfd = idr_find(&vcpu->kvm->arch.xen.evtchn_ports, send.port);
	rcu_read_unlock();
	if (!evtchnfd)
		return false;

	if (evtchnfd->deliver.port.port) {
		int ret = kvm_xen_set_evtchn(&evtchnfd->deliver.port, vcpu->kvm);
		if (ret < 0 && ret != -ENOTCONN)
			return false;
	} else {
		eventfd_signal(evtchnfd->deliver.eventfd.ctx, 1);
	}

	*r = 0;
	return true;
}

void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
{
	vcpu->arch.xen.vcpu_id = vcpu->vcpu_idx;
	vcpu->arch.xen.poll_evtchn = 0;

	timer_setup(&vcpu->arch.xen.poll_timer, cancel_evtchn_poll, 0);

	kvm_gpc_init(&vcpu->arch.xen.runstate_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);
	kvm_gpc_init(&vcpu->arch.xen.runstate2_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);
	kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);
	kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);
}

void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{
	if (kvm_xen_timer_enabled(vcpu))
		kvm_xen_stop_timer(vcpu);

	kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache);
	kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);
	kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
	kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache);

	del_timer_sync(&vcpu->arch.xen.poll_timer);
}

void kvm_xen_init_vm(struct kvm *kvm)
{
	idr_init(&kvm->arch.xen.evtchn_ports);
	kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm, NULL, KVM_HOST_USES_PFN);
}

void kvm_xen_destroy_vm(struct kvm *kvm)
{
	struct evtchnfd *evtchnfd;
	int i;

	kvm_gpc_deactivate(&kvm->arch.xen.shinfo_cache);

	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
		if (!evtchnfd->deliver.port.port)
			eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
		kfree(evtchnfd);
	}
	idr_destroy(&kvm->arch.xen.evtchn_ports);

	if (kvm->arch.xen_hvm_config.msr)
		static_branch_slow_dec_deferred(&kvm_xen_enabled);
}