1 /* 2 * 3 * Copyright (c) 2011, Microsoft Corporation. 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms and conditions of the GNU General Public License, 7 * version 2, as published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * more details. 13 * 14 * You should have received a copy of the GNU General Public License along with 15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 16 * Place - Suite 330, Boston, MA 02111-1307 USA. 17 * 18 * Authors: 19 * Haiyang Zhang <haiyangz@microsoft.com> 20 * Hank Janssen <hjanssen@microsoft.com> 21 * K. Y. Srinivasan <kys@microsoft.com> 22 * 23 */ 24 25 #ifndef _HYPERV_H 26 #define _HYPERV_H 27 28 #include <uapi/linux/hyperv.h> 29 #include <uapi/asm/hyperv.h> 30 31 #include <linux/types.h> 32 #include <linux/scatterlist.h> 33 #include <linux/list.h> 34 #include <linux/timer.h> 35 #include <linux/workqueue.h> 36 #include <linux/completion.h> 37 #include <linux/device.h> 38 #include <linux/mod_devicetable.h> 39 40 41 #define MAX_PAGE_BUFFER_COUNT 32 42 #define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */ 43 44 #pragma pack(push, 1) 45 46 /* Single-page buffer */ 47 struct hv_page_buffer { 48 u32 len; 49 u32 offset; 50 u64 pfn; 51 }; 52 53 /* Multiple-page buffer */ 54 struct hv_multipage_buffer { 55 /* Length and Offset determines the # of pfns in the array */ 56 u32 len; 57 u32 offset; 58 u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT]; 59 }; 60 61 /* 62 * Multiple-page buffer array; the pfn array is variable size: 63 * The number of entries in the PFN array is determined by 64 * "len" and "offset". 
65 */ 66 struct hv_mpb_array { 67 /* Length and Offset determines the # of pfns in the array */ 68 u32 len; 69 u32 offset; 70 u64 pfn_array[]; 71 }; 72 73 /* 0x18 includes the proprietary packet header */ 74 #define MAX_PAGE_BUFFER_PACKET (0x18 + \ 75 (sizeof(struct hv_page_buffer) * \ 76 MAX_PAGE_BUFFER_COUNT)) 77 #define MAX_MULTIPAGE_BUFFER_PACKET (0x18 + \ 78 sizeof(struct hv_multipage_buffer)) 79 80 81 #pragma pack(pop) 82 83 struct hv_ring_buffer { 84 /* Offset in bytes from the start of ring data below */ 85 u32 write_index; 86 87 /* Offset in bytes from the start of ring data below */ 88 u32 read_index; 89 90 u32 interrupt_mask; 91 92 /* 93 * Win8 uses some of the reserved bits to implement 94 * interrupt driven flow management. On the send side 95 * we can request that the receiver interrupt the sender 96 * when the ring transitions from being full to being able 97 * to handle a message of size "pending_send_sz". 98 * 99 * Add necessary state for this enhancement. 100 */ 101 u32 pending_send_sz; 102 103 u32 reserved1[12]; 104 105 union { 106 struct { 107 u32 feat_pending_send_sz:1; 108 }; 109 u32 value; 110 } feature_bits; 111 112 /* Pad it to PAGE_SIZE so that data starts on page boundary */ 113 u8 reserved2[4028]; 114 115 /* 116 * Ring data starts here + RingDataStartOffset 117 * !!! DO NOT place any fields below this !!! 
 */
	u8 buffer[0];
} __packed;

/* Host-visible ring plus the guest-side bookkeeping needed to drive it. */
struct hv_ring_buffer_info {
	struct hv_ring_buffer *ring_buffer;
	u32 ring_size;			/* Include the shared header */
	spinlock_t ring_lock;

	u32 ring_datasize;		/* < ring_size */
	u32 ring_data_startoffset;
};

/*
 * hv_get_ringbuffer_availbytes()
 *
 * Get the number of bytes available to read (*read) and available to
 * write (*write) for the specified ring buffer.
 *
 * By construction the two results always sum to rbi->ring_datasize.
 * The snapshot of the indices is racy by design: the opposite endpoint
 * may advance its index concurrently, so callers treat the result as a
 * conservative estimate.
 */
static inline void
hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
			  u32 *read, u32 *write)
{
	u32 read_loc, write_loc, dsize;

	/*
	 * Data-dependency barrier: order the loads through the
	 * rbi->ring_buffer pointer after the load of the pointer itself.
	 * NOTE(review): assumes the writer publishes the ring with a
	 * matching write barrier — confirm against the update path.
	 */
	smp_read_barrier_depends();

	/* Capture the read/write indices before they changed */
	read_loc = rbi->ring_buffer->read_index;
	write_loc = rbi->ring_buffer->write_index;
	dsize = rbi->ring_datasize;

	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	*read = dsize - *write;
}

/*
 * VMBUS version is 32 bit entity broken up into
 * two 16 bit quantities: major_number. minor_number.
 *
 * 0 . 13 (Windows Server 2008)
 * 1 . 1  (Windows 7)
 * 2 . 4  (Windows 8)
 * 3 . 0  (Windows 8 R2)
 * 4 . 0  (Windows 10)
 */

#define VERSION_WS2008  ((0 << 16) | (13))
#define VERSION_WIN7    ((1 << 16) | (1))
#define VERSION_WIN8    ((2 << 16) | (4))
#define VERSION_WIN8_1    ((3 << 16) | (0))
#define VERSION_WIN10	((4 << 16) | (0))

#define VERSION_INVAL -1

#define VERSION_CURRENT VERSION_WIN10

/* Make maximum size of pipe payload of 16K */
#define MAX_PIPE_DATA_PAYLOAD		(sizeof(u8) * 16384)

/* Define PipeMode values. */
#define VMBUS_PIPE_TYPE_BYTE		0x00000000
#define VMBUS_PIPE_TYPE_MESSAGE		0x00000004

/* The size of the user defined data buffer for non-pipe offers. */
#define MAX_USER_DEFINED_BYTES		120

/* The size of the user defined data buffer for pipe offers.
 */
#define MAX_PIPE_USER_DEFINED_BYTES 116

/*
 * At the center of the Channel Management library is the Channel Offer. This
 * struct contains the fundamental information about an offer.
 */
struct vmbus_channel_offer {
	uuid_le if_type;
	uuid_le if_instance;

	/*
	 * These two fields are not currently used.
	 */
	u64 reserved1;
	u64 reserved2;

	u16 chn_flags;
	u16 mmio_megabytes;		/* in bytes * 1024 * 1024 */

	union {
		/* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
		struct {
			unsigned char user_def[MAX_USER_DEFINED_BYTES];
		} std;

		/*
		 * Pipes:
		 * The following structure is an integrated pipe protocol, which
		 * is implemented on top of standard user-defined data. Pipe
		 * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
		 * use.
		 */
		struct {
			u32  pipe_mode;
			unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
		} pipe;
	} u;
	/*
	 * The sub_channel_index is defined in win8.
 */
	u16 sub_channel_index;
	u16 reserved3;
} __packed;

/* Server Flags */
#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE	1
#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES	2
#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS		4
#define VMBUS_CHANNEL_NAMED_PIPE_MODE			0x10
#define VMBUS_CHANNEL_LOOPBACK_OFFER			0x100
#define VMBUS_CHANNEL_PARENT_OFFER			0x200
#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION	0x400

/*
 * Common header preceding every packet on a VMBus ring buffer.
 * offset8 (start of payload) and len8 (total packet length) are both
 * expressed in units of 8 bytes, as shown by the "* 8" scaling in the
 * VMPACKET_DATA_* accessor macros below.
 */
struct vmpacket_descriptor {
	u16 type;	/* enum vmbus_packet_type */
	u16 offset8;
	u16 len8;
	u16 flags;
	u64 trans_id;	/* echoed back in completions to match requests */
} __packed;

struct vmpacket_header {
	u32 prev_pkt_start_offset;
	struct vmpacket_descriptor descriptor;
} __packed;

struct vmtransfer_page_range {
	u32 byte_count;
	u32 byte_offset;
} __packed;

struct vmtransfer_page_packet_header {
	struct vmpacket_descriptor d;
	u16 xfer_pageset_id;
	u8  sender_owns_set;
	u8 reserved;
	u32 range_cnt;
	struct vmtransfer_page_range ranges[1];
} __packed;

struct vmgpadl_packet_header {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;
} __packed;

struct vmadd_remove_transfer_page_set {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u16 xfer_pageset_id;
	u16 reserved;
} __packed;

/*
 * This structure defines a range in guest physical space that can be made to
 * look virtually contiguous.
 */
struct gpa_range {
	u32 byte_count;
	u32 byte_offset;
	u64 pfn_array[0];
};

/*
 * This is the format for an Establish Gpadl packet, which contains a handle by
 * which this GPADL will be known and a set of GPA ranges associated with it.
 * This can be converted to a MDL by the guest OS. If there are multiple GPA
 * ranges, then the resulting MDL will be "chained," representing multiple VA
 * ranges.
297 */ 298 struct vmestablish_gpadl { 299 struct vmpacket_descriptor d; 300 u32 gpadl; 301 u32 range_cnt; 302 struct gpa_range range[1]; 303 } __packed; 304 305 /* 306 * This is the format for a Teardown Gpadl packet, which indicates that the 307 * GPADL handle in the Establish Gpadl packet will never be referenced again. 308 */ 309 struct vmteardown_gpadl { 310 struct vmpacket_descriptor d; 311 u32 gpadl; 312 u32 reserved; /* for alignment to a 8-byte boundary */ 313 } __packed; 314 315 /* 316 * This is the format for a GPA-Direct packet, which contains a set of GPA 317 * ranges, in addition to commands and/or data. 318 */ 319 struct vmdata_gpa_direct { 320 struct vmpacket_descriptor d; 321 u32 reserved; 322 u32 range_cnt; 323 struct gpa_range range[1]; 324 } __packed; 325 326 /* This is the format for a Additional Data Packet. */ 327 struct vmadditional_data { 328 struct vmpacket_descriptor d; 329 u64 total_bytes; 330 u32 offset; 331 u32 byte_cnt; 332 unsigned char data[1]; 333 } __packed; 334 335 union vmpacket_largest_possible_header { 336 struct vmpacket_descriptor simple_hdr; 337 struct vmtransfer_page_packet_header xfer_page_hdr; 338 struct vmgpadl_packet_header gpadl_hdr; 339 struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr; 340 struct vmestablish_gpadl establish_gpadl_hdr; 341 struct vmteardown_gpadl teardown_gpadl_hdr; 342 struct vmdata_gpa_direct data_gpa_direct_hdr; 343 }; 344 345 #define VMPACKET_DATA_START_ADDRESS(__packet) \ 346 (void *)(((unsigned char *)__packet) + \ 347 ((struct vmpacket_descriptor)__packet)->offset8 * 8) 348 349 #define VMPACKET_DATA_LENGTH(__packet) \ 350 ((((struct vmpacket_descriptor)__packet)->len8 - \ 351 ((struct vmpacket_descriptor)__packet)->offset8) * 8) 352 353 #define VMPACKET_TRANSFER_MODE(__packet) \ 354 (((struct IMPACT)__packet)->type) 355 356 enum vmbus_packet_type { 357 VM_PKT_INVALID = 0x0, 358 VM_PKT_SYNCH = 0x1, 359 VM_PKT_ADD_XFER_PAGESET = 0x2, 360 VM_PKT_RM_XFER_PAGESET = 0x3, 361 
VM_PKT_ESTABLISH_GPADL = 0x4, 362 VM_PKT_TEARDOWN_GPADL = 0x5, 363 VM_PKT_DATA_INBAND = 0x6, 364 VM_PKT_DATA_USING_XFER_PAGES = 0x7, 365 VM_PKT_DATA_USING_GPADL = 0x8, 366 VM_PKT_DATA_USING_GPA_DIRECT = 0x9, 367 VM_PKT_CANCEL_REQUEST = 0xa, 368 VM_PKT_COMP = 0xb, 369 VM_PKT_DATA_USING_ADDITIONAL_PKT = 0xc, 370 VM_PKT_ADDITIONAL_DATA = 0xd 371 }; 372 373 #define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED 1 374 375 376 /* Version 1 messages */ 377 enum vmbus_channel_message_type { 378 CHANNELMSG_INVALID = 0, 379 CHANNELMSG_OFFERCHANNEL = 1, 380 CHANNELMSG_RESCIND_CHANNELOFFER = 2, 381 CHANNELMSG_REQUESTOFFERS = 3, 382 CHANNELMSG_ALLOFFERS_DELIVERED = 4, 383 CHANNELMSG_OPENCHANNEL = 5, 384 CHANNELMSG_OPENCHANNEL_RESULT = 6, 385 CHANNELMSG_CLOSECHANNEL = 7, 386 CHANNELMSG_GPADL_HEADER = 8, 387 CHANNELMSG_GPADL_BODY = 9, 388 CHANNELMSG_GPADL_CREATED = 10, 389 CHANNELMSG_GPADL_TEARDOWN = 11, 390 CHANNELMSG_GPADL_TORNDOWN = 12, 391 CHANNELMSG_RELID_RELEASED = 13, 392 CHANNELMSG_INITIATE_CONTACT = 14, 393 CHANNELMSG_VERSION_RESPONSE = 15, 394 CHANNELMSG_UNLOAD = 16, 395 CHANNELMSG_UNLOAD_RESPONSE = 17, 396 CHANNELMSG_COUNT 397 }; 398 399 struct vmbus_channel_message_header { 400 enum vmbus_channel_message_type msgtype; 401 u32 padding; 402 } __packed; 403 404 /* Query VMBus Version parameters */ 405 struct vmbus_channel_query_vmbus_version { 406 struct vmbus_channel_message_header header; 407 u32 version; 408 } __packed; 409 410 /* VMBus Version Supported parameters */ 411 struct vmbus_channel_version_supported { 412 struct vmbus_channel_message_header header; 413 u8 version_supported; 414 } __packed; 415 416 /* Offer Channel parameters */ 417 struct vmbus_channel_offer_channel { 418 struct vmbus_channel_message_header header; 419 struct vmbus_channel_offer offer; 420 u32 child_relid; 421 u8 monitorid; 422 /* 423 * win7 and beyond splits this field into a bit field. 
424 */ 425 u8 monitor_allocated:1; 426 u8 reserved:7; 427 /* 428 * These are new fields added in win7 and later. 429 * Do not access these fields without checking the 430 * negotiated protocol. 431 * 432 * If "is_dedicated_interrupt" is set, we must not set the 433 * associated bit in the channel bitmap while sending the 434 * interrupt to the host. 435 * 436 * connection_id is to be used in signaling the host. 437 */ 438 u16 is_dedicated_interrupt:1; 439 u16 reserved1:15; 440 u32 connection_id; 441 } __packed; 442 443 /* Rescind Offer parameters */ 444 struct vmbus_channel_rescind_offer { 445 struct vmbus_channel_message_header header; 446 u32 child_relid; 447 } __packed; 448 449 /* 450 * Request Offer -- no parameters, SynIC message contains the partition ID 451 * Set Snoop -- no parameters, SynIC message contains the partition ID 452 * Clear Snoop -- no parameters, SynIC message contains the partition ID 453 * All Offers Delivered -- no parameters, SynIC message contains the partition 454 * ID 455 * Flush Client -- no parameters, SynIC message contains the partition ID 456 */ 457 458 /* Open Channel parameters */ 459 struct vmbus_channel_open_channel { 460 struct vmbus_channel_message_header header; 461 462 /* Identifies the specific VMBus channel that is being opened. */ 463 u32 child_relid; 464 465 /* ID making a particular open request at a channel offer unique. */ 466 u32 openid; 467 468 /* GPADL for the channel's ring buffer. */ 469 u32 ringbuffer_gpadlhandle; 470 471 /* 472 * Starting with win8, this field will be used to specify 473 * the target virtual processor on which to deliver the interrupt for 474 * the host to guest communication. 475 * Prior to win8, incoming channel interrupts would only 476 * be delivered on cpu 0. Setting this value to 0 would 477 * preserve the earlier behavior. 478 */ 479 u32 target_vp; 480 481 /* 482 * The upstream ring buffer begins at offset zero in the memory 483 * described by RingBufferGpadlHandle. 
The downstream ring buffer 484 * follows it at this offset (in pages). 485 */ 486 u32 downstream_ringbuffer_pageoffset; 487 488 /* User-specific data to be passed along to the server endpoint. */ 489 unsigned char userdata[MAX_USER_DEFINED_BYTES]; 490 } __packed; 491 492 /* Open Channel Result parameters */ 493 struct vmbus_channel_open_result { 494 struct vmbus_channel_message_header header; 495 u32 child_relid; 496 u32 openid; 497 u32 status; 498 } __packed; 499 500 /* Close channel parameters; */ 501 struct vmbus_channel_close_channel { 502 struct vmbus_channel_message_header header; 503 u32 child_relid; 504 } __packed; 505 506 /* Channel Message GPADL */ 507 #define GPADL_TYPE_RING_BUFFER 1 508 #define GPADL_TYPE_SERVER_SAVE_AREA 2 509 #define GPADL_TYPE_TRANSACTION 8 510 511 /* 512 * The number of PFNs in a GPADL message is defined by the number of 513 * pages that would be spanned by ByteCount and ByteOffset. If the 514 * implied number of PFNs won't fit in this packet, there will be a 515 * follow-up packet that contains more. 516 */ 517 struct vmbus_channel_gpadl_header { 518 struct vmbus_channel_message_header header; 519 u32 child_relid; 520 u32 gpadl; 521 u16 range_buflen; 522 u16 rangecount; 523 struct gpa_range range[0]; 524 } __packed; 525 526 /* This is the followup packet that contains more PFNs. 
*/ 527 struct vmbus_channel_gpadl_body { 528 struct vmbus_channel_message_header header; 529 u32 msgnumber; 530 u32 gpadl; 531 u64 pfn[0]; 532 } __packed; 533 534 struct vmbus_channel_gpadl_created { 535 struct vmbus_channel_message_header header; 536 u32 child_relid; 537 u32 gpadl; 538 u32 creation_status; 539 } __packed; 540 541 struct vmbus_channel_gpadl_teardown { 542 struct vmbus_channel_message_header header; 543 u32 child_relid; 544 u32 gpadl; 545 } __packed; 546 547 struct vmbus_channel_gpadl_torndown { 548 struct vmbus_channel_message_header header; 549 u32 gpadl; 550 } __packed; 551 552 struct vmbus_channel_relid_released { 553 struct vmbus_channel_message_header header; 554 u32 child_relid; 555 } __packed; 556 557 struct vmbus_channel_initiate_contact { 558 struct vmbus_channel_message_header header; 559 u32 vmbus_version_requested; 560 u32 target_vcpu; /* The VCPU the host should respond to */ 561 u64 interrupt_page; 562 u64 monitor_page1; 563 u64 monitor_page2; 564 } __packed; 565 566 struct vmbus_channel_version_response { 567 struct vmbus_channel_message_header header; 568 u8 version_supported; 569 } __packed; 570 571 enum vmbus_channel_state { 572 CHANNEL_OFFER_STATE, 573 CHANNEL_OPENING_STATE, 574 CHANNEL_OPEN_STATE, 575 CHANNEL_OPENED_STATE, 576 }; 577 578 /* 579 * Represents each channel msg on the vmbus connection This is a 580 * variable-size data structure depending on the msg type itself 581 */ 582 struct vmbus_channel_msginfo { 583 /* Bookkeeping stuff */ 584 struct list_head msglistentry; 585 586 /* So far, this is only used to handle gpadl body message */ 587 struct list_head submsglist; 588 589 /* Synchronize the request/response if needed */ 590 struct completion waitevent; 591 union { 592 struct vmbus_channel_version_supported version_supported; 593 struct vmbus_channel_open_result open_result; 594 struct vmbus_channel_gpadl_torndown gpadl_torndown; 595 struct vmbus_channel_gpadl_created gpadl_created; 596 struct 
vmbus_channel_version_response version_response; 597 } response; 598 599 u32 msgsize; 600 /* 601 * The channel message that goes out on the "wire". 602 * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header 603 */ 604 unsigned char msg[0]; 605 }; 606 607 struct vmbus_close_msg { 608 struct vmbus_channel_msginfo info; 609 struct vmbus_channel_close_channel msg; 610 }; 611 612 /* Define connection identifier type. */ 613 union hv_connection_id { 614 u32 asu32; 615 struct { 616 u32 id:24; 617 u32 reserved:8; 618 } u; 619 }; 620 621 /* Definition of the hv_signal_event hypercall input structure. */ 622 struct hv_input_signal_event { 623 union hv_connection_id connectionid; 624 u16 flag_number; 625 u16 rsvdz; 626 }; 627 628 struct hv_input_signal_event_buffer { 629 u64 align8; 630 struct hv_input_signal_event event; 631 }; 632 633 struct vmbus_channel { 634 /* Unique channel id */ 635 int id; 636 637 struct list_head listentry; 638 639 struct hv_device *device_obj; 640 641 enum vmbus_channel_state state; 642 643 struct vmbus_channel_offer_channel offermsg; 644 /* 645 * These are based on the OfferMsg.MonitorId. 646 * Save it here for easy access. 
647 */ 648 u8 monitor_grp; 649 u8 monitor_bit; 650 651 bool rescind; /* got rescind msg */ 652 653 u32 ringbuffer_gpadlhandle; 654 655 /* Allocated memory for ring buffer */ 656 void *ringbuffer_pages; 657 u32 ringbuffer_pagecount; 658 struct hv_ring_buffer_info outbound; /* send to parent */ 659 struct hv_ring_buffer_info inbound; /* receive from parent */ 660 spinlock_t inbound_lock; 661 662 struct vmbus_close_msg close_msg; 663 664 /* Channel callback are invoked in this workqueue context */ 665 /* HANDLE dataWorkQueue; */ 666 667 void (*onchannel_callback)(void *context); 668 void *channel_callback_context; 669 670 /* 671 * A channel can be marked for efficient (batched) 672 * reading: 673 * If batched_reading is set to "true", we read until the 674 * channel is empty and hold off interrupts from the host 675 * during the entire read process. 676 * If batched_reading is set to "false", the client is not 677 * going to perform batched reading. 678 * 679 * By default we will enable batched reading; specific 680 * drivers that don't want this behavior can turn it off. 681 */ 682 683 bool batched_reading; 684 685 bool is_dedicated_interrupt; 686 struct hv_input_signal_event_buffer sig_buf; 687 struct hv_input_signal_event *sig_event; 688 689 /* 690 * Starting with win8, this field will be used to specify 691 * the target virtual processor on which to deliver the interrupt for 692 * the host to guest communication. 693 * Prior to win8, incoming channel interrupts would only 694 * be delivered on cpu 0. Setting this value to 0 would 695 * preserve the earlier behavior. 696 */ 697 u32 target_vp; 698 /* The corresponding CPUID in the guest */ 699 u32 target_cpu; 700 /* 701 * State to manage the CPU affiliation of channels. 702 */ 703 struct cpumask alloced_cpus_in_node; 704 int numa_node; 705 /* 706 * Support for sub-channels. 
For high performance devices, 707 * it will be useful to have multiple sub-channels to support 708 * a scalable communication infrastructure with the host. 709 * The support for sub-channels is implemented as an extention 710 * to the current infrastructure. 711 * The initial offer is considered the primary channel and this 712 * offer message will indicate if the host supports sub-channels. 713 * The guest is free to ask for sub-channels to be offerred and can 714 * open these sub-channels as a normal "primary" channel. However, 715 * all sub-channels will have the same type and instance guids as the 716 * primary channel. Requests sent on a given channel will result in a 717 * response on the same channel. 718 */ 719 720 /* 721 * Sub-channel creation callback. This callback will be called in 722 * process context when a sub-channel offer is received from the host. 723 * The guest can open the sub-channel in the context of this callback. 724 */ 725 void (*sc_creation_callback)(struct vmbus_channel *new_sc); 726 727 /* 728 * The spinlock to protect the structure. It is being used to protect 729 * test-and-set access to various attributes of the structure as well 730 * as all sc_list operations. 731 */ 732 spinlock_t lock; 733 /* 734 * All Sub-channels of a primary channel are linked here. 735 */ 736 struct list_head sc_list; 737 /* 738 * Current number of sub-channels. 739 */ 740 int num_sc; 741 /* 742 * Number of a sub-channel (position within sc_list) which is supposed 743 * to be used as the next outgoing channel. 744 */ 745 int next_oc; 746 /* 747 * The primary channel this sub-channel belongs to. 748 * This will be NULL for the primary channel. 749 */ 750 struct vmbus_channel *primary_channel; 751 /* 752 * Support per-channel state for use by vmbus drivers. 753 */ 754 void *per_channel_state; 755 /* 756 * To support per-cpu lookup mapping of relid to channel, 757 * link up channels based on their CPU affinity. 
 */
	struct list_head percpu_list;
};

/* Enable/disable batched reading for a channel (see batched_reading above). */
static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
{
	c->batched_reading = state;
}

/* Attach driver-private per-channel state; ownership stays with the driver. */
static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
{
	c->per_channel_state = s;
}

/* Retrieve the driver-private state stored by set_per_channel_state(). */
static inline void *get_per_channel_state(struct vmbus_channel *c)
{
	return c->per_channel_state;
}

void vmbus_onmessage(void *context);

int vmbus_request_offers(void);

/*
 * APIs for managing sub-channels.
 */

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
			void (*sc_cr_cb)(struct vmbus_channel *new_sc));

/*
 * Retrieve the (sub) channel on which to send an outgoing request.
 * When a primary channel has multiple sub-channels, we choose a
 * channel whose VCPU binding is closest to the VCPU on which
 * this call is being made.
 */
struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary);

/*
 * Check if sub-channels have already been offered. This API will be useful
 * when the driver is unloaded after establishing sub-channels. In this case,
 * when the driver is re-loaded, the driver would have to check if the
 * subchannels have already been established before attempting to request
 * the creation of sub-channels.
 * This function returns TRUE to indicate that subchannels have already been
 * created.
 * This function should be invoked after setting the callback function for
 * sub-channel creation.
806 */ 807 bool vmbus_are_subchannels_present(struct vmbus_channel *primary); 808 809 /* The format must be the same as struct vmdata_gpa_direct */ 810 struct vmbus_channel_packet_page_buffer { 811 u16 type; 812 u16 dataoffset8; 813 u16 length8; 814 u16 flags; 815 u64 transactionid; 816 u32 reserved; 817 u32 rangecount; 818 struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT]; 819 } __packed; 820 821 /* The format must be the same as struct vmdata_gpa_direct */ 822 struct vmbus_channel_packet_multipage_buffer { 823 u16 type; 824 u16 dataoffset8; 825 u16 length8; 826 u16 flags; 827 u64 transactionid; 828 u32 reserved; 829 u32 rangecount; /* Always 1 in this case */ 830 struct hv_multipage_buffer range; 831 } __packed; 832 833 /* The format must be the same as struct vmdata_gpa_direct */ 834 struct vmbus_packet_mpb_array { 835 u16 type; 836 u16 dataoffset8; 837 u16 length8; 838 u16 flags; 839 u64 transactionid; 840 u32 reserved; 841 u32 rangecount; /* Always 1 in this case */ 842 struct hv_mpb_array range; 843 } __packed; 844 845 846 extern int vmbus_open(struct vmbus_channel *channel, 847 u32 send_ringbuffersize, 848 u32 recv_ringbuffersize, 849 void *userdata, 850 u32 userdatalen, 851 void(*onchannel_callback)(void *context), 852 void *context); 853 854 extern void vmbus_close(struct vmbus_channel *channel); 855 856 extern int vmbus_sendpacket(struct vmbus_channel *channel, 857 void *buffer, 858 u32 bufferLen, 859 u64 requestid, 860 enum vmbus_packet_type type, 861 u32 flags); 862 863 extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel, 864 void *buffer, 865 u32 bufferLen, 866 u64 requestid, 867 enum vmbus_packet_type type, 868 u32 flags, 869 bool kick_q); 870 871 extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, 872 struct hv_page_buffer pagebuffers[], 873 u32 pagecount, 874 void *buffer, 875 u32 bufferlen, 876 u64 requestid); 877 878 extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel, 879 struct hv_page_buffer 
pagebuffers[], 880 u32 pagecount, 881 void *buffer, 882 u32 bufferlen, 883 u64 requestid, 884 u32 flags, 885 bool kick_q); 886 887 extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel, 888 struct hv_multipage_buffer *mpb, 889 void *buffer, 890 u32 bufferlen, 891 u64 requestid); 892 893 extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel, 894 struct vmbus_packet_mpb_array *mpb, 895 u32 desc_size, 896 void *buffer, 897 u32 bufferlen, 898 u64 requestid); 899 900 extern int vmbus_establish_gpadl(struct vmbus_channel *channel, 901 void *kbuffer, 902 u32 size, 903 u32 *gpadl_handle); 904 905 extern int vmbus_teardown_gpadl(struct vmbus_channel *channel, 906 u32 gpadl_handle); 907 908 extern int vmbus_recvpacket(struct vmbus_channel *channel, 909 void *buffer, 910 u32 bufferlen, 911 u32 *buffer_actual_len, 912 u64 *requestid); 913 914 extern int vmbus_recvpacket_raw(struct vmbus_channel *channel, 915 void *buffer, 916 u32 bufferlen, 917 u32 *buffer_actual_len, 918 u64 *requestid); 919 920 921 extern void vmbus_ontimer(unsigned long data); 922 923 /* Base driver object */ 924 struct hv_driver { 925 const char *name; 926 927 /* the device type supported by this driver */ 928 uuid_le dev_type; 929 const struct hv_vmbus_device_id *id_table; 930 931 struct device_driver driver; 932 933 int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *); 934 int (*remove)(struct hv_device *); 935 void (*shutdown)(struct hv_device *); 936 937 }; 938 939 /* Base device object */ 940 struct hv_device { 941 /* the device type id of this device */ 942 uuid_le dev_type; 943 944 /* the device instance id of this device */ 945 uuid_le dev_instance; 946 947 struct device device; 948 949 struct vmbus_channel *channel; 950 }; 951 952 953 static inline struct hv_device *device_to_hv_device(struct device *d) 954 { 955 return container_of(d, struct hv_device, device); 956 } 957 958 static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d) 959 
{
	return container_of(d, struct hv_driver, driver);
}

/* Stash driver-private data on the embedded struct device. */
static inline void hv_set_drvdata(struct hv_device *dev, void *data)
{
	dev_set_drvdata(&dev->device, data);
}

/* Retrieve the driver-private data stored by hv_set_drvdata(). */
static inline void *hv_get_drvdata(struct hv_device *dev)
{
	return dev_get_drvdata(&dev->device);
}

/* Vmbus interface */
#define vmbus_driver_register(driver)	\
	__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
					 struct module *owner,
					 const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);

int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok);

/**
 * VMBUS_DEVICE - macro used to describe a specific hyperv vmbus device
 *
 * This macro is used to create a struct hv_vmbus_device_id that matches a
 * specific device.
 */
#define VMBUS_DEVICE(g0, g1, g2, g3, g4, g5, g6, g7,	\
		     g8, g9, ga, gb, gc, gd, ge, gf)	\
	.guid = { g0, g1, g2, g3, g4, g5, g6, g7,	\
		  g8, g9, ga, gb, gc, gd, ge, gf },

/*
 * GUID definitions of various offer types - services offered to the guest.
999 */ 1000 1001 /* 1002 * Network GUID 1003 * {f8615163-df3e-46c5-913f-f2d2f965ed0e} 1004 */ 1005 #define HV_NIC_GUID \ 1006 .guid = { \ 1007 0x63, 0x51, 0x61, 0xf8, 0x3e, 0xdf, 0xc5, 0x46, \ 1008 0x91, 0x3f, 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e \ 1009 } 1010 1011 /* 1012 * IDE GUID 1013 * {32412632-86cb-44a2-9b5c-50d1417354f5} 1014 */ 1015 #define HV_IDE_GUID \ 1016 .guid = { \ 1017 0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44, \ 1018 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5 \ 1019 } 1020 1021 /* 1022 * SCSI GUID 1023 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} 1024 */ 1025 #define HV_SCSI_GUID \ 1026 .guid = { \ 1027 0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d, \ 1028 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f \ 1029 } 1030 1031 /* 1032 * Shutdown GUID 1033 * {0e0b6031-5213-4934-818b-38d90ced39db} 1034 */ 1035 #define HV_SHUTDOWN_GUID \ 1036 .guid = { \ 1037 0x31, 0x60, 0x0b, 0x0e, 0x13, 0x52, 0x34, 0x49, \ 1038 0x81, 0x8b, 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb \ 1039 } 1040 1041 /* 1042 * Time Synch GUID 1043 * {9527E630-D0AE-497b-ADCE-E80AB0175CAF} 1044 */ 1045 #define HV_TS_GUID \ 1046 .guid = { \ 1047 0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49, \ 1048 0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf \ 1049 } 1050 1051 /* 1052 * Heartbeat GUID 1053 * {57164f39-9115-4e78-ab55-382f3bd5422d} 1054 */ 1055 #define HV_HEART_BEAT_GUID \ 1056 .guid = { \ 1057 0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e, \ 1058 0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d \ 1059 } 1060 1061 /* 1062 * KVP GUID 1063 * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6} 1064 */ 1065 #define HV_KVP_GUID \ 1066 .guid = { \ 1067 0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d, \ 1068 0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x3, 0xe6 \ 1069 } 1070 1071 /* 1072 * Dynamic memory GUID 1073 * {525074dc-8985-46e2-8057-a307dc18a502} 1074 */ 1075 #define HV_DM_GUID \ 1076 .guid = { \ 1077 0xdc, 0x74, 0x50, 0X52, 0x85, 0x89, 0xe2, 0x46, \ 1078 0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02 \ 1079 } 1080 
1081 /* 1082 * Mouse GUID 1083 * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a} 1084 */ 1085 #define HV_MOUSE_GUID \ 1086 .guid = { \ 1087 0x9e, 0xb6, 0xa8, 0xcf, 0x4a, 0x5b, 0xc0, 0x4c, \ 1088 0xb9, 0x8b, 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a \ 1089 } 1090 1091 /* 1092 * VSS (Backup/Restore) GUID 1093 */ 1094 #define HV_VSS_GUID \ 1095 .guid = { \ 1096 0x29, 0x2e, 0xfa, 0x35, 0x23, 0xea, 0x36, 0x42, \ 1097 0x96, 0xae, 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40 \ 1098 } 1099 /* 1100 * Synthetic Video GUID 1101 * {DA0A7802-E377-4aac-8E77-0558EB1073F8} 1102 */ 1103 #define HV_SYNTHVID_GUID \ 1104 .guid = { \ 1105 0x02, 0x78, 0x0a, 0xda, 0x77, 0xe3, 0xac, 0x4a, \ 1106 0x8e, 0x77, 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8 \ 1107 } 1108 1109 /* 1110 * Synthetic FC GUID 1111 * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda} 1112 */ 1113 #define HV_SYNTHFC_GUID \ 1114 .guid = { \ 1115 0x4A, 0xCC, 0x9B, 0x2F, 0x69, 0x00, 0xF3, 0x4A, \ 1116 0xB7, 0x6B, 0x6F, 0xD0, 0xBE, 0x52, 0x8C, 0xDA \ 1117 } 1118 1119 /* 1120 * Guest File Copy Service 1121 * {34D14BE3-DEE4-41c8-9AE7-6B174977C192} 1122 */ 1123 1124 #define HV_FCOPY_GUID \ 1125 .guid = { \ 1126 0xE3, 0x4B, 0xD1, 0x34, 0xE4, 0xDE, 0xC8, 0x41, \ 1127 0x9A, 0xE7, 0x6B, 0x17, 0x49, 0x77, 0xC1, 0x92 \ 1128 } 1129 1130 /* 1131 * NetworkDirect. This is the guest RDMA service. 
1132 * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501} 1133 */ 1134 #define HV_ND_GUID \ 1135 .guid = { \ 1136 0x3d, 0xaf, 0x2e, 0x8c, 0xa7, 0x32, 0x09, 0x4b, \ 1137 0xab, 0x99, 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01 \ 1138 } 1139 1140 /* 1141 * Common header for Hyper-V ICs 1142 */ 1143 1144 #define ICMSGTYPE_NEGOTIATE 0 1145 #define ICMSGTYPE_HEARTBEAT 1 1146 #define ICMSGTYPE_KVPEXCHANGE 2 1147 #define ICMSGTYPE_SHUTDOWN 3 1148 #define ICMSGTYPE_TIMESYNC 4 1149 #define ICMSGTYPE_VSS 5 1150 1151 #define ICMSGHDRFLAG_TRANSACTION 1 1152 #define ICMSGHDRFLAG_REQUEST 2 1153 #define ICMSGHDRFLAG_RESPONSE 4 1154 1155 1156 /* 1157 * While we want to handle util services as regular devices, 1158 * there is only one instance of each of these services; so 1159 * we statically allocate the service specific state. 1160 */ 1161 1162 struct hv_util_service { 1163 u8 *recv_buffer; 1164 void (*util_cb)(void *); 1165 int (*util_init)(struct hv_util_service *); 1166 void (*util_deinit)(void); 1167 }; 1168 1169 struct vmbuspipe_hdr { 1170 u32 flags; 1171 u32 msgsize; 1172 } __packed; 1173 1174 struct ic_version { 1175 u16 major; 1176 u16 minor; 1177 } __packed; 1178 1179 struct icmsg_hdr { 1180 struct ic_version icverframe; 1181 u16 icmsgtype; 1182 struct ic_version icvermsg; 1183 u16 icmsgsize; 1184 u32 status; 1185 u8 ictransaction_id; 1186 u8 icflags; 1187 u8 reserved[2]; 1188 } __packed; 1189 1190 struct icmsg_negotiate { 1191 u16 icframe_vercnt; 1192 u16 icmsg_vercnt; 1193 u32 reserved; 1194 struct ic_version icversion_data[1]; /* any size array */ 1195 } __packed; 1196 1197 struct shutdown_msg_data { 1198 u32 reason_code; 1199 u32 timeout_seconds; 1200 u32 flags; 1201 u8 display_message[2048]; 1202 } __packed; 1203 1204 struct heartbeat_msg_data { 1205 u64 seq_num; 1206 u32 reserved[8]; 1207 } __packed; 1208 1209 /* Time Sync IC defs */ 1210 #define ICTIMESYNCFLAG_PROBE 0 1211 #define ICTIMESYNCFLAG_SYNC 1 1212 #define ICTIMESYNCFLAG_SAMPLE 2 1213 1214 #ifdef __x86_64__ 1215 #define 
WLTIMEDELTA 116444736000000000L /* in 100ns unit */ 1216 #else 1217 #define WLTIMEDELTA 116444736000000000LL 1218 #endif 1219 1220 struct ictimesync_data { 1221 u64 parenttime; 1222 u64 childtime; 1223 u64 roundtriptime; 1224 u8 flags; 1225 } __packed; 1226 1227 struct hyperv_service_callback { 1228 u8 msg_type; 1229 char *log_msg; 1230 uuid_le data; 1231 struct vmbus_channel *channel; 1232 void (*callback) (void *context); 1233 }; 1234 1235 #define MAX_SRV_VER 0x7ffffff 1236 extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *, 1237 struct icmsg_negotiate *, u8 *, int, 1238 int); 1239 1240 void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); 1241 1242 /* 1243 * Negotiated version with the Host. 1244 */ 1245 1246 extern __u32 vmbus_proto_version; 1247 1248 #endif /* _HYPERV_H */ 1249