/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
 * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#ifndef _EFA_ADMIN_CMDS_H_
#define _EFA_ADMIN_CMDS_H_

#define EFA_ADMIN_API_VERSION_MAJOR          0
#define EFA_ADMIN_API_VERSION_MINOR          1

/* EFA admin queue opcodes */
enum efa_admin_aq_opcode {
	EFA_ADMIN_CREATE_QP                         = 1,
	EFA_ADMIN_MODIFY_QP                         = 2,
	EFA_ADMIN_QUERY_QP                          = 3,
	EFA_ADMIN_DESTROY_QP                        = 4,
	EFA_ADMIN_CREATE_AH                         = 5,
	EFA_ADMIN_DESTROY_AH                        = 6,
	EFA_ADMIN_REG_MR                            = 7,
	EFA_ADMIN_DEREG_MR                          = 8,
	EFA_ADMIN_CREATE_CQ                         = 9,
	EFA_ADMIN_DESTROY_CQ                        = 10,
	EFA_ADMIN_GET_FEATURE                       = 11,
	EFA_ADMIN_SET_FEATURE                       = 12,
	EFA_ADMIN_GET_STATS                         = 13,
	EFA_ADMIN_ALLOC_PD                          = 14,
	EFA_ADMIN_DEALLOC_PD                        = 15,
	EFA_ADMIN_ALLOC_UAR                         = 16,
	EFA_ADMIN_DEALLOC_UAR                       = 17,
	EFA_ADMIN_MAX_OPCODE                        = 17,
};

enum efa_admin_aq_feature_id {
	EFA_ADMIN_DEVICE_ATTR                       = 1,
	EFA_ADMIN_AENQ_CONFIG                       = 2,
	EFA_ADMIN_NETWORK_ATTR                      = 3,
	EFA_ADMIN_QUEUE_ATTR                        = 4,
	EFA_ADMIN_HW_HINTS                          = 5,
	EFA_ADMIN_HOST_INFO                         = 6,
};

/* QP transport type */
enum efa_admin_qp_type {
	/* Unreliable Datagram */
	EFA_ADMIN_QP_TYPE_UD                        = 1,
	/* Scalable Reliable Datagram */
	EFA_ADMIN_QP_TYPE_SRD                       = 2,
};

/* QP state */
enum efa_admin_qp_state {
	EFA_ADMIN_QP_STATE_RESET                    = 0,
	EFA_ADMIN_QP_STATE_INIT                     = 1,
	EFA_ADMIN_QP_STATE_RTR                      = 2,
	EFA_ADMIN_QP_STATE_RTS                      = 3,
	EFA_ADMIN_QP_STATE_SQD                      = 4,
	EFA_ADMIN_QP_STATE_SQE                      = 5,
	EFA_ADMIN_QP_STATE_ERR                      = 6,
};

enum efa_admin_get_stats_type {
	EFA_ADMIN_GET_STATS_TYPE_BASIC              = 0,
};

enum efa_admin_get_stats_scope {
	EFA_ADMIN_GET_STATS_SCOPE_ALL               = 0,
	EFA_ADMIN_GET_STATS_SCOPE_QUEUE             = 1,
};

enum efa_admin_modify_qp_mask_bits {
	EFA_ADMIN_QP_STATE_BIT                      = 0,
	EFA_ADMIN_CUR_QP_STATE_BIT                  = 1,
	EFA_ADMIN_QKEY_BIT                          = 2,
	EFA_ADMIN_SQ_PSN_BIT                        = 3,
	EFA_ADMIN_SQ_DRAINED_ASYNC_NOTIFY_BIT       = 4,
};
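
/*
 * Illustrative sketch, not part of the device ABI: the enum values above are
 * bit positions, so a modify_mask for the modify QP command (covering, say, a
 * QP state and QKey update) can be built with BIT() as shown below. The
 * helper name is hypothetical.
 */
static inline u32 efa_example_modify_mask_state_and_qkey(void)
{
	/* Update qp_state and qkey; the remaining modify QP fields are ignored */
	return BIT(EFA_ADMIN_QP_STATE_BIT) | BIT(EFA_ADMIN_QKEY_BIT);
}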

/*
 * QP allocation sizes, converted from QP capabilities by the fabric
 * QueuePair (QP) create command.
 */
struct efa_admin_qp_alloc_size {
	/* Send descriptor ring size in bytes */
	u32 send_queue_ring_size;

	/* Max number of WQEs that can be outstanding on send queue. */
	u32 send_queue_depth;

	/*
	 * Recv descriptor ring size in bytes, sufficient for user-provided
	 * number of WQEs
	 */
	u32 recv_queue_ring_size;

	/* Max number of WQEs that can be outstanding on recv queue */
	u32 recv_queue_depth;
};

struct efa_admin_create_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Protection Domain associated with this QP */
	u16 pd;

	/* QP type */
	u8 qp_type;

	/*
	 * 0 : sq_virt - If set, SQ ring base address is
	 *    virtual (IOVA returned by MR registration)
	 * 1 : rq_virt - If set, RQ ring base address is
	 *    virtual (IOVA returned by MR registration)
	 * 7:2 : reserved - MBZ
	 */
	u8 flags;

	/*
	 * Send queue (SQ) ring base physical address. This field is not
	 * used if this is a Low Latency Queue (LLQ).
	 */
	u64 sq_base_addr;

	/* Receive queue (RQ) ring base address. */
	u64 rq_base_addr;

	/* Index of CQ to be associated with Send Queue completions */
	u32 send_cq_idx;

	/* Index of CQ to be associated with Recv Queue completions */
	u32 recv_cq_idx;

	/*
	 * Memory registration key for the SQ ring, used only when not in
	 * LLQ mode and base address is virtual
	 */
	u32 sq_l_key;

	/*
	 * Memory registration key for the RQ ring, used only when base
	 * address is virtual
	 */
	u32 rq_l_key;

	/* Requested QP allocation sizes */
	struct efa_admin_qp_alloc_size qp_alloc_size;

	/* UAR number */
	u16 uar;

	/* MBZ */
	u16 reserved;

	/* MBZ */
	u32 reserved2;
};

struct efa_admin_create_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/*
	 * Opaque handle to be used for subsequent admin operations on
	 * the QP
	 */
	u32 qp_handle;

	/*
	 * QP number in the given EFA virtual device. Least-significant bits
	 *    (as needed according to max_qp) carry unique QP ID
	 */
	u16 qp_num;

	/* MBZ */
	u16 reserved;

	/* Index of sub-CQ for Send Queue completions */
	u16 send_sub_cq_idx;

	/* Index of sub-CQ for Receive Queue completions */
	u16 recv_sub_cq_idx;

	/* SQ doorbell address, as offset to PCIe DB BAR */
	u32 sq_db_offset;

	/* RQ doorbell address, as offset to PCIe DB BAR */
	u32 rq_db_offset;

	/*
	 * low latency send queue ring base address as an offset to PCIe
	 * MMIO LLQ_MEM BAR
	 */
	u32 llq_descriptors_offset;
};

struct efa_admin_modify_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/*
	 * Mask indicating which fields should be updated; see enum
	 * efa_admin_modify_qp_mask_bits
	 */
	u32 modify_mask;

	/* QP handle returned by create_qp command */
	u32 qp_handle;

	/* QP state */
	u32 qp_state;

	/* Override current QP state (before applying the transition) */
	u32 cur_qp_state;

	/* QKey */
	u32 qkey;

	/* SQ PSN */
	u32 sq_psn;

	/* Enable async notification when SQ is drained */
	u8 sq_drained_async_notify;

	/* MBZ */
	u8 reserved1;

	/* MBZ */
	u16 reserved2;
};

struct efa_admin_modify_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};

struct efa_admin_query_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* QP handle returned by create_qp command */
	u32 qp_handle;
};

struct efa_admin_query_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/* QP state */
	u32 qp_state;

	/* QKey */
	u32 qkey;

	/* SQ PSN */
	u32 sq_psn;

	/* Indicates that draining is in progress */
	u8 sq_draining;

	/* MBZ */
	u8 reserved1;

	/* MBZ */
	u16 reserved2;
};

struct efa_admin_destroy_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* QP handle returned by create_qp command */
	u32 qp_handle;
};

struct efa_admin_destroy_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};

/*
 * Create Address Handle command parameters. Must not be called more than
 * once for the same destination
 */
struct efa_admin_create_ah_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Destination address in network byte order */
	u8 dest_addr[16];

	/* PD number */
	u16 pd;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_create_ah_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/* Target interface address handle (opaque) */
	u16 ah;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_destroy_ah_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Target interface address handle (opaque) */
	u16 ah;

	/* PD number */
	u16 pd;
};

struct efa_admin_destroy_ah_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};

/*
 * Registration of a Memory Region, required for QPs working with virtual
 * addresses. In standard verbs semantics, region length is limited to 2GB,
 * but EFA offers larger MR support for large memory spaces, to ease the
 * work of users with very large datasets (e.g. full GPU memory mapping).
 */
struct efa_admin_reg_mr_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Protection Domain */
	u16 pd;

	/* MBZ */
	u16 reserved16_w1;

	/* Physical Buffer List, each element is page-aligned. */
	union {
		/*
		 * Inline array of guest-physical page addresses of user
		 * memory pages (optimization for short region
		 * registrations)
		 */
		u64 inline_pbl_array[4];

		/* points to PBL (direct or indirect, chained if needed) */
		struct efa_admin_ctrl_buff_info pbl;
	} pbl;

	/* Memory region length, in bytes. */
	u64 mr_length;

	/*
	 * flags and page size
	 * 4:0 : phys_page_size_shift - page size is (1 <<
	 *    phys_page_size_shift). Page size is used for
	 *    building the Virtual to Physical address mapping
	 * 6:5 : reserved - MBZ
	 * 7 : mem_addr_phy_mode_en - Enable bit for physical
	 *    memory registration (no translation), can be used
	 *    only by privileged clients. If set, PBL must
	 *    contain a single entry.
	 */
	u8 flags;

	/*
	 * permissions
	 * 0 : local_write_enable - Local write permissions:
	 *    must be set for RQ buffers and buffers posted for
	 *    RDMA Read requests
	 * 1 : reserved1 - MBZ
	 * 2 : remote_read_enable - Remote read permissions:
	 *    must be set to enable RDMA read from the region
	 * 7:3 : reserved2 - MBZ
	 */
	u8 permissions;

	/* MBZ */
	u16 reserved16_w5;

	/* number of pages in PBL (redundant, could be calculated) */
	u32 page_num;

	/*
	 * IO Virtual Address associated with this MR. If
	 * mem_addr_phy_mode_en is set, contains the physical address of
	 * the region.
	 */
	u64 iova;
};

struct efa_admin_reg_mr_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/*
	 * L_Key, to be used in conjunction with local buffer references in
	 * SQ and RQ WQE, or with virtual RQ/CQ rings
	 */
	u32 l_key;

	/*
	 * R_Key, to be used in RDMA messages to refer to remotely accessed
	 * memory region
	 */
	u32 r_key;
};

struct efa_admin_dereg_mr_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* L_Key, memory region's l_key */
	u32 l_key;
};

struct efa_admin_dereg_mr_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};

struct efa_admin_create_cq_cmd {
	struct efa_admin_aq_common_desc aq_common_desc;

	/*
	 * 4:0 : reserved5 - MBZ
	 * 5 : interrupt_mode_enabled - if set, cq operates
	 *    in interrupt mode (i.e. CQ events and MSI-X are
	 *    generated), otherwise it operates in polling mode
	 * 6 : virt - If set, ring base address is virtual
	 *    (IOVA returned by MR registration)
	 * 7 : reserved6 - MBZ
	 */
	u8 cq_caps_1;

	/*
	 * 4:0 : cq_entry_size_words - size of CQ entry in
	 *    32-bit words, valid values: 4, 8.
	 * 7:5 : reserved7 - MBZ
	 */
	u8 cq_caps_2;

	/* Completion queue depth in # of entries. Must be a power of 2 */
	u16 cq_depth;

	/* msix vector assigned to this cq */
	u32 msix_vector_idx;

	/*
	 * CQ ring base address, virtual or physical depending on 'virt'
	 * flag
	 */
	struct efa_common_mem_addr cq_ba;

	/*
	 * Memory registration key for the ring, used only when base
	 * address is virtual
	 */
	u32 l_key;

	/*
	 * number of sub cqs - must be equal to sub_cqs_per_cq of queue
	 *    attributes.
	 */
	u16 num_sub_cqs;

	/* UAR number */
	u16 uar;
};

struct efa_admin_create_cq_resp {
	struct efa_admin_acq_common_desc acq_common_desc;

	u16 cq_idx;

	/* actual cq depth in number of entries */
	u16 cq_actual_depth;
};

struct efa_admin_destroy_cq_cmd {
	struct efa_admin_aq_common_desc aq_common_desc;

	u16 cq_idx;

	/* MBZ */
	u16 reserved1;
};

struct efa_admin_destroy_cq_resp {
	struct efa_admin_acq_common_desc acq_common_desc;
};

/*
 * EFA AQ Get Statistics command. Extended statistics are placed in the
 * control buffer pointed to by the AQ entry
 */
struct efa_admin_aq_get_stats_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;

	union {
		/* command specific inline data */
		u32 inline_data_w1[3];

		struct efa_admin_ctrl_buff_info control_buffer;
	} u;

	/* stats type as defined in enum efa_admin_get_stats_type */
	u8 type;

	/* stats scope defined in enum efa_admin_get_stats_scope */
	u8 scope;

	u16 scope_modifier;
};
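
/*
 * Illustrative sketch, not part of the device ABI: requesting device-wide
 * basic statistics by filling the type and scope fields from the
 * efa_admin_get_stats_type/scope enums defined earlier in this file. The
 * helper name is hypothetical.
 */
static inline void efa_example_fill_get_basic_stats(struct efa_admin_aq_get_stats_cmd *cmd)
{
	cmd->type = EFA_ADMIN_GET_STATS_TYPE_BASIC;
	cmd->scope = EFA_ADMIN_GET_STATS_SCOPE_ALL;
	/* scope_modifier selects a specific queue when scope is ..._SCOPE_QUEUE */
	cmd->scope_modifier = 0;
}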

struct efa_admin_basic_stats {
	u64 tx_bytes;

	u64 tx_pkts;

	u64 rx_bytes;

	u64 rx_pkts;

	u64 rx_drops;
};

struct efa_admin_acq_get_stats_resp {
	struct efa_admin_acq_common_desc acq_common_desc;

	struct efa_admin_basic_stats basic_stats;
};

struct efa_admin_get_set_feature_common_desc {
	/*
	 * 1:0 : select - 0x1 - current value; 0x3 - default
	 *    value
	 * 7:3 : reserved3 - MBZ
	 */
	u8 flags;

	/* as appears in efa_admin_aq_feature_id */
	u8 feature_id;

	/* MBZ */
	u16 reserved16;
};

struct efa_admin_feature_device_attr_desc {
	/* Bitmap of efa_admin_aq_feature_id */
	u64 supported_features;

	/* Bitmap of supported page sizes in MR registrations */
	u64 page_size_cap;

	u32 fw_version;

	u32 admin_api_version;

	u32 device_version;

	/* Bar used for SQ and RQ doorbells */
	u16 db_bar;

	/* Indicates how many bits are used on physical address access */
	u8 phys_addr_width;

	/* Indicates how many bits are used on virtual address access */
	u8 virt_addr_width;

	/*
	 * 0 : rdma_read - If set, RDMA Read is supported on
	 *    TX queues
	 * 31:1 : reserved - MBZ
	 */
	u32 device_caps;

	/* Max RDMA transfer size in bytes */
	u32 max_rdma_size;
};

struct efa_admin_feature_queue_attr_desc {
	/* The maximum number of queue pairs supported */
	u32 max_qp;

	/* Maximum number of WQEs per Send Queue */
	u32 max_sq_depth;

	/* Maximum size of data that can be sent inline in a Send WQE */
	u32 inline_buf_size;

	/* Maximum number of buffer descriptors per Recv Queue */
	u32 max_rq_depth;

	/* The maximum number of completion queues supported per VF */
	u32 max_cq;

	/* Maximum number of CQEs per Completion Queue */
	u32 max_cq_depth;

	/* Number of sub-CQs to be created for each CQ */
	u16 sub_cqs_per_cq;

	/* MBZ */
	u16 reserved;

	/* Maximum number of SGEs (buffers) allowed for a single send WQE */
	u16 max_wr_send_sges;

	/* Maximum number of SGEs allowed for a single recv WQE */
	u16 max_wr_recv_sges;

	/* The maximum number of memory regions supported */
	u32 max_mr;

	/* The maximum number of pages that can be registered */
	u32 max_mr_pages;

	/* The maximum number of protection domains supported */
	u32 max_pd;

	/* The maximum number of address handles supported */
	u32 max_ah;

	/* The maximum size of LLQ in bytes */
	u32 max_llq_size;

	/* Maximum number of SGEs for a single RDMA read WQE */
	u16 max_wr_rdma_sges;
};

struct efa_admin_feature_aenq_desc {
	/* bitmask for AENQ groups the device can report */
	u32 supported_groups;

	/* bitmask for AENQ groups to report */
	u32 enabled_groups;
};

struct efa_admin_feature_network_attr_desc {
	/* Raw address data in network byte order */
	u8 addr[16];

	/* max packet payload size in bytes */
	u32 mtu;
};

/*
 * When a hint value is 0, the hint is not supported or the driver should
 * use its own predefined value
 */
struct efa_admin_hw_hints {
	/* value in ms */
	u16 mmio_read_timeout;

	/* value in ms */
	u16 driver_watchdog_timeout;

	/* value in ms */
	u16 admin_completion_timeout;

	/* poll interval in ms */
	u16 poll_interval;
};

struct efa_admin_get_feature_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;

	struct efa_admin_ctrl_buff_info control_buffer;

	struct efa_admin_get_set_feature_common_desc feature_common;

	u32 raw[11];
};

struct efa_admin_get_feature_resp {
	struct efa_admin_acq_common_desc acq_common_desc;

	union {
		u32 raw[14];

		struct efa_admin_feature_device_attr_desc device_attr;

		struct efa_admin_feature_aenq_desc aenq;

		struct efa_admin_feature_network_attr_desc network_attr;

		struct efa_admin_feature_queue_attr_desc queue_attr;

		struct efa_admin_hw_hints hw_hints;
	} u;
};

struct efa_admin_set_feature_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;

	struct efa_admin_ctrl_buff_info control_buffer;

	struct efa_admin_get_set_feature_common_desc feature_common;

	union {
		u32 raw[11];

		/* AENQ configuration */
		struct efa_admin_feature_aenq_desc aenq;
	} u;
};

struct efa_admin_set_feature_resp {
	struct efa_admin_acq_common_desc acq_common_desc;

	union {
		u32 raw[14];
	} u;
};

struct efa_admin_alloc_pd_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;
};

struct efa_admin_alloc_pd_resp {
	struct efa_admin_acq_common_desc acq_common_desc;

	/* PD number */
	u16 pd;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_dealloc_pd_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* PD number */
	u16 pd;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_dealloc_pd_resp {
	struct efa_admin_acq_common_desc acq_common_desc;
};

struct efa_admin_alloc_uar_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;
};

struct efa_admin_alloc_uar_resp {
	struct efa_admin_acq_common_desc acq_common_desc;

	/* UAR number */
	u16 uar;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_dealloc_uar_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* UAR number */
	u16 uar;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_dealloc_uar_resp {
	struct efa_admin_acq_common_desc acq_common_desc;
};

/* asynchronous event notification groups */
enum efa_admin_aenq_group {
	EFA_ADMIN_FATAL_ERROR                       = 1,
	EFA_ADMIN_WARNING                           = 2,
	EFA_ADMIN_NOTIFICATION                      = 3,
	EFA_ADMIN_KEEP_ALIVE                        = 4,
	EFA_ADMIN_AENQ_GROUPS_NUM                   = 5,
};

enum efa_admin_aenq_notification_syndrom {
	EFA_ADMIN_SUSPEND                           = 0,
	EFA_ADMIN_RESUME                            = 1,
	EFA_ADMIN_UPDATE_HINTS                      = 2,
};

struct efa_admin_mmio_req_read_less_resp {
	u16 req_id;

	u16 reg_off;

	/* value is valid when poll is cleared */
	u32 reg_val;
};

enum efa_admin_os_type {
	EFA_ADMIN_OS_LINUX                          = 0,
};

struct efa_admin_host_info {
	/* OS distribution string format */
	u8 os_dist_str[128];

	/* Defined in enum efa_admin_os_type */
	u32 os_type;

	/* Kernel version string format */
	u8 kernel_ver_str[32];

	/* Kernel version numeric format */
	u32 kernel_ver;

	/*
	 * 7:0 : driver_module_type
	 * 15:8 : driver_sub_minor
	 * 23:16 : driver_minor
	 * 31:24 : driver_major
	 */
	u32 driver_ver;

	/*
	 * Device's Bus, Device and Function
	 * 2:0 : function
	 * 7:3 : device
	 * 15:8 : bus
	 */
	u16 bdf;

	/*
	 * Spec version
	 * 7:0 : spec_minor
	 * 15:8 : spec_major
	 */
	u16 spec_ver;

	/*
	 * 0 : intree - Intree driver
	 * 1 : gdr - GPUDirect RDMA supported
	 * 31:2 : reserved2
	 */
	u32 flags;
};

/* create_qp_cmd */
#define EFA_ADMIN_CREATE_QP_CMD_SQ_VIRT_MASK                BIT(0)
#define EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_MASK                BIT(1)
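
/*
 * Illustrative sketch, not part of the device ABI: marking both rings of a
 * create QP command as virtually addressed using the create_qp_cmd masks
 * above. With these flags set, sq_base_addr/rq_base_addr carry IOVAs
 * returned by MR registration and sq_l_key/rq_l_key must be valid. The
 * helper name is hypothetical.
 */
static inline void efa_example_set_qp_rings_virt(struct efa_admin_create_qp_cmd *cmd)
{
	cmd->flags |= EFA_ADMIN_CREATE_QP_CMD_SQ_VIRT_MASK |
		      EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_MASK;
}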

/* reg_mr_cmd */
#define EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK      GENMASK(4, 0)
#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_MASK      BIT(7)
#define EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK        BIT(0)
#define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_MASK        BIT(2)
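
/*
 * Illustrative sketch, not part of the device ABI: packing the reg_mr_cmd
 * flags and permissions bytes with the masks above. phys_page_size_shift
 * starts at bit 0, so masking the raw shift value is enough; page_shift is
 * a hypothetical caller-supplied value (e.g. 12 for 4KiB pages), and the
 * helper name is hypothetical.
 */
static inline void efa_example_fill_reg_mr_caps(struct efa_admin_reg_mr_cmd *cmd,
						u8 page_shift)
{
	/* Page size used for building the virtual-to-physical mapping */
	cmd->flags |= page_shift & EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK;

	/* Local write is required for RQ buffers and RDMA Read targets */
	cmd->permissions |= EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK;
}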

/* create_cq_cmd */
#define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5)
#define EFA_ADMIN_CREATE_CQ_CMD_VIRT_MASK                   BIT(6)
#define EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK    GENMASK(4, 0)
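
/*
 * Illustrative sketch, not part of the device ABI: setting the CQ entry
 * size and the virt flag of a create CQ command with the create_cq_cmd
 * masks above. cq_entry_size_words occupies bits 4:0 of cq_caps_2 and only
 * 4 or 8 are valid values. The helper name is hypothetical.
 */
static inline void efa_example_fill_cq_caps(struct efa_admin_create_cq_cmd *cmd)
{
	/* 4 32-bit words per CQ entry, i.e. 16-byte CQEs */
	cmd->cq_caps_2 |= 4 & EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;

	/* CQ ring base address is an IOVA returned by MR registration */
	cmd->cq_caps_1 |= EFA_ADMIN_CREATE_CQ_CMD_VIRT_MASK;
}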

/* get_set_feature_common_desc */
#define EFA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK   GENMASK(1, 0)

/* feature_device_attr_desc */
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK   BIT(0)
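
/*
 * Illustrative sketch, not part of the device ABI: testing the rdma_read
 * capability bit reported in device_caps of the device attributes feature,
 * using the mask above. The helper name is hypothetical.
 */
static inline bool efa_example_rdma_read_supported(const struct efa_admin_feature_device_attr_desc *attr)
{
	/* Bit 0 of device_caps advertises RDMA Read support on TX queues */
	return !!(attr->device_caps &
		  EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK);
}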

/* host_info */
#define EFA_ADMIN_HOST_INFO_DRIVER_MODULE_TYPE_MASK         GENMASK(7, 0)
#define EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR_MASK           GENMASK(15, 8)
#define EFA_ADMIN_HOST_INFO_DRIVER_MINOR_MASK               GENMASK(23, 16)
#define EFA_ADMIN_HOST_INFO_DRIVER_MAJOR_MASK               GENMASK(31, 24)
#define EFA_ADMIN_HOST_INFO_FUNCTION_MASK                   GENMASK(2, 0)
#define EFA_ADMIN_HOST_INFO_DEVICE_MASK                     GENMASK(7, 3)
#define EFA_ADMIN_HOST_INFO_BUS_MASK                        GENMASK(15, 8)
#define EFA_ADMIN_HOST_INFO_SPEC_MINOR_MASK                 GENMASK(7, 0)
#define EFA_ADMIN_HOST_INFO_SPEC_MAJOR_MASK                 GENMASK(15, 8)
#define EFA_ADMIN_HOST_INFO_INTREE_MASK                     BIT(0)
#define EFA_ADMIN_HOST_INFO_GDR_MASK                        BIT(1)
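
/*
 * Illustrative sketch, not part of the device ABI: packing the driver_ver
 * and bdf words of efa_admin_host_info according to the bit layout
 * documented on the struct and the host_info masks above. The version
 * numbers and bus/device/function values below are hypothetical, as is the
 * helper name.
 */
static inline void efa_example_fill_host_info_ids(struct efa_admin_host_info *hinf)
{
	/* driver_major 31:24, driver_minor 23:16, driver_sub_minor 15:8, module type 7:0 */
	hinf->driver_ver = (1U << 24) | (11U << 16) | (1U << 8) | 0;

	/* bus 15:8, device 7:3, function 2:0 */
	hinf->bdf = (0x3a << 8) | (0x1f << 3) | 0x2;
}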

#endif /* _EFA_ADMIN_CMDS_H_ */