xref: /linux/drivers/gpu/drm/xe/xe_exec_queue_types.h (revision eeb9f5c2dcec90009d7cf12e780e7f9631993fc5)
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_EXEC_QUEUE_TYPES_H_
#define _XE_EXEC_QUEUE_TYPES_H_

#include <linux/kref.h>

#include <drm/gpu_scheduler.h>

#include "xe_gpu_scheduler_types.h"
#include "xe_hw_engine_types.h"
#include "xe_hw_fence_types.h"
#include "xe_lrc_types.h"

struct xe_execlist_exec_queue;
struct xe_gt;
struct xe_guc_exec_queue;
struct xe_hw_engine;
struct xe_vm;

enum xe_exec_queue_priority {
	XE_EXEC_QUEUE_PRIORITY_UNSET = -2, /* For execlist usage only */
	XE_EXEC_QUEUE_PRIORITY_LOW = 0,
	XE_EXEC_QUEUE_PRIORITY_NORMAL,
	XE_EXEC_QUEUE_PRIORITY_HIGH,
	XE_EXEC_QUEUE_PRIORITY_KERNEL,

	XE_EXEC_QUEUE_PRIORITY_COUNT
};
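
/*
 * Illustrative sketch: a minimal range check against the priority levels
 * above. The helper name is hypothetical and not defined by the driver;
 * XE_EXEC_QUEUE_PRIORITY_UNSET (-2) intentionally falls outside the valid
 * range.
 */
static inline bool example_exec_queue_priority_valid(enum xe_exec_queue_priority prio)
{
	return prio >= XE_EXEC_QUEUE_PRIORITY_LOW &&
	       prio < XE_EXEC_QUEUE_PRIORITY_COUNT;
}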

/**
 * struct xe_exec_queue - Execution queue
 *
 * Contains all state necessary for submissions. Can either be a user object or
 * a kernel object.
 */
struct xe_exec_queue {
	/** @gt: graphics tile this exec queue can submit to */
	struct xe_gt *gt;
	/**
	 * @hwe: A hardware engine of the same class. May (physical engine) or
	 * may not (virtual engine) be the engine the jobs actually end up
	 * running on. Should never really be used for submissions.
	 */
	struct xe_hw_engine *hwe;
	/** @refcount: ref count of this exec queue */
	struct kref refcount;
	/** @vm: VM (address space) for this exec queue */
	struct xe_vm *vm;
	/** @class: class of this exec queue */
	enum xe_engine_class class;
	/**
	 * @logical_mask: logical mask of where jobs submitted to this exec
	 * queue can run
	 */
	u32 logical_mask;
	/** @name: name of this exec queue */
	char name[MAX_FENCE_NAME_LEN];
	/** @width: width (number of batch buffers submitted per exec) of this exec queue */
	u16 width;
	/** @fence_irq: fence IRQ used to signal job completion */
	struct xe_hw_fence_irq *fence_irq;

	/**
	 * @last_fence: last fence on the exec queue; protected by vm->lock in
	 * write mode for a bind exec queue, by the dma-resv lock for a
	 * non-bind exec queue
	 */
	struct dma_fence *last_fence;

/* queue no longer allowed to submit */
#define EXEC_QUEUE_FLAG_BANNED			BIT(0)
/* queue used for kernel submission only */
#define EXEC_QUEUE_FLAG_KERNEL			BIT(1)
/* kernel exec_queue only, destroyed at driver unload */
#define EXEC_QUEUE_FLAG_PERMANENT		BIT(2)
/* queue keeps running pending jobs after destroy ioctl */
#define EXEC_QUEUE_FLAG_PERSISTENT		BIT(3)
/* for VM jobs. Caller needs to hold rpm ref when creating queue with this flag */
#define EXEC_QUEUE_FLAG_VM			BIT(4)
/* child of VM queue for multi-tile VM jobs */
#define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD	BIT(5)
/* kernel exec_queue only, set priority to highest level */
#define EXEC_QUEUE_FLAG_HIGH_PRIORITY		BIT(6)

	/**
	 * @flags: flags for this exec queue, should be set up statically,
	 * aside from the ban bit
	 */
	unsigned long flags;

	union {
		/** @multi_gt_list: list head for VM bind engines if multi-GT */
		struct list_head multi_gt_list;
		/** @multi_gt_link: link for VM bind engines if multi-GT */
		struct list_head multi_gt_link;
	};

	union {
		/** @execlist: execlist backend specific state for exec queue */
		struct xe_execlist_exec_queue *execlist;
		/** @guc: GuC backend specific state for exec queue */
		struct xe_guc_exec_queue *guc;
	};

	/**
	 * @persistent: persistent exec queue state
	 */
	struct {
		/** @xef: file which this exec queue belongs to */
		struct xe_file *xef;
		/** @link: link in list of persistent exec queues */
		struct list_head link;
	} persistent;

	union {
		/**
		 * @parallel: parallel submission state
		 */
		struct {
			/** @composite_fence_ctx: composite fence context */
			u64 composite_fence_ctx;
			/** @composite_fence_seqno: seqno for composite fence */
			u32 composite_fence_seqno;
		} parallel;
		/**
		 * @bind: bind submission state
		 */
		struct {
			/** @fence_ctx: bind fence context */
			u64 fence_ctx;
			/** @fence_seqno: seqno for bind fence */
			u32 fence_seqno;
		} bind;
	};

	/** @sched_props: scheduling properties */
	struct {
		/** @timeslice_us: timeslice period in microseconds */
		u32 timeslice_us;
		/** @preempt_timeout_us: preemption timeout in microseconds */
		u32 preempt_timeout_us;
		/** @priority: priority of this exec queue */
		enum xe_exec_queue_priority priority;
	} sched_props;

	/** @compute: compute exec queue state */
	struct {
		/** @pfence: preemption fence */
		struct dma_fence *pfence;
		/** @context: preemption fence context */
		u64 context;
		/** @seqno: preemption fence seqno */
		u32 seqno;
		/** @link: link into VM's list of exec queues */
		struct list_head link;
		/** @lock: preemption fences lock */
		spinlock_t lock;
	} compute;

	/** @usm: unified shared memory state */
	struct {
		/** @acc_trigger: access counter trigger */
		u32 acc_trigger;
		/** @acc_notify: access counter notify */
		u32 acc_notify;
		/** @acc_granularity: access counter granularity */
		u32 acc_granularity;
	} usm;

	/** @ops: submission backend exec queue operations */
	const struct xe_exec_queue_ops *ops;

	/** @ring_ops: ring operations for this exec queue */
	const struct xe_ring_ops *ring_ops;
	/** @entity: DRM sched entity for this exec queue (1 to 1 relationship) */
	struct drm_sched_entity *entity;
	/** @lrc: logical ring context for this exec queue */
	struct xe_lrc lrc[];
};
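
/*
 * Illustrative sketch: since @lrc is a flexible array with one entry per
 * @width, an allocation would typically size the object with struct_size().
 * Assumes <linux/slab.h>/<linux/overflow.h> are available; the function name
 * is hypothetical and not defined by the driver.
 */
static inline struct xe_exec_queue *example_exec_queue_alloc(u16 width)
{
	struct xe_exec_queue *q;

	q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);
	if (!q)
		return NULL;

	q->width = width;
	kref_init(&q->refcount);

	return q;
}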

/**
 * struct xe_exec_queue_ops - Submission backend exec queue operations
 */
struct xe_exec_queue_ops {
	/** @init: Initialize exec queue for submission backend */
	int (*init)(struct xe_exec_queue *q);
	/** @kill: Kill inflight submissions for backend */
	void (*kill)(struct xe_exec_queue *q);
	/** @fini: Fini exec queue for submission backend */
	void (*fini)(struct xe_exec_queue *q);
	/** @set_priority: Set priority for exec queue */
	int (*set_priority)(struct xe_exec_queue *q,
			    enum xe_exec_queue_priority priority);
	/** @set_timeslice: Set timeslice for exec queue */
	int (*set_timeslice)(struct xe_exec_queue *q, u32 timeslice_us);
	/** @set_preempt_timeout: Set preemption timeout for exec queue */
	int (*set_preempt_timeout)(struct xe_exec_queue *q, u32 preempt_timeout_us);
	/** @set_job_timeout: Set job timeout for exec queue */
	int (*set_job_timeout)(struct xe_exec_queue *q, u32 job_timeout_ms);
	/**
	 * @suspend: Suspend the exec queue from executing. May be called
	 * multiple times in a row before resume, with the caveat that
	 * suspend_wait must return before suspend is called again.
	 */
	int (*suspend)(struct xe_exec_queue *q);
	/**
	 * @suspend_wait: Wait for the exec queue to suspend executing; should
	 * be called after suspend.
	 */
	void (*suspend_wait)(struct xe_exec_queue *q);
	/**
	 * @resume: Resume exec queue execution. The exec queue must be in a
	 * suspended state, and the dma fence returned from the most recent
	 * suspend call must be signalled when this function is called.
	 */
	void (*resume)(struct xe_exec_queue *q);
	/** @reset_status: check exec queue reset status */
	bool (*reset_status)(struct xe_exec_queue *q);
};
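
/*
 * Illustrative sketch: the suspend -> suspend_wait ordering documented above,
 * from a caller's point of view. Assumes @ops has been populated by a
 * submission backend; the function name is hypothetical and not defined by
 * the driver.
 */
static inline int example_exec_queue_suspend_sync(struct xe_exec_queue *q)
{
	int err;

	/*
	 * suspend may be issued repeatedly, but only once suspend_wait for
	 * the previous call has returned.
	 */
	err = q->ops->suspend(q);
	if (err)
		return err;

	q->ops->suspend_wait(q);

	return 0;
}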

#endif