// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_bo_evict.h"

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_tile.h"

/**
 * xe_bo_evict_all - evict all BOs from VRAM
 *
 * @xe: xe device
 *
 * Evict non-pinned user BOs first (via GPU), evict pinned external BOs next
 * (via GPU), wait for evictions, and finally evict pinned kernel BOs via CPU.
 * All evictions are performed via TTM calls.
 *
 * Evict == move VRAM BOs to temporary (typically system) memory.
 *
 * This function should be called before the device goes into a suspend state
 * where the VRAM loses power.
 */
int xe_bo_evict_all(struct xe_device *xe)
{
	struct ttm_device *bdev = &xe->ttm;
	struct xe_bo *bo;
	struct xe_tile *tile;
	struct list_head still_in_list;
	u32 mem_type;
	u8 id;
	int ret;

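	/* Only discrete devices have VRAM that can lose power over suspend */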
	if (!IS_DGFX(xe))
		return 0;

	/* User memory */
	for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) {
		struct ttm_resource_manager *man =
			ttm_manager_type(bdev, mem_type);

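		/*
		 * A manager may be absent for a given VRAM placement (e.g.
		 * when only one VRAM region exists), hence the NULL check.
		 * These non-pinned user BOs are evicted via the GPU, as noted
		 * in the kernel-doc above.
		 */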
		if (man) {
			ret = ttm_resource_manager_evict_all(bdev, man);
			if (ret)
				return ret;
		}
	}

	/* Pinned user memory in VRAM */
	INIT_LIST_HEAD(&still_in_list);
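	/*
	 * BOs are moved onto the local still_in_list as they are processed so
	 * the loop below terminates; the list is spliced back onto
	 * external_vram once done (or on error).
	 */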
	spin_lock(&xe->pinned.lock);
	for (;;) {
		bo = list_first_entry_or_null(&xe->pinned.external_vram,
					      typeof(*bo), pinned_link);
		if (!bo)
			break;
		xe_bo_get(bo);
		list_move_tail(&bo->pinned_link, &still_in_list);
		spin_unlock(&xe->pinned.lock);

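		/*
		 * The pinned lock is dropped before taking the BO lock, which
		 * may sleep; the reference taken above keeps the BO alive in
		 * the meantime.
		 */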
		xe_bo_lock(bo, false);
		ret = xe_bo_evict_pinned(bo);
		xe_bo_unlock(bo);
		xe_bo_put(bo);
		if (ret) {
			spin_lock(&xe->pinned.lock);
			list_splice_tail(&still_in_list,
					 &xe->pinned.external_vram);
			spin_unlock(&xe->pinned.lock);
			return ret;
		}

		spin_lock(&xe->pinned.lock);
	}
	list_splice_tail(&still_in_list, &xe->pinned.external_vram);
	spin_unlock(&xe->pinned.lock);

	/*
	 * Wait for all user BOs to be evicted, as those evictions depend on
	 * the pinned kernel BOs that are moved below.
	 */
	for_each_tile(tile, xe, id)
		xe_tile_migrate_wait(tile);

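	/*
	 * Finally evict the pinned kernel BOs. Per the kernel-doc above these
	 * are moved with the CPU, since the GPU-based evictions above depend
	 * on them.
	 */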
	spin_lock(&xe->pinned.lock);
	for (;;) {
		bo = list_first_entry_or_null(&xe->pinned.kernel_bo_present,
					      typeof(*bo), pinned_link);
		if (!bo)
			break;
		xe_bo_get(bo);
		list_move_tail(&bo->pinned_link, &xe->pinned.evicted);
		spin_unlock(&xe->pinned.lock);

		xe_bo_lock(bo, false);
		ret = xe_bo_evict_pinned(bo);
		xe_bo_unlock(bo);
		xe_bo_put(bo);
		if (ret)
			return ret;

		spin_lock(&xe->pinned.lock);
	}
	spin_unlock(&xe->pinned.lock);

	return 0;
}

/**
 * xe_bo_restore_kernel - restore kernel BOs to VRAM
 *
 * @xe: xe device
 *
 * Move kernel BOs from temporary (typically system) memory to VRAM via CPU.
 * All moves are done via TTM calls.
 *
 * This function should be called early on device resume, before trying to
 * init the GT.
 */
int xe_bo_restore_kernel(struct xe_device *xe)
{
	struct xe_bo *bo;
	int ret;

	if (!IS_DGFX(xe))
		return 0;

	spin_lock(&xe->pinned.lock);
	for (;;) {
		bo = list_first_entry_or_null(&xe->pinned.evicted,
					      typeof(*bo), pinned_link);
		if (!bo)
			break;
		xe_bo_get(bo);
		list_move_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
		spin_unlock(&xe->pinned.lock);

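		/*
		 * Moved back to VRAM with the CPU, as per the kernel-doc
		 * above: this runs before the GT is initialized.
		 */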
		xe_bo_lock(bo, false);
		ret = xe_bo_restore_pinned(bo);
		xe_bo_unlock(bo);
		if (ret) {
			xe_bo_put(bo);
			return ret;
		}

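		/*
		 * BOs created with a GGTT mapping need their GGTT entries
		 * rewritten to point at the restored VRAM pages.
		 */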
		if (bo->flags & XE_BO_CREATE_GGTT_BIT) {
			struct xe_tile *tile = bo->tile;

			mutex_lock(&tile->mem.ggtt->lock);
			xe_ggtt_map_bo(tile->mem.ggtt, bo);
			mutex_unlock(&tile->mem.ggtt->lock);
		}

		/*
		 * We expect validate to trigger a move to VRAM and our move
		 * code should set up the iosys map.
		 */
		xe_assert(xe, !iosys_map_is_null(&bo->vmap));
		xe_assert(xe, xe_bo_is_vram(bo));

		xe_bo_put(bo);

		spin_lock(&xe->pinned.lock);
	}
	spin_unlock(&xe->pinned.lock);

	return 0;
}

/**
 * xe_bo_restore_user - restore pinned user BOs to VRAM
 *
 * @xe: xe device
 *
 * Move pinned user BOs from temporary (typically system) memory to VRAM via
 * CPU. All moves are done via TTM calls.
 *
 * This function should be called late on device resume, after GT init.
 */
int xe_bo_restore_user(struct xe_device *xe)
{
	struct xe_bo *bo;
	struct xe_tile *tile;
	struct list_head still_in_list;
	u8 id;
	int ret;

	if (!IS_DGFX(xe))
		return 0;

	/* Pinned user memory in VRAM should be validated on resume */
	INIT_LIST_HEAD(&still_in_list);
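	/* Same still_in_list walk as in xe_bo_evict_all() above */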
	spin_lock(&xe->pinned.lock);
	for (;;) {
		bo = list_first_entry_or_null(&xe->pinned.external_vram,
					      typeof(*bo), pinned_link);
		if (!bo)
			break;
		list_move_tail(&bo->pinned_link, &still_in_list);
		xe_bo_get(bo);
		spin_unlock(&xe->pinned.lock);

		xe_bo_lock(bo, false);
		ret = xe_bo_restore_pinned(bo);
		xe_bo_unlock(bo);
		xe_bo_put(bo);
		if (ret) {
			spin_lock(&xe->pinned.lock);
			list_splice_tail(&still_in_list,
					 &xe->pinned.external_vram);
			spin_unlock(&xe->pinned.lock);
			return ret;
		}

		spin_lock(&xe->pinned.lock);
	}
	list_splice_tail(&still_in_list, &xe->pinned.external_vram);
	spin_unlock(&xe->pinned.lock);

	/* Wait for validate to complete */
	for_each_tile(tile, xe, id)
		xe_tile_migrate_wait(tile);

	return 0;
}
229