/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_pvinfo.h"
#include "i915_vgpu.h"

/**
 * DOC: fence register handling
 *
 * Important to avoid confusion: "fences" in the i915 driver are not execution
 * fences used to track command completion but hardware detiler objects which
 * wrap a given range of the global GTT. Each platform has only a fairly limited
 * set of these objects.
 *
 * Fences are used to detile GTT memory mappings. They're also connected to the
 * hardware frontbuffer render tracking and hence interact with frontbuffer
 * compression. Furthermore on older platforms fences are required for tiled
 * objects used by the display engine. They can also be used by the render
 * engine - they're required for blitter commands and are optional for render
 * commands. But on gen4+ both display (with the exception of fbc) and rendering
 * have their own tiling state bits and don't need fences.
 *
 * Also note that fences only support X and Y tiling and hence can't be used for
 * the fancier new tiling formats like W, Ys and Yf.
 *
 * Finally note that because fences are such a restricted resource they're
 * dynamically associated with objects. Furthermore fence state is committed to
 * the hardware lazily to avoid unnecessary stalls on gen2/3. Therefore code must
 * explicitly call i915_vma_pin_fence() to synchronize fencing status
 * for CPU access. Also note that some code wants an unfenced view; for those
 * cases the fence can be removed forcefully with i915_vma_revoke_fence().
 *
 * Internally these functions will synchronize with userspace access by removing
 * CPU PTEs into GTT mmaps (not the GTT PTEs themselves) as needed.
 */
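
/*
 * Illustrative sketch (not part of the driver): how kernel-internal code
 * typically brackets fenced CPU access through the GGTT aperture. The vma
 * variable below is hypothetical; the pin/unpin entry points are the ones
 * declared in i915_vma.h and defined in this file. This assumes the vma is
 * already pinned in the mappable aperture and a runtime PM wakeref is held,
 * as i915_vma_pin_fence() requires.
 *
 *	err = i915_vma_pin_fence(vma);
 *	if (err)
 *		return err;
 *
 *	if (vma->fence) {
 *		... CPU reads/writes through the GTT aperture now go via
 *		    the detiled (fenced) view of the object ...
 *	}
 *
 *	i915_vma_unpin_fence(vma);
 */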

#define pipelined 0

static struct drm_i915_private *fence_to_i915(struct i915_fence_reg *fence)
{
	return fence->ggtt->vm.i915;
}

static struct intel_uncore *fence_to_uncore(struct i915_fence_reg *fence)
{
	return fence->ggtt->vm.gt->uncore;
}

static void i965_write_fence_reg(struct i915_fence_reg *fence)
{
	i915_reg_t fence_reg_lo, fence_reg_hi;
	int fence_pitch_shift;
	u64 val;

	if (INTEL_GEN(fence_to_i915(fence)) >= 6) {
		fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
		fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
		fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;

	} else {
		fence_reg_lo = FENCE_REG_965_LO(fence->id);
		fence_reg_hi = FENCE_REG_965_HI(fence->id);
		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
	}

	val = 0;
	if (fence->tiling) {
		unsigned int stride = fence->stride;

		GEM_BUG_ON(!IS_ALIGNED(stride, 128));

		val = fence->start + fence->size - I965_FENCE_PAGE;
		val <<= 32;
		val |= fence->start;
		val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
		if (fence->tiling == I915_TILING_Y)
			val |= BIT(I965_FENCE_TILING_Y_SHIFT);
		val |= I965_FENCE_REG_VALID;
	}

	if (!pipelined) {
		struct intel_uncore *uncore = fence_to_uncore(fence);

		/*
		 * To w/a incoherency with non-atomic 64-bit register updates,
		 * we split the 64-bit update into two 32-bit writes. In order
		 * for a partial fence not to be evaluated between writes, we
		 * precede the update with a write to turn off the fence
		 * register, and only enable the fence as the last step.
		 *
		 * For extra levels of paranoia, we make sure each step lands
		 * before applying the next step.
		 */
		intel_uncore_write_fw(uncore, fence_reg_lo, 0);
		intel_uncore_posting_read_fw(uncore, fence_reg_lo);

		intel_uncore_write_fw(uncore, fence_reg_hi, upper_32_bits(val));
		intel_uncore_write_fw(uncore, fence_reg_lo, lower_32_bits(val));
		intel_uncore_posting_read_fw(uncore, fence_reg_lo);
	}
}
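
/*
 * Worked example (illustrative only, values hypothetical): for a Y-tiled vma
 * placed at GGTT offset 0x00800000 with fence_size 0x00100000 and a stride of
 * 512 bytes, the packing above yields
 *
 *	upper address field:  0x00800000 + 0x00100000 - I965_FENCE_PAGE
 *	                      (the last page covered by the fence)
 *	lower address field:  0x00800000 (the first page)
 *	pitch field:          512 / 128 - 1 = 3
 *	                      (pitch in 128-byte units, biased by one)
 *
 * plus the Y-tiling bit and I965_FENCE_REG_VALID. Writing val as two 32-bit
 * halves with the register first disabled (see the comment above) keeps the
 * hardware from ever sampling a half-updated fence.
 */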

static void i915_write_fence_reg(struct i915_fence_reg *fence)
{
	u32 val;

	val = 0;
	if (fence->tiling) {
		unsigned int stride = fence->stride;
		unsigned int tiling = fence->tiling;
		bool is_y_tiled = tiling == I915_TILING_Y;

		if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence_to_i915(fence)))
			stride /= 128;
		else
			stride /= 512;
		GEM_BUG_ON(!is_power_of_2(stride));

		val = fence->start;
		if (is_y_tiled)
			val |= BIT(I830_FENCE_TILING_Y_SHIFT);
		val |= I915_FENCE_SIZE_BITS(fence->size);
		val |= ilog2(stride) << I830_FENCE_PITCH_SHIFT;

		val |= I830_FENCE_REG_VALID;
	}

	if (!pipelined) {
		struct intel_uncore *uncore = fence_to_uncore(fence);
		i915_reg_t reg = FENCE_REG(fence->id);

		intel_uncore_write_fw(uncore, reg, val);
		intel_uncore_posting_read_fw(uncore, reg);
	}
}

static void i830_write_fence_reg(struct i915_fence_reg *fence)
{
	u32 val;

	val = 0;
	if (fence->tiling) {
		unsigned int stride = fence->stride;

		val = fence->start;
		if (fence->tiling == I915_TILING_Y)
			val |= BIT(I830_FENCE_TILING_Y_SHIFT);
		val |= I830_FENCE_SIZE_BITS(fence->size);
		val |= ilog2(stride / 128) << I830_FENCE_PITCH_SHIFT;
		val |= I830_FENCE_REG_VALID;
	}

	if (!pipelined) {
		struct intel_uncore *uncore = fence_to_uncore(fence);
		i915_reg_t reg = FENCE_REG(fence->id);

		intel_uncore_write_fw(uncore, reg, val);
		intel_uncore_posting_read_fw(uncore, reg);
	}
}

static void fence_write(struct i915_fence_reg *fence)
{
	struct drm_i915_private *i915 = fence_to_i915(fence);

	/*
	 * Previous access through the fence register is marshalled by
	 * the mb() inside the fault handlers (i915_gem_release_mmaps)
	 * and explicitly managed for internal users.
	 */

	if (IS_GEN(i915, 2))
		i830_write_fence_reg(fence);
	else if (IS_GEN(i915, 3))
		i915_write_fence_reg(fence);
	else
		i965_write_fence_reg(fence);

	/*
	 * Access through the fenced region afterwards is
	 * ordered by the posting reads whilst writing the registers.
	 */
}

static bool gpu_uses_fence_registers(struct i915_fence_reg *fence)
{
	return INTEL_GEN(fence_to_i915(fence)) < 4;
}

static int fence_update(struct i915_fence_reg *fence,
			struct i915_vma *vma)
{
	struct i915_ggtt *ggtt = fence->ggtt;
	struct intel_uncore *uncore = fence_to_uncore(fence);
	intel_wakeref_t wakeref;
	struct i915_vma *old;
	int ret;

	fence->tiling = 0;
	if (vma) {
		GEM_BUG_ON(!i915_gem_object_get_stride(vma->obj) ||
			   !i915_gem_object_get_tiling(vma->obj));

		if (!i915_vma_is_map_and_fenceable(vma))
			return -EINVAL;

		if (gpu_uses_fence_registers(fence)) {
			/* implicit 'unfenced' GPU blits */
			ret = i915_vma_sync(vma);
			if (ret)
				return ret;
		}

		fence->start = vma->node.start;
		fence->size = vma->fence_size;
		fence->stride = i915_gem_object_get_stride(vma->obj);
		fence->tiling = i915_gem_object_get_tiling(vma->obj);
	}
	WRITE_ONCE(fence->dirty, false);

	old = xchg(&fence->vma, NULL);
	if (old) {
		/* XXX Ideally we would move the waiting to outside the mutex */
		ret = i915_active_wait(&fence->active);
		if (ret) {
			fence->vma = old;
			return ret;
		}

		i915_vma_flush_writes(old);

		/*
		 * Ensure that all userspace CPU access is completed before
		 * stealing the fence.
		 */
		if (old != vma) {
			GEM_BUG_ON(old->fence != fence);
			i915_vma_revoke_mmap(old);
			old->fence = NULL;
		}

		list_move(&fence->link, &ggtt->fence_list);
	}

	/*
	 * We only need to update the register itself if the device is awake.
	 * If the device is currently powered down, we will defer the write
	 * to the runtime resume, see intel_ggtt_restore_fences().
	 *
	 * This only works for removing the fence register; on acquisition
	 * the caller must hold the rpm wakeref. The fence register must
	 * be cleared before we can use any other fences to ensure that
	 * the new fences do not overlap the elided clears, confusing HW.
	 */
	wakeref = intel_runtime_pm_get_if_in_use(uncore->rpm);
	if (!wakeref) {
		GEM_BUG_ON(vma);
		return 0;
	}

	WRITE_ONCE(fence->vma, vma);
	fence_write(fence);

	if (vma) {
		vma->fence = fence;
		list_move_tail(&fence->link, &ggtt->fence_list);
	}

	intel_runtime_pm_put(uncore->rpm, wakeref);
	return 0;
}

/**
 * i915_vma_revoke_fence - force-remove fence for a VMA
 * @vma: vma to map linearly (not through a fence reg)
 *
 * This function force-removes any fence from the given vma, which is useful
 * if the kernel wants to do untiled GTT access.
 */
void i915_vma_revoke_fence(struct i915_vma *vma)
{
	struct i915_fence_reg *fence = vma->fence;
	intel_wakeref_t wakeref;

	lockdep_assert_held(&vma->vm->mutex);
	if (!fence)
		return;

	GEM_BUG_ON(fence->vma != vma);
	GEM_BUG_ON(!i915_active_is_idle(&fence->active));
	GEM_BUG_ON(atomic_read(&fence->pin_count));

	fence->tiling = 0;
	WRITE_ONCE(fence->vma, NULL);
	vma->fence = NULL;

	with_intel_runtime_pm_if_in_use(fence_to_uncore(fence)->rpm, wakeref)
		fence_write(fence);
}

static bool fence_is_active(const struct i915_fence_reg *fence)
{
	return fence->vma && i915_vma_is_active(fence->vma);
}

static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
{
	struct i915_fence_reg *active = NULL;
	struct i915_fence_reg *fence, *fn;

	list_for_each_entry_safe(fence, fn, &ggtt->fence_list, link) {
		GEM_BUG_ON(fence->vma && fence->vma->fence != fence);

		if (fence == active) /* now seen this fence twice */
			active = ERR_PTR(-EAGAIN);

		/* Prefer idle fences so we do not have to wait on the GPU */
		if (active != ERR_PTR(-EAGAIN) && fence_is_active(fence)) {
			if (!active)
				active = fence;

			list_move_tail(&fence->link, &ggtt->fence_list);
			continue;
		}

		if (atomic_read(&fence->pin_count))
			continue;

		return fence;
	}

	/* Wait for completion of pending flips which consume fences */
	if (intel_has_pending_fb_unpin(ggtt->vm.i915))
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-EDEADLK);
}

int __i915_vma_pin_fence(struct i915_vma *vma)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
	struct i915_fence_reg *fence;
	struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
	int err;

	lockdep_assert_held(&vma->vm->mutex);

	/* Just update our place in the LRU if our fence is getting reused. */
	if (vma->fence) {
		fence = vma->fence;
		GEM_BUG_ON(fence->vma != vma);
		atomic_inc(&fence->pin_count);
		if (!fence->dirty) {
			list_move_tail(&fence->link, &ggtt->fence_list);
			return 0;
		}
	} else if (set) {
		fence = fence_find(ggtt);
		if (IS_ERR(fence))
			return PTR_ERR(fence);

		GEM_BUG_ON(atomic_read(&fence->pin_count));
		atomic_inc(&fence->pin_count);
	} else {
		return 0;
	}

	err = fence_update(fence, set);
	if (err)
		goto out_unpin;

	GEM_BUG_ON(fence->vma != set);
	GEM_BUG_ON(vma->fence != (set ? fence : NULL));

	if (set)
		return 0;

out_unpin:
	atomic_dec(&fence->pin_count);
	return err;
}

/**
 * i915_vma_pin_fence - set up fencing for a vma
 * @vma: vma to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 * This function walks the fence regs looking for a free one for @vma,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 *
 * For an untiled surface, this removes any existing fence.
 *
 * Returns:
 *
 * 0 on success, negative error code on failure.
 */
int i915_vma_pin_fence(struct i915_vma *vma)
{
	int err;

	if (!vma->fence && !i915_gem_object_is_tiled(vma->obj))
		return 0;

	/*
	 * Note that we revoke fences on runtime suspend. Therefore the user
	 * must keep the device awake whilst using the fence.
	 */
	assert_rpm_wakelock_held(vma->vm->gt->uncore->rpm);
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
	GEM_BUG_ON(!i915_vma_is_ggtt(vma));

	err = mutex_lock_interruptible(&vma->vm->mutex);
	if (err)
		return err;

	err = __i915_vma_pin_fence(vma);
	mutex_unlock(&vma->vm->mutex);

	return err;
}

/**
 * i915_reserve_fence - Reserve a fence for vGPU
 * @ggtt: Global GTT
 *
 * This function walks the fence regs looking for a free one and removes
 * it from the fence_list. It is used to reserve a fence for vGPU to use.
 */
struct i915_fence_reg *i915_reserve_fence(struct i915_ggtt *ggtt)
{
	struct i915_fence_reg *fence;
	int count;
	int ret;

	lockdep_assert_held(&ggtt->vm.mutex);

	/* Keep at least one fence available for the display engine. */
	count = 0;
	list_for_each_entry(fence, &ggtt->fence_list, link)
		count += !atomic_read(&fence->pin_count);
	if (count <= 1)
		return ERR_PTR(-ENOSPC);

	fence = fence_find(ggtt);
	if (IS_ERR(fence))
		return fence;

	if (fence->vma) {
		/* Force-remove fence from VMA */
		ret = fence_update(fence, NULL);
		if (ret)
			return ERR_PTR(ret);
	}

	list_del(&fence->link);

	return fence;
}

/**
 * i915_unreserve_fence - Reclaim a reserved fence
 * @fence: the fence reg
 *
 * This function adds a fence register previously reserved for vGPU back to
 * the fence_list.
 */
void i915_unreserve_fence(struct i915_fence_reg *fence)
{
	struct i915_ggtt *ggtt = fence->ggtt;

	lockdep_assert_held(&ggtt->vm.mutex);

	list_add(&fence->link, &ggtt->fence_list);
}
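
/*
 * Illustrative sketch (not part of the driver): how a vGPU-style user of the
 * two helpers above might claim a fence for its exclusive use and hand it
 * back later. The ggtt pointer and surrounding error handling are
 * hypothetical; only the locking requirement (ggtt->vm.mutex) and the two
 * entry points come from this file.
 *
 *	struct i915_fence_reg *reg;
 *
 *	mutex_lock(&ggtt->vm.mutex);
 *	reg = i915_reserve_fence(ggtt);
 *	mutex_unlock(&ggtt->vm.mutex);
 *	if (IS_ERR(reg))
 *		return PTR_ERR(reg);
 *
 *	... program the raw fence register on behalf of the guest ...
 *
 *	mutex_lock(&ggtt->vm.mutex);
 *	i915_unreserve_fence(reg);
 *	mutex_unlock(&ggtt->vm.mutex);
 */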

/**
 * intel_ggtt_restore_fences - restore fence state
 * @ggtt: Global GTT
 *
 * Restore the hw fence state to match the software tracking again, to be called
 * after a gpu reset and on resume. Note that on runtime suspend we only cancel
 * the fences, to be reacquired by the user later.
 */
void intel_ggtt_restore_fences(struct i915_ggtt *ggtt)
{
	int i;

	for (i = 0; i < ggtt->num_fences; i++)
		fence_write(&ggtt->fence_regs[i]);
}

/**
 * DOC: tiling swizzling details
 *
 * The idea behind tiling is to increase cache hit rates by rearranging
 * pixel data so that a group of pixel accesses are in the same cacheline.
 * Performance improvements from doing this on the back/depth buffer are on
 * the order of 30%.
 *
 * Intel architectures make this somewhat more complicated, though, by
 * adjustments made to addressing of data when the memory is in interleaved
 * mode (matched pairs of DIMMS) to improve memory bandwidth.
 * For interleaved memory, the CPU sends every sequential 64 bytes
 * to an alternate memory channel so it can get the bandwidth from both.
 *
 * The GPU also rearranges its accesses for increased bandwidth to interleaved
 * memory, and it matches what the CPU does for non-tiled.  However, when tiled
 * it does it a little differently, since one walks addresses not just in the
 * X direction but also Y.  So, along with alternating channels when bit
 * 6 of the address flips, it also alternates when other bits flip -- bits 9
 * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
 * are common to both the 915 and 965-class hardware.
 *
 * The CPU also sometimes XORs in higher bits as well, to improve
 * bandwidth doing strided access like we do so frequently in graphics.  This
 * is called "Channel XOR Randomization" in the MCH documentation.  The result
 * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
 * decode.
 *
 * All of this bit 6 XORing has an effect on our memory management,
 * as we need to make sure that the 3d driver can correctly address object
 * contents.
 *
 * If we don't have interleaved memory, all tiling is safe and no swizzling is
 * required.
 *
 * When bit 17 is XORed in, we simply refuse to tile at all.  Bit
 * 17 is not just a page offset, so as we page an object out and back in,
 * individual pages in it will have different bit 17 addresses, resulting in
 * each 64 bytes being swapped with its neighbor!
 *
 * Otherwise, if interleaved, we have to tell the 3d driver what address
 * swizzling it needs to do, since it's writing with the CPU to the pages
 * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
 * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
 * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
 * to match what the GPU expects.
 */
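
/*
 * Illustrative sketch (not part of the driver): the bit 6 swizzle described
 * above expressed as address arithmetic. Under I915_BIT_6_SWIZZLE_9_10 the
 * hardware XORs bits 9 and 10 of the address into bit 6, so CPU code that
 * wants to touch the same bytes through a linear view has to apply the
 * matching XOR. The helper name is hypothetical.
 *
 *	static unsigned long swizzle_bit6_9_10(unsigned long addr)
 *	{
 *		unsigned long bit6 = ((addr >> 3) ^ (addr >> 4)) & BIT(6);
 *
 *		return addr ^ bit6;	// bit 6 ^= bit 9 ^ bit 10
 *	}
 *
 * For I915_BIT_6_SWIZZLE_9_10_11 and ..._9_10_17 the extra bit (11 or 17) is
 * XORed in as well, which is why bit 17 dependent swizzling cannot survive a
 * page being moved to a different physical page (see above).
 */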

/**
 * detect_bit_6_swizzle - detect bit 6 swizzling pattern
 * @ggtt: Global GTT
 *
 * Detects bit 6 swizzling of address lookup between IGD access and CPU
 * access through main memory.
 */
static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct drm_i915_private *i915 = ggtt->vm.i915;
	u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
	u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;

	if (INTEL_GEN(i915) >= 8 || IS_VALLEYVIEW(i915)) {
		/*
		 * On BDW+, swizzling is not used. We leave the CPU memory
		 * controller in charge of optimizing memory accesses without
		 * the extra address manipulation on the GPU side.
		 *
		 * VLV and CHV don't have GPU swizzling.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (INTEL_GEN(i915) >= 6) {
		if (i915->preserve_bios_swizzle) {
			if (intel_uncore_read(uncore, DISP_ARB_CTL) &
			    DISP_TILE_SURFACE_SWIZZLING) {
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else {
				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			}
		} else {
			u32 dimm_c0, dimm_c1;

			dimm_c0 = intel_uncore_read(uncore, MAD_DIMM_C0);
			dimm_c1 = intel_uncore_read(uncore, MAD_DIMM_C1);
			dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
			dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
			/*
			 * Enable swizzling when the channels are populated
			 * with identically sized dimms. We don't need to check
			 * the 3rd channel because no CPU with a GPU attached
			 * ships in that configuration. Also, swizzling only
			 * makes sense for 2 channels anyway.
			 */
			if (dimm_c0 == dimm_c1) {
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else {
				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			}
		}
	} else if (IS_GEN(i915, 5)) {
		/*
		 * On Ironlake, whatever the DRAM config, the GPU always does
		 * the same swizzling setup.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
		swizzle_y = I915_BIT_6_SWIZZLE_9;
	} else if (IS_GEN(i915, 2)) {
		/*
		 * As far as we know, the 865 doesn't have these bit 6
		 * swizzling issues.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (IS_G45(i915) || IS_I965G(i915) || IS_G33(i915)) {
		/*
		 * The 965, G33, and newer have a very flexible memory
		 * configuration.  It will enable dual-channel mode
		 * (interleaving) on as much memory as it can, and the GPU
		 * will additionally sometimes enable different bit 6
		 * swizzling for tiled objects from the CPU.
		 *
		 * Here's what I found on the G965:
		 *    slot fill         memory size  swizzling
		 * 0A   0B   1A   1B    1-ch   2-ch
		 * 512  0    0    0     512    0     O
		 * 512  0    512  0     16     1008  X
		 * 512  0    0    512   16     1008  X
		 * 0    512  0    512   16     1008  X
		 * 1024 1024 1024 0     2048   1024  O
		 *
		 * We could probably detect this based on either the DRB
		 * matching, which was the case for the swizzling required in
		 * the table above, or from the 1-ch value being less than
		 * the minimum size of a rank.
		 *
		 * Reports indicate that the swizzling actually
		 * varies depending upon page placement inside the
		 * channels, i.e. we see swizzled pages where the
		 * banks of memory are paired and unswizzled on the
		 * uneven portion, so leave that as unknown.
		 */
		if (intel_uncore_read(uncore, C0DRB3) ==
		    intel_uncore_read(uncore, C1DRB3)) {
			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
			swizzle_y = I915_BIT_6_SWIZZLE_9;
		}
	} else {
		u32 dcc = intel_uncore_read(uncore, DCC);

		/*
		 * On 9xx chipsets, channel interleave by the CPU is
		 * determined by DCC.  For single-channel, neither the CPU
		 * nor the GPU does swizzling.  For dual channel interleaved,
		 * the GPU's interleave is bits 9 and 10 for X tiled, and bit
		 * 9 for Y tiled.  The CPU's interleave is independent, and
		 * can be based on either bit 11 (haven't seen this yet) or
		 * bit 17 (common).
		 */
		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			break;
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
			if (dcc & DCC_CHANNEL_XOR_DISABLE) {
				/*
				 * This is the base swizzling by the GPU for
				 * tiled buffers.
				 */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
				/* Bit 11 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
			} else {
				/* Bit 17 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
				swizzle_y = I915_BIT_6_SWIZZLE_9_17;
			}
			break;
		}

		/* check for L-shaped memory aka modified enhanced addressing */
		if (IS_GEN(i915, 4) &&
		    !(intel_uncore_read(uncore, DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
		}

		if (dcc == 0xffffffff) {
			drm_err(&i915->drm, "Couldn't read from MCHBAR.  "
				  "Disabling tiling.\n");
			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
		}
	}

	if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
	    swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) {
		/*
		 * Userspace likes to explode if it sees unknown swizzling,
		 * so lie. We will finish the lie when reporting through
		 * the get-tiling-ioctl by reporting the physical swizzle
		 * mode as unknown instead.
		 *
		 * As we don't strictly know what the swizzling is, it may be
		 * bit17 dependent, and so we need to also prevent the pages
		 * from being moved.
		 */
		i915->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	}

	i915->ggtt.bit_6_swizzle_x = swizzle_x;
	i915->ggtt.bit_6_swizzle_y = swizzle_y;
}

/*
 * Swap every 64 bytes of this page around, to account for it having a new
 * bit 17 of its physical address and therefore being interpreted differently
 * by the GPU.
 */
static void swizzle_page(struct page *page)
{
	char temp[64];
	char *vaddr;
	int i;

	vaddr = kmap(page);

	for (i = 0; i < PAGE_SIZE; i += 128) {
		memcpy(temp, &vaddr[i], 64);
		memcpy(&vaddr[i], &vaddr[i + 64], 64);
		memcpy(&vaddr[i + 64], temp, 64);
	}

	kunmap(page);
}

/**
 * i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
 * @obj: i915 GEM buffer object
 * @pages: the scattergather list of physical pages
 *
 * This function fixes up the swizzling in case any page frame number for this
 * object has changed in bit 17 since that state has been saved with
 * i915_gem_object_save_bit_17_swizzle().
 *
 * This is called when pinning backing storage again, since the kernel is free
 * to move unpinned backing storage around (either by directly moving pages or
 * by swapping them out and back in again).
 */
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
				  struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;
	int i;

	if (obj->bit_17 == NULL)
		return;

	i = 0;
	for_each_sgt_page(page, sgt_iter, pages) {
		char new_bit_17 = page_to_phys(page) >> 17;

		if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
			swizzle_page(page);
			set_page_dirty(page);
		}
		i++;
	}
}

/**
 * i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
 * @obj: i915 GEM buffer object
 * @pages: the scattergather list of physical pages
 *
 * This function saves the bit 17 of each page frame number so that swizzling
 * can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
 * be called before the backing storage can be unpinned.
 */
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
				    struct sg_table *pages)
{
	const unsigned int page_count = obj->base.size >> PAGE_SHIFT;
	struct sgt_iter sgt_iter;
	struct page *page;
	int i;

	if (obj->bit_17 == NULL) {
		obj->bit_17 = bitmap_zalloc(page_count, GFP_KERNEL);
		if (obj->bit_17 == NULL) {
			DRM_ERROR("Failed to allocate memory for bit 17 "
				  "record\n");
			return;
		}
	}

	i = 0;

	for_each_sgt_page(page, sgt_iter, pages) {
		if (page_to_phys(page) & (1 << 17))
			__set_bit(i, obj->bit_17);
		else
			__clear_bit(i, obj->bit_17);
		i++;
	}
}
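
/*
 * Illustrative sketch (not part of the driver): the expected pairing of the
 * two helpers above around an unpin/re-pin cycle of the backing store. The
 * surrounding function names here are hypothetical; only the calling order
 * and the two helpers come from this file.
 *
 *	on put_pages (backing store may now be swapped or migrated):
 *		if (needs_bit17_swizzle(obj))
 *			i915_gem_object_save_bit_17_swizzle(obj, pages);
 *
 *	on get_pages (pages are back, possibly at new physical addresses):
 *		if (needs_bit17_swizzle(obj))
 *			i915_gem_object_do_bit_17_swizzle(obj, pages);
 *
 * If bit 17 of a page's physical address changed while it was unpinned, the
 * do_ step swaps every pair of neighbouring 64-byte blocks so the data again
 * matches what the swizzling hardware expects.
 */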

void intel_ggtt_init_fences(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	int num_fences;
	int i;

	INIT_LIST_HEAD(&ggtt->fence_list);
	INIT_LIST_HEAD(&ggtt->userfault_list);
	intel_wakeref_auto_init(&ggtt->userfault_wakeref, uncore->rpm);

	detect_bit_6_swizzle(ggtt);

	if (!i915_ggtt_has_aperture(ggtt))
		num_fences = 0;
	else if (INTEL_GEN(i915) >= 7 &&
		 !(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
		num_fences = 32;
	else if (INTEL_GEN(i915) >= 4 ||
		 IS_I945G(i915) || IS_I945GM(i915) ||
		 IS_G33(i915) || IS_PINEVIEW(i915))
		num_fences = 16;
	else
		num_fences = 8;

	if (intel_vgpu_active(i915))
		num_fences = intel_uncore_read(uncore,
					       vgtif_reg(avail_rs.fence_num));
	ggtt->fence_regs = kcalloc(num_fences,
				   sizeof(*ggtt->fence_regs),
				   GFP_KERNEL);
	if (!ggtt->fence_regs)
		num_fences = 0;

	/* Initialize fence registers to zero */
	for (i = 0; i < num_fences; i++) {
		struct i915_fence_reg *fence = &ggtt->fence_regs[i];

		i915_active_init(&fence->active, NULL, NULL);
		fence->ggtt = ggtt;
		fence->id = i;
		list_add_tail(&fence->link, &ggtt->fence_list);
	}
	ggtt->num_fences = num_fences;

	intel_ggtt_restore_fences(ggtt);
}

void intel_ggtt_fini_fences(struct i915_ggtt *ggtt)
{
	int i;

	for (i = 0; i < ggtt->num_fences; i++) {
		struct i915_fence_reg *fence = &ggtt->fence_regs[i];

		i915_active_fini(&fence->active);
	}

	kfree(ggtt->fence_regs);
}

void intel_gt_init_swizzling(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;

	if (INTEL_GEN(i915) < 5 ||
	    i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	intel_uncore_rmw(uncore, DISP_ARB_CTL, 0, DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN(i915, 5))
		return;

	intel_uncore_rmw(uncore, TILECTL, 0, TILECTL_SWZCTL);

	if (IS_GEN(i915, 6))
		intel_uncore_write(uncore,
				   ARB_MODE,
				   _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (IS_GEN(i915, 7))
		intel_uncore_write(uncore,
				   ARB_MODE,
				   _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
	else if (IS_GEN(i915, 8))
		intel_uncore_write(uncore,
				   GAMTARBMODE,
				   _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
	else
		MISSING_CASE(INTEL_GEN(i915));
}
928