xref: /linux/drivers/gpu/drm/nouveau/nouveau_ttm.c (revision 72503791edffe516848d0f01d377fa9cd0711970)
/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <subdev/fb.h>
#include <subdev/vm.h>
#include <subdev/instmem.h>

#include "nouveau_drm.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

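/*
 * TTM memory-type manager for VRAM.  The VRAM heap itself is owned by the
 * nouveau_fb subdev, so there is no per-manager state to set up or tear
 * down here; allocation and release go straight through pfb->ram.
 */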
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	/* nothing to do */
	return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
	/* nothing to do */
	return 0;
}

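/*
 * Drop any GPU virtual-address mappings still attached to a memory node:
 * unmap each populated vma and return its address range to the VM.
 */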
static inline void
nouveau_mem_node_cleanup(struct nouveau_mem *node)
{
	if (node->vma[0].node) {
		nouveau_vm_unmap(&node->vma[0]);
		nouveau_vm_put(&node->vma[0]);
	}

	if (node->vma[1].node) {
		nouveau_vm_unmap(&node->vma[1]);
		nouveau_vm_put(&node->vma[1]);
	}
}

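/* Tear down the node's mappings, then hand the VRAM back to the fb subdev. */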
static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);

	nouveau_mem_node_cleanup(mem->mm_node);
	pfb->ram.put(pfb, (struct nouveau_mem **)&mem->mm_node);
}

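/*
 * Allocate VRAM for a buffer object.  BOs marked NOUVEAU_GEM_TILE_NONCONTIG
 * may be backed by discontiguous chunks of at least one (large) page.  On
 * -ENOSPC we return 0 with mem->mm_node left NULL, which TTM treats as
 * "no space here" and responds to by trying eviction rather than failing
 * the allocation outright.
 */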
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_mem *node;
	u32 size_nc = 0;
	int ret;

	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->page_shift;

	ret = pfb->ram.get(pfb, mem->num_pages << PAGE_SHIFT,
			   mem->page_alignment << PAGE_SHIFT, size_nc,
			   (nvbo->tile_flags >> 8) & 0x3ff, &node);
	if (ret) {
		mem->mm_node = NULL;
		return (ret == -ENOSPC) ? 0 : ret;
	}

	node->page_shift = nvbo->page_shift;

	mem->mm_node = node;
	mem->start   = node->offset >> PAGE_SHIFT;
	return 0;
}

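/*
 * Dump the VRAM mm's node list.  Offsets and lengths are stored in 4 KiB
 * units, hence the << 12 when printing; nodes with type 0 are free space.
 */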
static void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
	struct nouveau_mm *mm = man->priv;
	struct nouveau_mm_node *r;
	u32 total = 0, free = 0;

	mutex_lock(&mm->mutex);
	list_for_each_entry(r, &mm->nodes, nl_entry) {
		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
		       prefix, r->type, ((u64)r->offset << 12),
		       (((u64)r->offset + r->length) << 12));

		total += r->length;
		if (!r->type)
			free += r->length;
	}
	mutex_unlock(&mm->mutex);

	printk(KERN_DEBUG "%s  total: 0x%010llx free: 0x%010llx\n",
	       prefix, (u64)total << 12, (u64)free << 12);
	printk(KERN_DEBUG "%s  block: 0x%08x\n",
	       prefix, mm->block_size << 12);
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	.init = nouveau_vram_manager_init,
	.takedown = nouveau_vram_manager_fini,
	.get_node = nouveau_vram_manager_new,
	.put_node = nouveau_vram_manager_del,
	.debug = nouveau_vram_manager_debug,
};

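/*
 * A sketch (not code from this file) of how the funcs tables above and
 * below are consumed: nouveau_bo.c's init_mem_type() hook points TTM at
 * the manager for each memory type, roughly
 *
 *	man->func = &nouveau_vram_manager;
 *
 * before the type is brought online with ttm_bo_init_mm().
 */

/*
 * TTM memory-type manager for GART on chipsets where the GPU VM handles
 * placement: _new only allocates a tracking node and leaves mem->start at
 * 0, since the pages are mapped later when the ttm_tt backend binds them.
 */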
static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	nouveau_mem_node_cleanup(mem->mm_node);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_mem *node;

	/* the GART window is capped at 512 MiB (see nouveau_ttm_init()) */
	if (unlikely((mem->num_pages << PAGE_SHIFT) >= 512 * 1024 * 1024))
		return -ENOMEM;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	node->page_shift = 12;

	mem->mm_node = node;
	mem->start   = 0;
	return 0;
}

static void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	.init = nouveau_gart_manager_init,
	.takedown = nouveau_gart_manager_fini,
	.get_node = nouveau_gart_manager_new,
	.put_node = nouveau_gart_manager_del,
	.debug = nouveau_gart_manager_debug,
};

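/*
 * Pre-NV50 chipsets have a single GART address space owned by the nv04
 * vmmgr.  This manager takes a reference on that VM at init time and
 * carves BO-sized ranges out of it per allocation; the include below
 * reaches into the subdev's private struct to get at the VM.
 */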
#include <core/subdev/vm/nv04.h>
static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nouveau_vmmgr *vmm = nouveau_vmmgr(drm->device);
	struct nv04_vmmgr_priv *priv = (void *)vmm;
	struct nouveau_vm *vm = NULL;

	/* hold a reference on the shared VM for the manager's lifetime */
	nouveau_vm_ref(priv->vm, &vm, NULL);
	man->priv = vm;
	return 0;
}

static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	struct nouveau_vm *vm = man->priv;

	nouveau_vm_ref(NULL, &vm, NULL);
	man->priv = NULL;
	return 0;
}

static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
	struct nouveau_mem *node = mem->mm_node;

	if (node->vma[0].node)
		nouveau_vm_put(&node->vma[0]);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

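/*
 * Reserve a GPU virtual-address range for the BO up front; the actual
 * pages are mapped into it later, at bind time.  mem->start is the VM
 * offset in pages.
 */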
static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
		      struct ttm_buffer_object *bo,
		      struct ttm_placement *placement,
		      struct ttm_mem_reg *mem)
{
	struct nouveau_mem *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	ret = nouveau_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
			     NV_MEM_ACCESS_RW, &node->vma[0]);
	if (ret) {
		kfree(node);
		return ret;
	}

	mem->mm_node = node;
	mem->start   = node->vma[0].offset >> PAGE_SHIFT;
	return 0;
}

static void
nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
	.init = nv04_gart_manager_init,
	.takedown = nv04_gart_manager_fini,
	.get_node = nv04_gart_manager_new,
	.put_node = nv04_gart_manager_del,
	.debug = nv04_gart_manager_debug,
};

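/*
 * mmap() entry point: offsets below DRM_FILE_PAGE_OFFSET are legacy DRM
 * maps, everything above is a TTM buffer object.
 */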
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_mmap(filp, vma);

	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}

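/*
 * Thin adaptors letting the drm_global reference machinery construct and
 * destroy the shared TTM memory-accounting object.
 */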
static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

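/*
 * Take references on the global TTM memory-accounting and BO objects,
 * creating them on first use.  mem_global_ref.release doubles as the
 * "initialized" flag tested by nouveau_ttm_global_release(), so it is
 * cleared again on any failure.
 */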
int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
	struct drm_global_reference *global_ref;
	int ret;

	global_ref = &drm->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &nouveau_ttm_mem_global_init;
	global_ref->release = &nouveau_ttm_mem_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM memory accounting\n");
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	drm->ttm.bo_global_ref.mem_glob = global_ref->object;
	global_ref = &drm->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM BO subsystem\n");
		drm_global_item_unref(&drm->ttm.mem_global_ref);
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	return 0;
}

void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
	if (drm->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
	drm_global_item_unref(&drm->ttm.mem_global_ref);
	drm->ttm.mem_global_ref.release = NULL;
}

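/*
 * Bring up TTM for the device: choose a DMA mask (AGP, and hardware that
 * cannot address the full range, are limited to 32 bits), initialize the
 * BO device, then size and register the VRAM and GART pools.
 */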
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
	struct drm_device *dev = drm->dev;
	u32 bits;
	int ret;

	bits = nouveau_vmmgr(drm->device)->dma_bits;
	if (drm->agp.stat == ENABLED ||
	    !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
		bits = 32;

	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
	if (ret)
		return ret;

	/* fall back to 32-bit coherent allocations if the wide mask fails */
	ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
	if (ret)
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));

	ret = nouveau_ttm_global_init(drm);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&drm->ttm.bdev,
				  drm->ttm.bo_global_ref.ref.object,
				  &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
				  bits <= 32);
	if (ret) {
		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
		return ret;
	}

	/* VRAM init */
	drm->gem.vram_available  = nouveau_fb(drm->device)->ram.size;
	drm->gem.vram_available -= nouveau_instmem(drm->device)->reserved;

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
			      drm->gem.vram_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
		return ret;
	}

	drm->ttm.mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
				     pci_resource_len(dev->pdev, 1),
				     DRM_MTRR_WC);

	/* GART init */
	if (drm->agp.stat != ENABLED) {
		drm->gem.gart_available = nouveau_vmmgr(drm->device)->limit;
		if (drm->gem.gart_available > 512 * 1024 * 1024)
			drm->gem.gart_available = 512 * 1024 * 1024;
	} else {
		drm->gem.gart_available = drm->agp.size;
	}

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
			      drm->gem.gart_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "GART mm init failed, %d\n", ret);
		return ret;
	}

	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
	return 0;
}

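/*
 * Tear everything down in the reverse order of nouveau_ttm_init(): the
 * memory pools, the BO device, the global references, and finally the
 * write-combining MTRR on the VRAM BAR.
 */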
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
	mutex_lock(&drm->dev->struct_mutex);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
	mutex_unlock(&drm->dev->struct_mutex);

	ttm_bo_device_release(&drm->ttm.bdev);

	nouveau_ttm_global_release(drm);

	if (drm->ttm.mtrr >= 0) {
		drm_mtrr_del(drm->ttm.mtrr,
			     pci_resource_start(drm->dev->pdev, 1),
			     pci_resource_len(drm->dev->pdev, 1), DRM_MTRR_WC);
		drm->ttm.mtrr = -1;
	}
}