xref: /linux/arch/x86/kernel/pci-dma.c (revision a13d7201d7deedcbb6ac6efa94a1a7d34d3d79ec)
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static int forbid_dac __read_mostly;

struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole physical memory. This is
 * useful if a user wants to use an IOMMU only for KVM device assignment to
 * guests and not for driver DMA translation.
 */
int iommu_pass_through __read_mostly;

extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];

/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = ISA_DMA_BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES       65536

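/*
 * Validate @mask with dma_supported() and, if the device can address that
 * range, install it as the device's streaming DMA mask.
 */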
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

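/*
 * Early boot: sort the IOMMU detection table by dependency, then run each
 * entry's detect routine and the early_init hook of every IOMMU that
 * reports a hit.
 */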
void __init pci_iommu_alloc(void)
{
	struct iommu_table_entry *p;

	sort_iommu_table(__iommu_table, __iommu_table_end);
	check_iommu_entries(__iommu_table, __iommu_table_end);

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && p->detect && p->detect() > 0) {
			p->flags |= IOMMU_DETECTED;
			if (p->early_init)
				p->early_init();
			if (p->flags & IOMMU_FINISH_IF_DETECTED)
				break;
		}
	}
}
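
/*
 * Generic coherent allocation: prefer CMA when the caller may sleep, fall
 * back to alloc_pages_node(), and retry with GFP_DMA if the pages end up
 * above the device's coherent DMA mask.
 */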
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 struct dma_attrs *attrs)
{
	unsigned long dma_mask;
	struct page *page;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag &= ~__GFP_ZERO;
again:
	page = NULL;
	/* CMA can be used only in a context that permits sleeping */
	if (flag & __GFP_WAIT) {
		page = dma_alloc_from_contiguous(dev, count, get_order(size));
		if (page && page_to_phys(page) + size > dma_mask) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	/* fallback */
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (addr + size > dma_mask) {
		__free_pages(page, get_order(size));

		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}
	memset(page_address(page), 0, size);
	*dma_addr = addr;
	return page_address(page);
}

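/*
 * Free memory obtained from dma_generic_alloc_coherent(), handing CMA
 * pages back to the contiguous allocator.
 */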
void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_addr, struct dma_attrs *attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = virt_to_page(vaddr);

	if (!dma_release_from_contiguous(dev, page, count))
		free_pages((unsigned long)vaddr, get_order(size));
}

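/*
 * Allocate coherent memory for @dev: check the per-device coherent pool
 * first, then call the registered dma_map_ops ->alloc() hook with GFP
 * flags adjusted for the device's coherent DMA mask.
 */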
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		      gfp_t gfp, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *memory;

	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	if (!is_device_dma_capable(dev))
		return NULL;

	if (!ops->alloc)
		return NULL;

	memory = ops->alloc(dev, size, dma_handle,
			    dma_alloc_coherent_gfp_flags(dev, gfp), attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);

	return memory;
}
EXPORT_SYMBOL(dma_alloc_attrs);

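/*
 * Free memory allocated with dma_alloc_attrs(): per-device coherent pool
 * memory is returned via dma_release_from_coherent(), everything else
 * goes through the dma_map_ops ->free() hook.
 */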
void dma_free_attrs(struct device *dev, size_t size,
		    void *vaddr, dma_addr_t bus,
		    struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());       /* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	debug_dma_free_coherent(dev, size, vaddr, bus);
	if (ops->free)
		ops->free(dev, size, vaddr, bus, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);

/*
 * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
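/* For example, booting with "iommu=pt,nomerge" enables pass-through and disables merging. */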
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = 1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_pass_through = 1;

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);

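/*
 * Check whether @dev can address memory described by @mask.  DAC (64-bit)
 * addressing can be forbidden globally with "iommu=nodac" or by the VIA
 * bridge quirk below.
 */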
int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   The problem with this is that if we overflow the IOMMU area
	   and return a DAC address as fallback, the device may not
	   handle it correctly.

	   As a special case, some controllers have a 39-bit address
	   mode that is as efficient as 32-bit (aic79xx). Don't force
	   SAC for these.  Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but it gives
	   more graceful handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);

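/*
 * Late IOMMU initialization: set up DMA-API debugging, run the chosen
 * IOMMU implementation's init routine, and call the late_init hook of
 * every detected entry in the IOMMU table.
 */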
static int __init pci_iommu_init(void)
{
	struct iommu_table_entry *p;
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
	x86_init.iommu.iommu_init();

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
			p->late_init();
	}

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here. */

static void via_no_dac(struct pci_dev *dev)
{
	if (forbid_dac == 0) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
				PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif