xref: /linux/arch/x86/kernel/amd_nb.c (revision 564eb714f5f09ac733c26860d5f0831f213fbdf1)
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

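/* GART flush words, cached per northbridge by amd_cache_gart(). */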
static u32 *flush_words;

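/*
 * PCI device IDs of the northbridge "misc" (function 3) devices, one entry
 * per supported CPU family; amd_nb_link_ids below lists the matching
 * "link" (function 4) devices.
 */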
const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{}
};

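/*
 * { bus, device base, device limit } ranges that early PCI code scans for
 * AMD northbridge devices.
 */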
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

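/*
 * Return the next PCI device after @dev that matches one of the IDs in
 * @ids, or NULL when there are no more matches.
 */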
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

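/*
 * Enumerate the northbridges once: count the "misc" devices, allocate the
 * amd_northbridges array, pair each node with its misc and link PCI devices,
 * and set the feature flags (GART, L3 index disable, L3 partitioning) for
 * the detected CPU family.
 */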
int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *misc, *link;

	if (amd_nb_num())
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (i == 0)
		return 0;

	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = NULL;
	for (i = 0; i != amd_nb_num(); i++) {
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	/* GART is present on family 0fh, family 10h, and family 15h up to model 0fh */
	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
	    (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10))
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of errata E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor, but as far as I can figure out
 * they're useless anyway.
 */
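/* @device packs the PCI vendor ID in bits 15:0 and the device ID in bits 31:16. */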
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

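/*
 * Describe the MMCONFIG aperture advertised by the MSR_FAM10H_MMIO_CONF_BASE
 * MSR in @res; returns NULL if the CPU is not a family 10h+ AMD part or if
 * MMCONFIG is not enabled.
 */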
struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

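/*
 * Return the 4-bit L3 subcache enable mask for @cpu's compute unit, or 0
 * if L3 partitioning is not supported.
 */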
int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	cuid = cpu_data(cpu).compute_unit_id;
	return (mask >> (4 * cuid)) & 0xf;
}

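/*
 * Set the L3 subcache enable mask for @cpu's compute unit. BAN mode is
 * switched off while subcaches are disabled and restored once the
 * partitioning is back in its reset state.
 */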
int amd_set_subcaches(int cpu, int mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).compute_unit_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

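/*
 * Read and cache the GART flush word (misc device register 0x9c) of every
 * northbridge so that amd_flush_garts() can rewrite it later.
 */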
static int amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		return -ENOMEM;
	}

	for (i = 0; i != amd_nb_num(); i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				      &flush_words[i]);

	return 0;
}

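/*
 * Trigger a GART flush on every northbridge and wait until the hardware
 * clears the flush bit again.
 */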
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyway
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_nb_num(); i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_nb_num(); i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

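/*
 * Cache the northbridge devices and the GART flush words once the PCI
 * subsystem is available.
 */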
static __init int init_amd_nbs(void)
{
	int err = 0;

	err = amd_cache_northbridges();

	if (err < 0)
		pr_notice("Cannot enumerate AMD northbridges\n");

	if (amd_cache_gart() < 0)
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");

	return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);