xref: /linux/tools/testing/selftests/kvm/aarch64/get-reg-list.c (revision a460513ed4b6994bfeb7bd86f72853140bc1ac12)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Check for KVM_GET_REG_LIST regressions.
4  *
5  * Copyright (C) 2020, Red Hat, Inc.
6  *
7  * When attempting to migrate from a host with an older kernel to a host
8  * with a newer kernel we allow the newer kernel on the destination to
9  * list new registers with get-reg-list. We assume they'll be unused, at
10  * least until the guest reboots, and so they're relatively harmless.
11  * However, if the destination host with the newer kernel is missing
12  * registers which the source host with the older kernel has, then that's
13  * a regression in get-reg-list. This test checks for that regression by
14  * checking the current list against a blessed list. We should never have
15  * missing registers, but if new ones appear then they can probably be
16  * added to the blessed list. A completely new blessed list can be created
17  * by running the test with the --list command line argument.
18  *
19  * Note, the blessed list should be created from the oldest possible
20  * kernel. We can't go older than v4.15, though, because that's the first
21  * release to expose the ID system registers in KVM_GET_REG_LIST, see
22  * commit 93390c0a1b20 ("arm64: KVM: Hide unsupported AArch64 CPU features
23  * from guests"). Also, one must use the --core-reg-fixup command line
24  * option when running on an older kernel that doesn't include df205b5c6328
25  * ("KVM: arm64: Filter out invalid core register IDs in KVM_GET_REG_LIST")
26  */
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include "kvm_util.h"
31 #include "test_util.h"
32 #include "processor.h"
33 
34 #ifdef REG_LIST_SVE
35 #define reg_list_sve() (true)
36 #else
37 #define reg_list_sve() (false)
38 #endif
39 
/* Bits that identify a register's class (arch, size, coproc) rather than the register itself. */
#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK)

/* Iterate over every register id returned by KVM_GET_REG_LIST. */
#define for_each_reg(i)								\
	for ((i) = 0; (i) < reg_list->n; ++(i))

/* Like for_each_reg(), but skip registers excluded by filter_reg(). */
#define for_each_reg_filtered(i)						\
	for_each_reg(i)								\
		if (!filter_reg(reg_list->reg[i]))

/* Iterate over blessed registers that the kernel's list does NOT contain. */
#define for_each_missing_reg(i)							\
	for ((i) = 0; (i) < blessed_n; ++(i))					\
		if (!find_reg(reg_list->reg, reg_list->n, blessed_reg[i]))

/* Iterate over (filtered) kernel registers that the blessed list does NOT contain. */
#define for_each_new_reg(i)							\
	for_each_reg_filtered(i)						\
		if (!find_reg(blessed_reg, blessed_n, reg_list->reg[i]))
56 
57 
/* Register list fetched from the vcpu (possibly rewritten by core_reg_fixup()). */
static struct kvm_reg_list *reg_list;

/* Blessed-list tables; their definitions are at the bottom of this file. */
static __u64 base_regs[], vregs[], sve_regs[], rejects_set[];
static __u64 base_regs_n, vregs_n, sve_regs_n, rejects_set_n;
/* Blessed list assembled at runtime: base_regs plus either vregs or sve_regs. */
static __u64 *blessed_reg, blessed_n;
63 
64 static bool filter_reg(__u64 reg)
65 {
66 	/*
67 	 * DEMUX register presence depends on the host's CLIDR_EL1.
68 	 * This means there's no set of them that we can bless.
69 	 */
70 	if ((reg & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
71 		return true;
72 
73 	return false;
74 }
75 
76 static bool find_reg(__u64 regs[], __u64 nr_regs, __u64 reg)
77 {
78 	int i;
79 
80 	for (i = 0; i < nr_regs; ++i)
81 		if (reg == regs[i])
82 			return true;
83 	return false;
84 }
85 
86 static const char *str_with_index(const char *template, __u64 index)
87 {
88 	char *str, *p;
89 	int n;
90 
91 	str = strdup(template);
92 	p = strstr(str, "##");
93 	n = sprintf(p, "%lld", index);
94 	strcat(p + n, strstr(template, "##") + 2);
95 
96 	return (const char *)str;
97 }
98 
/* Number of 32-bit words each kind of core register occupies in struct kvm_regs. */
#define CORE_REGS_XX_NR_WORDS	2
#define CORE_SPSR_XX_NR_WORDS	2
#define CORE_FPREGS_XX_NR_WORDS	4
102 
/*
 * Return the source-level expression that generates core register @id,
 * e.g. "KVM_REG_ARM_CORE_REG(regs.sp)", for --list output.  Strings
 * built via str_with_index() are heap-allocated and never freed.
 */
static const char *core_id_to_str(__u64 id)
{
	__u64 core_off = id & ~REG_MASK, idx;

	/*
	 * core_off is the offset into struct kvm_regs
	 */
	switch (core_off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
		/* Offsets count 32-bit words; x0-x30 are two words apart. */
		idx = (core_off - KVM_REG_ARM_CORE_REG(regs.regs[0])) / CORE_REGS_XX_NR_WORDS;
		TEST_ASSERT(idx < 31, "Unexpected regs.regs index: %lld", idx);
		return str_with_index("KVM_REG_ARM_CORE_REG(regs.regs[##])", idx);
	case KVM_REG_ARM_CORE_REG(regs.sp):
		return "KVM_REG_ARM_CORE_REG(regs.sp)";
	case KVM_REG_ARM_CORE_REG(regs.pc):
		return "KVM_REG_ARM_CORE_REG(regs.pc)";
	case KVM_REG_ARM_CORE_REG(regs.pstate):
		return "KVM_REG_ARM_CORE_REG(regs.pstate)";
	case KVM_REG_ARM_CORE_REG(sp_el1):
		return "KVM_REG_ARM_CORE_REG(sp_el1)";
	case KVM_REG_ARM_CORE_REG(elr_el1):
		return "KVM_REG_ARM_CORE_REG(elr_el1)";
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		idx = (core_off - KVM_REG_ARM_CORE_REG(spsr[0])) / CORE_SPSR_XX_NR_WORDS;
		TEST_ASSERT(idx < KVM_NR_SPSR, "Unexpected spsr index: %lld", idx);
		return str_with_index("KVM_REG_ARM_CORE_REG(spsr[##])", idx);
	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		/* FP/SIMD vector registers are 128 bits: four words apart. */
		idx = (core_off - KVM_REG_ARM_CORE_REG(fp_regs.vregs[0])) / CORE_FPREGS_XX_NR_WORDS;
		TEST_ASSERT(idx < 32, "Unexpected fp_regs.vregs index: %lld", idx);
		return str_with_index("KVM_REG_ARM_CORE_REG(fp_regs.vregs[##])", idx);
	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
		return "KVM_REG_ARM_CORE_REG(fp_regs.fpsr)";
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		return "KVM_REG_ARM_CORE_REG(fp_regs.fpcr)";
	}

	TEST_FAIL("Unknown core reg id: 0x%llx", id);
	return NULL;
}
145 
146 static const char *sve_id_to_str(__u64 id)
147 {
148 	__u64 sve_off, n, i;
149 
150 	if (id == KVM_REG_ARM64_SVE_VLS)
151 		return "KVM_REG_ARM64_SVE_VLS";
152 
153 	sve_off = id & ~(REG_MASK | ((1ULL << 5) - 1));
154 	i = id & (KVM_ARM64_SVE_MAX_SLICES - 1);
155 
156 	TEST_ASSERT(i == 0, "Currently we don't expect slice > 0, reg id 0x%llx", id);
157 
158 	switch (sve_off) {
159 	case KVM_REG_ARM64_SVE_ZREG_BASE ...
160 	     KVM_REG_ARM64_SVE_ZREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_ZREGS - 1:
161 		n = (id >> 5) & (KVM_ARM64_SVE_NUM_ZREGS - 1);
162 		TEST_ASSERT(id == KVM_REG_ARM64_SVE_ZREG(n, 0),
163 			    "Unexpected bits set in SVE ZREG id: 0x%llx", id);
164 		return str_with_index("KVM_REG_ARM64_SVE_ZREG(##, 0)", n);
165 	case KVM_REG_ARM64_SVE_PREG_BASE ...
166 	     KVM_REG_ARM64_SVE_PREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_PREGS - 1:
167 		n = (id >> 5) & (KVM_ARM64_SVE_NUM_PREGS - 1);
168 		TEST_ASSERT(id == KVM_REG_ARM64_SVE_PREG(n, 0),
169 			    "Unexpected bits set in SVE PREG id: 0x%llx", id);
170 		return str_with_index("KVM_REG_ARM64_SVE_PREG(##, 0)", n);
171 	case KVM_REG_ARM64_SVE_FFR_BASE:
172 		TEST_ASSERT(id == KVM_REG_ARM64_SVE_FFR(0),
173 			    "Unexpected bits set in SVE FFR id: 0x%llx", id);
174 		return "KVM_REG_ARM64_SVE_FFR(0)";
175 	}
176 
177 	return NULL;
178 }
179 
/*
 * Print one blessed-list line for @id, dispatching on the coproc class
 * to reconstruct the macro expression that produced the id.  Asserts
 * that no unexpected bits are set, so it doubles as an id sanity check.
 */
static void print_reg(__u64 id)
{
	unsigned op0, op1, crn, crm, op2;
	const char *reg_size = NULL;

	TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_ARM64,
		    "KVM_REG_ARM64 missing in reg id: 0x%llx", id);

	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U8:
		reg_size = "KVM_REG_SIZE_U8";
		break;
	case KVM_REG_SIZE_U16:
		reg_size = "KVM_REG_SIZE_U16";
		break;
	case KVM_REG_SIZE_U32:
		reg_size = "KVM_REG_SIZE_U32";
		break;
	case KVM_REG_SIZE_U64:
		reg_size = "KVM_REG_SIZE_U64";
		break;
	case KVM_REG_SIZE_U128:
		reg_size = "KVM_REG_SIZE_U128";
		break;
	case KVM_REG_SIZE_U256:
		reg_size = "KVM_REG_SIZE_U256";
		break;
	case KVM_REG_SIZE_U512:
		reg_size = "KVM_REG_SIZE_U512";
		break;
	case KVM_REG_SIZE_U1024:
		reg_size = "KVM_REG_SIZE_U1024";
		break;
	case KVM_REG_SIZE_U2048:
		reg_size = "KVM_REG_SIZE_U2048";
		break;
	default:
		TEST_FAIL("Unexpected reg size: 0x%llx in reg id: 0x%llx",
			  (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
	}

	switch (id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:
		printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_CORE | %s,\n", reg_size, core_id_to_str(id));
		break;
	case KVM_REG_ARM_DEMUX:
		TEST_ASSERT(!(id & ~(REG_MASK | KVM_REG_ARM_DEMUX_ID_MASK | KVM_REG_ARM_DEMUX_VAL_MASK)),
			    "Unexpected bits set in DEMUX reg id: 0x%llx", id);
		printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | %lld,\n",
		       reg_size, id & KVM_REG_ARM_DEMUX_VAL_MASK);
		break;
	case KVM_REG_ARM64_SYSREG:
		/* Decode the encoding fields, then verify they round-trip exactly. */
		op0 = (id & KVM_REG_ARM64_SYSREG_OP0_MASK) >> KVM_REG_ARM64_SYSREG_OP0_SHIFT;
		op1 = (id & KVM_REG_ARM64_SYSREG_OP1_MASK) >> KVM_REG_ARM64_SYSREG_OP1_SHIFT;
		crn = (id & KVM_REG_ARM64_SYSREG_CRN_MASK) >> KVM_REG_ARM64_SYSREG_CRN_SHIFT;
		crm = (id & KVM_REG_ARM64_SYSREG_CRM_MASK) >> KVM_REG_ARM64_SYSREG_CRM_SHIFT;
		op2 = (id & KVM_REG_ARM64_SYSREG_OP2_MASK) >> KVM_REG_ARM64_SYSREG_OP2_SHIFT;
		TEST_ASSERT(id == ARM64_SYS_REG(op0, op1, crn, crm, op2),
			    "Unexpected bits set in SYSREG reg id: 0x%llx", id);
		printf("\tARM64_SYS_REG(%d, %d, %d, %d, %d),\n", op0, op1, crn, crm, op2);
		break;
	case KVM_REG_ARM_FW:
		TEST_ASSERT(id == KVM_REG_ARM_FW_REG(id & 0xffff),
			    "Unexpected bits set in FW reg id: 0x%llx", id);
		printf("\tKVM_REG_ARM_FW_REG(%lld),\n", id & 0xffff);
		break;
	case KVM_REG_ARM64_SVE:
		if (reg_list_sve())
			printf("\t%s,\n", sve_id_to_str(id));
		else
			TEST_FAIL("KVM_REG_ARM64_SVE is an unexpected coproc type in reg id: 0x%llx", id);
		break;
	default:
		TEST_FAIL("Unexpected coproc type: 0x%llx in reg id: 0x%llx",
			  (id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT, id);
	}
}
257 
258 /*
259  * Older kernels listed each 32-bit word of CORE registers separately.
260  * For 64 and 128-bit registers we need to ignore the extra words. We
261  * also need to fixup the sizes, because the older kernels stated all
262  * registers were 64-bit, even when they weren't.
263  */
static void core_reg_fixup(void)
{
	struct kvm_reg_list *tmp;
	__u64 id, core_off;
	int i;

	/* NOTE(review): calloc() result is unchecked; a NULL tmp would crash
	 * on the first store below.  Tolerable in a selftest, but worth confirming. */
	tmp = calloc(1, sizeof(*tmp) + reg_list->n * sizeof(__u64));

	for (i = 0; i < reg_list->n; ++i) {
		id = reg_list->reg[i];

		/* Non-core registers pass through unmodified. */
		if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM_CORE) {
			tmp->reg[tmp->n++] = id;
			continue;
		}

		core_off = id & ~REG_MASK;

		switch (core_off) {
		case 0x52: case 0xd2: case 0xd6:
			/*
			 * These offsets are pointing at padding.
			 * We need to ignore them too.
			 */
			continue;
		case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
		     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
			/* Keep only the first of the four words of each 128-bit
			 * vreg, and restamp it with the correct U128 size. */
			if (core_off & 3)
				continue;
			id &= ~KVM_REG_SIZE_MASK;
			id |= KVM_REG_SIZE_U128;
			tmp->reg[tmp->n++] = id;
			continue;
		case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
		case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
			/* fpsr/fpcr are really only 32 bits wide. */
			id &= ~KVM_REG_SIZE_MASK;
			id |= KVM_REG_SIZE_U32;
			tmp->reg[tmp->n++] = id;
			continue;
		default:
			/* 64-bit registers: keep the even word, drop the odd half. */
			if (core_off & 1)
				continue;
			tmp->reg[tmp->n++] = id;
			break;
		}
	}

	free(reg_list);
	reg_list = tmp;
}
314 
315 static void prepare_vcpu_init(struct kvm_vcpu_init *init)
316 {
317 	if (reg_list_sve())
318 		init->features[0] |= 1 << KVM_ARM_VCPU_SVE;
319 }
320 
321 static void finalize_vcpu(struct kvm_vm *vm, uint32_t vcpuid)
322 {
323 	int feature;
324 
325 	if (reg_list_sve()) {
326 		feature = KVM_ARM_VCPU_SVE;
327 		vcpu_ioctl(vm, vcpuid, KVM_ARM_VCPU_FINALIZE, &feature);
328 	}
329 }
330 
331 static void check_supported(void)
332 {
333 	if (reg_list_sve() && !kvm_check_cap(KVM_CAP_ARM_SVE)) {
334 		fprintf(stderr, "SVE not available, skipping tests\n");
335 		exit(KSFT_SKIP);
336 	}
337 }
338 
339 int main(int ac, char **av)
340 {
341 	struct kvm_vcpu_init init = { .target = -1, };
342 	int new_regs = 0, missing_regs = 0, i;
343 	int failed_get = 0, failed_set = 0, failed_reject = 0;
344 	bool print_list = false, print_filtered = false, fixup_core_regs = false;
345 	struct kvm_vm *vm;
346 	__u64 *vec_regs;
347 
348 	check_supported();
349 
350 	for (i = 1; i < ac; ++i) {
351 		if (strcmp(av[i], "--core-reg-fixup") == 0)
352 			fixup_core_regs = true;
353 		else if (strcmp(av[i], "--list") == 0)
354 			print_list = true;
355 		else if (strcmp(av[i], "--list-filtered") == 0)
356 			print_filtered = true;
357 		else
358 			TEST_FAIL("Unknown option: %s\n", av[i]);
359 	}
360 
361 	vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
362 	prepare_vcpu_init(&init);
363 	aarch64_vcpu_add_default(vm, 0, &init, NULL);
364 	finalize_vcpu(vm, 0);
365 
366 	reg_list = vcpu_get_reg_list(vm, 0);
367 
368 	if (fixup_core_regs)
369 		core_reg_fixup();
370 
371 	if (print_list || print_filtered) {
372 		putchar('\n');
373 		for_each_reg(i) {
374 			__u64 id = reg_list->reg[i];
375 			if ((print_list && !filter_reg(id)) ||
376 			    (print_filtered && filter_reg(id)))
377 				print_reg(id);
378 		}
379 		putchar('\n');
380 		return 0;
381 	}
382 
383 	/*
384 	 * We only test that we can get the register and then write back the
385 	 * same value. Some registers may allow other values to be written
386 	 * back, but others only allow some bits to be changed, and at least
387 	 * for ID registers set will fail if the value does not exactly match
388 	 * what was returned by get. If registers that allow other values to
389 	 * be written need to have the other values tested, then we should
390 	 * create a new set of tests for those in a new independent test
391 	 * executable.
392 	 */
393 	for_each_reg(i) {
394 		uint8_t addr[2048 / 8];
395 		struct kvm_one_reg reg = {
396 			.id = reg_list->reg[i],
397 			.addr = (__u64)&addr,
398 		};
399 		int ret;
400 
401 		ret = _vcpu_ioctl(vm, 0, KVM_GET_ONE_REG, &reg);
402 		if (ret) {
403 			puts("Failed to get ");
404 			print_reg(reg.id);
405 			putchar('\n');
406 			++failed_get;
407 		}
408 
409 		/* rejects_set registers are rejected after KVM_ARM_VCPU_FINALIZE */
410 		if (find_reg(rejects_set, rejects_set_n, reg.id)) {
411 			ret = _vcpu_ioctl(vm, 0, KVM_SET_ONE_REG, &reg);
412 			if (ret != -1 || errno != EPERM) {
413 				printf("Failed to reject (ret=%d, errno=%d) ", ret, errno);
414 				print_reg(reg.id);
415 				putchar('\n');
416 				++failed_reject;
417 			}
418 			continue;
419 		}
420 
421 		ret = _vcpu_ioctl(vm, 0, KVM_SET_ONE_REG, &reg);
422 		if (ret) {
423 			puts("Failed to set ");
424 			print_reg(reg.id);
425 			putchar('\n');
426 			++failed_set;
427 		}
428 	}
429 
430 	if (reg_list_sve()) {
431 		blessed_n = base_regs_n + sve_regs_n;
432 		vec_regs = sve_regs;
433 	} else {
434 		blessed_n = base_regs_n + vregs_n;
435 		vec_regs = vregs;
436 	}
437 
438 	blessed_reg = calloc(blessed_n, sizeof(__u64));
439 	for (i = 0; i < base_regs_n; ++i)
440 		blessed_reg[i] = base_regs[i];
441 	for (i = 0; i < blessed_n - base_regs_n; ++i)
442 		blessed_reg[base_regs_n + i] = vec_regs[i];
443 
444 	for_each_new_reg(i)
445 		++new_regs;
446 
447 	for_each_missing_reg(i)
448 		++missing_regs;
449 
450 	if (new_regs || missing_regs) {
451 		printf("Number blessed registers: %5lld\n", blessed_n);
452 		printf("Number registers:         %5lld\n", reg_list->n);
453 	}
454 
455 	if (new_regs) {
456 		printf("\nThere are %d new registers.\n"
457 		       "Consider adding them to the blessed reg "
458 		       "list with the following lines:\n\n", new_regs);
459 		for_each_new_reg(i)
460 			print_reg(reg_list->reg[i]);
461 		putchar('\n');
462 	}
463 
464 	if (missing_regs) {
465 		printf("\nThere are %d missing registers.\n"
466 		       "The following lines are missing registers:\n\n", missing_regs);
467 		for_each_missing_reg(i)
468 			print_reg(blessed_reg[i]);
469 		putchar('\n');
470 	}
471 
472 	TEST_ASSERT(!missing_regs && !failed_get && !failed_set && !failed_reject,
473 		    "There are %d missing registers; "
474 		    "%d registers failed get; %d registers failed set; %d registers failed reject",
475 		    missing_regs, failed_get, failed_set, failed_reject);
476 
477 	return 0;
478 }
479 
480 /*
481  * The current blessed list was primed with the output of kernel version
482  * v4.15 with --core-reg-fixup and then later updated with new registers.
483  *
484  * The blessed list is up to date with kernel version v5.10-rc5
485  */
/* Registers common to both the SVE and non-SVE configurations. */
static __u64 base_regs[] = {
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[0]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[1]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[2]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[3]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[4]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[5]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[6]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[7]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[8]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[9]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[10]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[11]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[12]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[13]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[14]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[15]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[16]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[17]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[18]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[19]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[20]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[21]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[22]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[23]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[24]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[25]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[26]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[27]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[28]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[29]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[30]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.sp),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pc),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pstate),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(sp_el1),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(elr_el1),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[0]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[1]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[2]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[3]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[4]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpsr),
	KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpcr),
	KVM_REG_ARM_FW_REG(0),
	KVM_REG_ARM_FW_REG(1),
	KVM_REG_ARM_FW_REG(2),
	ARM64_SYS_REG(3, 3, 14, 3, 1),	/* CNTV_CTL_EL0 */
	ARM64_SYS_REG(3, 3, 14, 3, 2),	/* CNTV_CVAL_EL0 */
	ARM64_SYS_REG(3, 3, 14, 0, 2),
	ARM64_SYS_REG(3, 0, 0, 0, 0),	/* MIDR_EL1 */
	ARM64_SYS_REG(3, 0, 0, 0, 6),	/* REVIDR_EL1 */
	ARM64_SYS_REG(3, 1, 0, 0, 1),	/* CLIDR_EL1 */
	ARM64_SYS_REG(3, 1, 0, 0, 7),	/* AIDR_EL1 */
	ARM64_SYS_REG(3, 3, 0, 0, 1),	/* CTR_EL0 */
	/* NOTE(review): the following op0=2 ranges look like the debug
	 * breakpoint/watchpoint register file; not named upstream — confirm. */
	ARM64_SYS_REG(2, 0, 0, 0, 4),
	ARM64_SYS_REG(2, 0, 0, 0, 5),
	ARM64_SYS_REG(2, 0, 0, 0, 6),
	ARM64_SYS_REG(2, 0, 0, 0, 7),
	ARM64_SYS_REG(2, 0, 0, 1, 4),
	ARM64_SYS_REG(2, 0, 0, 1, 5),
	ARM64_SYS_REG(2, 0, 0, 1, 6),
	ARM64_SYS_REG(2, 0, 0, 1, 7),
	ARM64_SYS_REG(2, 0, 0, 2, 0),	/* MDCCINT_EL1 */
	ARM64_SYS_REG(2, 0, 0, 2, 2),	/* MDSCR_EL1 */
	ARM64_SYS_REG(2, 0, 0, 2, 4),
	ARM64_SYS_REG(2, 0, 0, 2, 5),
	ARM64_SYS_REG(2, 0, 0, 2, 6),
	ARM64_SYS_REG(2, 0, 0, 2, 7),
	ARM64_SYS_REG(2, 0, 0, 3, 4),
	ARM64_SYS_REG(2, 0, 0, 3, 5),
	ARM64_SYS_REG(2, 0, 0, 3, 6),
	ARM64_SYS_REG(2, 0, 0, 3, 7),
	ARM64_SYS_REG(2, 0, 0, 4, 4),
	ARM64_SYS_REG(2, 0, 0, 4, 5),
	ARM64_SYS_REG(2, 0, 0, 4, 6),
	ARM64_SYS_REG(2, 0, 0, 4, 7),
	ARM64_SYS_REG(2, 0, 0, 5, 4),
	ARM64_SYS_REG(2, 0, 0, 5, 5),
	ARM64_SYS_REG(2, 0, 0, 5, 6),
	ARM64_SYS_REG(2, 0, 0, 5, 7),
	ARM64_SYS_REG(2, 0, 0, 6, 4),
	ARM64_SYS_REG(2, 0, 0, 6, 5),
	ARM64_SYS_REG(2, 0, 0, 6, 6),
	ARM64_SYS_REG(2, 0, 0, 6, 7),
	ARM64_SYS_REG(2, 0, 0, 7, 4),
	ARM64_SYS_REG(2, 0, 0, 7, 5),
	ARM64_SYS_REG(2, 0, 0, 7, 6),
	ARM64_SYS_REG(2, 0, 0, 7, 7),
	ARM64_SYS_REG(2, 0, 0, 8, 4),
	ARM64_SYS_REG(2, 0, 0, 8, 5),
	ARM64_SYS_REG(2, 0, 0, 8, 6),
	ARM64_SYS_REG(2, 0, 0, 8, 7),
	ARM64_SYS_REG(2, 0, 0, 9, 4),
	ARM64_SYS_REG(2, 0, 0, 9, 5),
	ARM64_SYS_REG(2, 0, 0, 9, 6),
	ARM64_SYS_REG(2, 0, 0, 9, 7),
	ARM64_SYS_REG(2, 0, 0, 10, 4),
	ARM64_SYS_REG(2, 0, 0, 10, 5),
	ARM64_SYS_REG(2, 0, 0, 10, 6),
	ARM64_SYS_REG(2, 0, 0, 10, 7),
	ARM64_SYS_REG(2, 0, 0, 11, 4),
	ARM64_SYS_REG(2, 0, 0, 11, 5),
	ARM64_SYS_REG(2, 0, 0, 11, 6),
	ARM64_SYS_REG(2, 0, 0, 11, 7),
	ARM64_SYS_REG(2, 0, 0, 12, 4),
	ARM64_SYS_REG(2, 0, 0, 12, 5),
	ARM64_SYS_REG(2, 0, 0, 12, 6),
	ARM64_SYS_REG(2, 0, 0, 12, 7),
	ARM64_SYS_REG(2, 0, 0, 13, 4),
	ARM64_SYS_REG(2, 0, 0, 13, 5),
	ARM64_SYS_REG(2, 0, 0, 13, 6),
	ARM64_SYS_REG(2, 0, 0, 13, 7),
	ARM64_SYS_REG(2, 0, 0, 14, 4),
	ARM64_SYS_REG(2, 0, 0, 14, 5),
	ARM64_SYS_REG(2, 0, 0, 14, 6),
	ARM64_SYS_REG(2, 0, 0, 14, 7),
	ARM64_SYS_REG(2, 0, 0, 15, 4),
	ARM64_SYS_REG(2, 0, 0, 15, 5),
	ARM64_SYS_REG(2, 0, 0, 15, 6),
	ARM64_SYS_REG(2, 0, 0, 15, 7),
	ARM64_SYS_REG(2, 4, 0, 7, 0),	/* DBGVCR32_EL2 */
	ARM64_SYS_REG(3, 0, 0, 0, 5),	/* MPIDR_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 0),	/* ID_PFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 1),	/* ID_PFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 2),	/* ID_DFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 3),	/* ID_AFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 4),	/* ID_MMFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 5),	/* ID_MMFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 6),	/* ID_MMFR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 7),	/* ID_MMFR3_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 0),	/* ID_ISAR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 1),	/* ID_ISAR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 2),	/* ID_ISAR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 3),	/* ID_ISAR3_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 4),	/* ID_ISAR4_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 5),	/* ID_ISAR5_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 6),	/* ID_MMFR4_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 7),	/* ID_ISAR6_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 0),	/* MVFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 1),	/* MVFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 2),	/* MVFR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 3),
	ARM64_SYS_REG(3, 0, 0, 3, 4),	/* ID_PFR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 5),	/* ID_DFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 6),	/* ID_MMFR5_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 7),
	ARM64_SYS_REG(3, 0, 0, 4, 0),	/* ID_AA64PFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 4, 1),	/* ID_AA64PFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 4, 2),
	ARM64_SYS_REG(3, 0, 0, 4, 3),
	ARM64_SYS_REG(3, 0, 0, 4, 4),	/* ID_AA64ZFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 4, 5),
	ARM64_SYS_REG(3, 0, 0, 4, 6),
	ARM64_SYS_REG(3, 0, 0, 4, 7),
	ARM64_SYS_REG(3, 0, 0, 5, 0),	/* ID_AA64DFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 5, 1),	/* ID_AA64DFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 5, 2),
	ARM64_SYS_REG(3, 0, 0, 5, 3),
	ARM64_SYS_REG(3, 0, 0, 5, 4),	/* ID_AA64AFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 5, 5),	/* ID_AA64AFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 5, 6),
	ARM64_SYS_REG(3, 0, 0, 5, 7),
	ARM64_SYS_REG(3, 0, 0, 6, 0),	/* ID_AA64ISAR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 6, 1),	/* ID_AA64ISAR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 6, 2),
	ARM64_SYS_REG(3, 0, 0, 6, 3),
	ARM64_SYS_REG(3, 0, 0, 6, 4),
	ARM64_SYS_REG(3, 0, 0, 6, 5),
	ARM64_SYS_REG(3, 0, 0, 6, 6),
	ARM64_SYS_REG(3, 0, 0, 6, 7),
	ARM64_SYS_REG(3, 0, 0, 7, 0),	/* ID_AA64MMFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 7, 1),	/* ID_AA64MMFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 7, 2),	/* ID_AA64MMFR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 7, 3),
	ARM64_SYS_REG(3, 0, 0, 7, 4),
	ARM64_SYS_REG(3, 0, 0, 7, 5),
	ARM64_SYS_REG(3, 0, 0, 7, 6),
	ARM64_SYS_REG(3, 0, 0, 7, 7),
	ARM64_SYS_REG(3, 0, 1, 0, 0),	/* SCTLR_EL1 */
	ARM64_SYS_REG(3, 0, 1, 0, 1),	/* ACTLR_EL1 */
	ARM64_SYS_REG(3, 0, 1, 0, 2),	/* CPACR_EL1 */
	ARM64_SYS_REG(3, 0, 2, 0, 0),	/* TTBR0_EL1 */
	ARM64_SYS_REG(3, 0, 2, 0, 1),	/* TTBR1_EL1 */
	ARM64_SYS_REG(3, 0, 2, 0, 2),	/* TCR_EL1 */
	ARM64_SYS_REG(3, 0, 5, 1, 0),	/* AFSR0_EL1 */
	ARM64_SYS_REG(3, 0, 5, 1, 1),	/* AFSR1_EL1 */
	ARM64_SYS_REG(3, 0, 5, 2, 0),	/* ESR_EL1 */
	ARM64_SYS_REG(3, 0, 6, 0, 0),	/* FAR_EL1 */
	ARM64_SYS_REG(3, 0, 7, 4, 0),	/* PAR_EL1 */
	ARM64_SYS_REG(3, 0, 9, 14, 1),	/* PMINTENSET_EL1 */
	ARM64_SYS_REG(3, 0, 9, 14, 2),	/* PMINTENCLR_EL1 */
	ARM64_SYS_REG(3, 0, 10, 2, 0),	/* MAIR_EL1 */
	ARM64_SYS_REG(3, 0, 10, 3, 0),	/* AMAIR_EL1 */
	ARM64_SYS_REG(3, 0, 12, 0, 0),	/* VBAR_EL1 */
	ARM64_SYS_REG(3, 0, 12, 1, 1),	/* DISR_EL1 */
	ARM64_SYS_REG(3, 0, 13, 0, 1),	/* CONTEXTIDR_EL1 */
	ARM64_SYS_REG(3, 0, 13, 0, 4),	/* TPIDR_EL1 */
	ARM64_SYS_REG(3, 0, 14, 1, 0),	/* CNTKCTL_EL1 */
	ARM64_SYS_REG(3, 2, 0, 0, 0),	/* CSSELR_EL1 */
	ARM64_SYS_REG(3, 3, 9, 12, 0),	/* PMCR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 1),	/* PMCNTENSET_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 2),	/* PMCNTENCLR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 3),	/* PMOVSCLR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 4),	/* PMSWINC_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 5),	/* PMSELR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 13, 0),	/* PMCCNTR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 14, 0),	/* PMUSERENR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 14, 3),	/* PMOVSSET_EL0 */
	ARM64_SYS_REG(3, 3, 13, 0, 2),	/* TPIDR_EL0 */
	ARM64_SYS_REG(3, 3, 13, 0, 3),	/* TPIDRRO_EL0 */
	/* NOTE(review): the op0=3/crn=14/crm=8..15 ranges look like the PMU
	 * event counter/type register file; not named upstream — confirm. */
	ARM64_SYS_REG(3, 3, 14, 8, 0),
	ARM64_SYS_REG(3, 3, 14, 8, 1),
	ARM64_SYS_REG(3, 3, 14, 8, 2),
	ARM64_SYS_REG(3, 3, 14, 8, 3),
	ARM64_SYS_REG(3, 3, 14, 8, 4),
	ARM64_SYS_REG(3, 3, 14, 8, 5),
	ARM64_SYS_REG(3, 3, 14, 8, 6),
	ARM64_SYS_REG(3, 3, 14, 8, 7),
	ARM64_SYS_REG(3, 3, 14, 9, 0),
	ARM64_SYS_REG(3, 3, 14, 9, 1),
	ARM64_SYS_REG(3, 3, 14, 9, 2),
	ARM64_SYS_REG(3, 3, 14, 9, 3),
	ARM64_SYS_REG(3, 3, 14, 9, 4),
	ARM64_SYS_REG(3, 3, 14, 9, 5),
	ARM64_SYS_REG(3, 3, 14, 9, 6),
	ARM64_SYS_REG(3, 3, 14, 9, 7),
	ARM64_SYS_REG(3, 3, 14, 10, 0),
	ARM64_SYS_REG(3, 3, 14, 10, 1),
	ARM64_SYS_REG(3, 3, 14, 10, 2),
	ARM64_SYS_REG(3, 3, 14, 10, 3),
	ARM64_SYS_REG(3, 3, 14, 10, 4),
	ARM64_SYS_REG(3, 3, 14, 10, 5),
	ARM64_SYS_REG(3, 3, 14, 10, 6),
	ARM64_SYS_REG(3, 3, 14, 10, 7),
	ARM64_SYS_REG(3, 3, 14, 11, 0),
	ARM64_SYS_REG(3, 3, 14, 11, 1),
	ARM64_SYS_REG(3, 3, 14, 11, 2),
	ARM64_SYS_REG(3, 3, 14, 11, 3),
	ARM64_SYS_REG(3, 3, 14, 11, 4),
	ARM64_SYS_REG(3, 3, 14, 11, 5),
	ARM64_SYS_REG(3, 3, 14, 11, 6),
	ARM64_SYS_REG(3, 3, 14, 12, 0),
	ARM64_SYS_REG(3, 3, 14, 12, 1),
	ARM64_SYS_REG(3, 3, 14, 12, 2),
	ARM64_SYS_REG(3, 3, 14, 12, 3),
	ARM64_SYS_REG(3, 3, 14, 12, 4),
	ARM64_SYS_REG(3, 3, 14, 12, 5),
	ARM64_SYS_REG(3, 3, 14, 12, 6),
	ARM64_SYS_REG(3, 3, 14, 12, 7),
	ARM64_SYS_REG(3, 3, 14, 13, 0),
	ARM64_SYS_REG(3, 3, 14, 13, 1),
	ARM64_SYS_REG(3, 3, 14, 13, 2),
	ARM64_SYS_REG(3, 3, 14, 13, 3),
	ARM64_SYS_REG(3, 3, 14, 13, 4),
	ARM64_SYS_REG(3, 3, 14, 13, 5),
	ARM64_SYS_REG(3, 3, 14, 13, 6),
	ARM64_SYS_REG(3, 3, 14, 13, 7),
	ARM64_SYS_REG(3, 3, 14, 14, 0),
	ARM64_SYS_REG(3, 3, 14, 14, 1),
	ARM64_SYS_REG(3, 3, 14, 14, 2),
	ARM64_SYS_REG(3, 3, 14, 14, 3),
	ARM64_SYS_REG(3, 3, 14, 14, 4),
	ARM64_SYS_REG(3, 3, 14, 14, 5),
	ARM64_SYS_REG(3, 3, 14, 14, 6),
	ARM64_SYS_REG(3, 3, 14, 14, 7),
	ARM64_SYS_REG(3, 3, 14, 15, 0),
	ARM64_SYS_REG(3, 3, 14, 15, 1),
	ARM64_SYS_REG(3, 3, 14, 15, 2),
	ARM64_SYS_REG(3, 3, 14, 15, 3),
	ARM64_SYS_REG(3, 3, 14, 15, 4),
	ARM64_SYS_REG(3, 3, 14, 15, 5),
	ARM64_SYS_REG(3, 3, 14, 15, 6),
	ARM64_SYS_REG(3, 3, 14, 15, 7),	/* PMCCFILTR_EL0 */
	ARM64_SYS_REG(3, 4, 3, 0, 0),	/* DACR32_EL2 */
	ARM64_SYS_REG(3, 4, 5, 0, 1),	/* IFSR32_EL2 */
	ARM64_SYS_REG(3, 4, 5, 3, 0),	/* FPEXC32_EL2 */
};
static __u64 base_regs_n = ARRAY_SIZE(base_regs);
765 
/* Blessed FP/SIMD vector registers, used for the non-SVE configuration. */
static __u64 vregs[] = {
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[1]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[2]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[3]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[4]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[5]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[6]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[7]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[8]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[9]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[10]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[11]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[12]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[13]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[14]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[15]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[16]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[17]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[18]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[19]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[20]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[21]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[22]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[23]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[24]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[25]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[26]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[27]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[28]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[29]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[30]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]),
};
static __u64 vregs_n = ARRAY_SIZE(vregs);
801 
/* Blessed SVE registers, used instead of vregs[] when built with REG_LIST_SVE. */
static __u64 sve_regs[] = {
	KVM_REG_ARM64_SVE_VLS,
	KVM_REG_ARM64_SVE_ZREG(0, 0),
	KVM_REG_ARM64_SVE_ZREG(1, 0),
	KVM_REG_ARM64_SVE_ZREG(2, 0),
	KVM_REG_ARM64_SVE_ZREG(3, 0),
	KVM_REG_ARM64_SVE_ZREG(4, 0),
	KVM_REG_ARM64_SVE_ZREG(5, 0),
	KVM_REG_ARM64_SVE_ZREG(6, 0),
	KVM_REG_ARM64_SVE_ZREG(7, 0),
	KVM_REG_ARM64_SVE_ZREG(8, 0),
	KVM_REG_ARM64_SVE_ZREG(9, 0),
	KVM_REG_ARM64_SVE_ZREG(10, 0),
	KVM_REG_ARM64_SVE_ZREG(11, 0),
	KVM_REG_ARM64_SVE_ZREG(12, 0),
	KVM_REG_ARM64_SVE_ZREG(13, 0),
	KVM_REG_ARM64_SVE_ZREG(14, 0),
	KVM_REG_ARM64_SVE_ZREG(15, 0),
	KVM_REG_ARM64_SVE_ZREG(16, 0),
	KVM_REG_ARM64_SVE_ZREG(17, 0),
	KVM_REG_ARM64_SVE_ZREG(18, 0),
	KVM_REG_ARM64_SVE_ZREG(19, 0),
	KVM_REG_ARM64_SVE_ZREG(20, 0),
	KVM_REG_ARM64_SVE_ZREG(21, 0),
	KVM_REG_ARM64_SVE_ZREG(22, 0),
	KVM_REG_ARM64_SVE_ZREG(23, 0),
	KVM_REG_ARM64_SVE_ZREG(24, 0),
	KVM_REG_ARM64_SVE_ZREG(25, 0),
	KVM_REG_ARM64_SVE_ZREG(26, 0),
	KVM_REG_ARM64_SVE_ZREG(27, 0),
	KVM_REG_ARM64_SVE_ZREG(28, 0),
	KVM_REG_ARM64_SVE_ZREG(29, 0),
	KVM_REG_ARM64_SVE_ZREG(30, 0),
	KVM_REG_ARM64_SVE_ZREG(31, 0),
	KVM_REG_ARM64_SVE_PREG(0, 0),
	KVM_REG_ARM64_SVE_PREG(1, 0),
	KVM_REG_ARM64_SVE_PREG(2, 0),
	KVM_REG_ARM64_SVE_PREG(3, 0),
	KVM_REG_ARM64_SVE_PREG(4, 0),
	KVM_REG_ARM64_SVE_PREG(5, 0),
	KVM_REG_ARM64_SVE_PREG(6, 0),
	KVM_REG_ARM64_SVE_PREG(7, 0),
	KVM_REG_ARM64_SVE_PREG(8, 0),
	KVM_REG_ARM64_SVE_PREG(9, 0),
	KVM_REG_ARM64_SVE_PREG(10, 0),
	KVM_REG_ARM64_SVE_PREG(11, 0),
	KVM_REG_ARM64_SVE_PREG(12, 0),
	KVM_REG_ARM64_SVE_PREG(13, 0),
	KVM_REG_ARM64_SVE_PREG(14, 0),
	KVM_REG_ARM64_SVE_PREG(15, 0),
	KVM_REG_ARM64_SVE_FFR(0),
	ARM64_SYS_REG(3, 0, 1, 2, 0),   /* ZCR_EL1 */
};
static __u64 sve_regs_n = ARRAY_SIZE(sve_regs);
856 
/*
 * Registers that KVM_SET_ONE_REG must reject with EPERM once the vcpu
 * has been finalized (see the rejects_set check in main()).
 */
static __u64 rejects_set[] = {
#ifdef REG_LIST_SVE
	KVM_REG_ARM64_SVE_VLS,
#endif
};
static __u64 rejects_set_n = ARRAY_SIZE(rejects_set);
863