xref: /linux/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c (revision 164666fa66669d437bdcc8d5f1744a2aee73be41)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/kvm.h>
3 #include <linux/psp-sev.h>
4 #include <stdio.h>
5 #include <sys/ioctl.h>
6 #include <stdlib.h>
7 #include <errno.h>
8 #include <pthread.h>
9 
10 #include "test_util.h"
11 #include "kvm_util.h"
12 #include "processor.h"
13 #include "svm_util.h"
14 #include "kselftest.h"
15 #include "../lib/kvm_util_internal.h"
16 
17 #define SEV_POLICY_ES 0b100
18 
19 #define NR_MIGRATE_TEST_VCPUS 4
20 #define NR_MIGRATE_TEST_VMS 3
21 #define NR_LOCK_TESTING_THREADS 3
22 #define NR_LOCK_TESTING_ITERATIONS 10000
23 
24 static int __sev_ioctl(int vm_fd, int cmd_id, void *data, __u32 *fw_error)
25 {
26 	struct kvm_sev_cmd cmd = {
27 		.id = cmd_id,
28 		.data = (uint64_t)data,
29 		.sev_fd = open_sev_dev_path_or_exit(),
30 	};
31 	int ret;
32 
33 	ret = ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
34 	*fw_error = cmd.error;
35 	return ret;
36 }
37 
38 static void sev_ioctl(int vm_fd, int cmd_id, void *data)
39 {
40 	int ret;
41 	__u32 fw_error;
42 
43 	ret = __sev_ioctl(vm_fd, cmd_id, data, &fw_error);
44 	TEST_ASSERT(ret == 0 && fw_error == SEV_RET_SUCCESS,
45 		    "%d failed: return code: %d, errno: %d, fw error: %d",
46 		    cmd_id, ret, errno, fw_error);
47 }
48 
49 static struct kvm_vm *sev_vm_create(bool es)
50 {
51 	struct kvm_vm *vm;
52 	struct kvm_sev_launch_start start = { 0 };
53 	int i;
54 
55 	vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
56 	sev_ioctl(vm->fd, es ? KVM_SEV_ES_INIT : KVM_SEV_INIT, NULL);
57 	for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
58 		vm_vcpu_add(vm, i);
59 	if (es)
60 		start.policy |= SEV_POLICY_ES;
61 	sev_ioctl(vm->fd, KVM_SEV_LAUNCH_START, &start);
62 	if (es)
63 		sev_ioctl(vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
64 	return vm;
65 }
66 
67 static struct kvm_vm *aux_vm_create(bool with_vcpus)
68 {
69 	struct kvm_vm *vm;
70 	int i;
71 
72 	vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
73 	if (!with_vcpus)
74 		return vm;
75 
76 	for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
77 		vm_vcpu_add(vm, i);
78 
79 	return vm;
80 }
81 
82 static int __sev_migrate_from(int dst_fd, int src_fd)
83 {
84 	struct kvm_enable_cap cap = {
85 		.cap = KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM,
86 		.args = { src_fd }
87 	};
88 
89 	return ioctl(dst_fd, KVM_ENABLE_CAP, &cap);
90 }
91 
92 
/* Migrate and assert success. */
static void sev_migrate_from(int dst_fd, int src_fd)
{
	int rc = __sev_migrate_from(dst_fd, src_fd);

	TEST_ASSERT(!rc, "Migration failed, ret: %d, errno: %d\n", rc, errno);
}
100 
101 static void test_sev_migrate_from(bool es)
102 {
103 	struct kvm_vm *src_vm;
104 	struct kvm_vm *dst_vms[NR_MIGRATE_TEST_VMS];
105 	int i, ret;
106 
107 	src_vm = sev_vm_create(es);
108 	for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i)
109 		dst_vms[i] = aux_vm_create(true);
110 
111 	/* Initial migration from the src to the first dst. */
112 	sev_migrate_from(dst_vms[0]->fd, src_vm->fd);
113 
114 	for (i = 1; i < NR_MIGRATE_TEST_VMS; i++)
115 		sev_migrate_from(dst_vms[i]->fd, dst_vms[i - 1]->fd);
116 
117 	/* Migrate the guest back to the original VM. */
118 	ret = __sev_migrate_from(src_vm->fd, dst_vms[NR_MIGRATE_TEST_VMS - 1]->fd);
119 	TEST_ASSERT(ret == -1 && errno == EIO,
120 		    "VM that was migrated from should be dead. ret %d, errno: %d\n", ret,
121 		    errno);
122 
123 	kvm_vm_free(src_vm);
124 	for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i)
125 		kvm_vm_free(dst_vms[i]);
126 }
127 
/*
 * Per-thread arguments for the lock-contention stress test: the VM this
 * thread migrates into, plus the fds of every participating VM so each
 * iteration can pick a different migration source.
 */
struct locking_thread_input {
	struct kvm_vm *vm;
	int source_fds[NR_LOCK_TESTING_THREADS];
};
132 
133 static void *locking_test_thread(void *arg)
134 {
135 	int i, j;
136 	struct locking_thread_input *input = (struct locking_thread_input *)arg;
137 
138 	for (i = 0; i < NR_LOCK_TESTING_ITERATIONS; ++i) {
139 		j = i % NR_LOCK_TESTING_THREADS;
140 		__sev_migrate_from(input->vm->fd, input->source_fds[j]);
141 	}
142 
143 	return NULL;
144 }
145 
146 static void test_sev_migrate_locking(void)
147 {
148 	struct locking_thread_input input[NR_LOCK_TESTING_THREADS];
149 	pthread_t pt[NR_LOCK_TESTING_THREADS];
150 	int i;
151 
152 	for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i) {
153 		input[i].vm = sev_vm_create(/* es= */ false);
154 		input[0].source_fds[i] = input[i].vm->fd;
155 	}
156 	for (i = 1; i < NR_LOCK_TESTING_THREADS; ++i)
157 		memcpy(input[i].source_fds, input[0].source_fds,
158 		       sizeof(input[i].source_fds));
159 
160 	for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
161 		pthread_create(&pt[i], NULL, locking_test_thread, &input[i]);
162 
163 	for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
164 		pthread_join(pt[i], NULL);
165 	for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
166 		kvm_vm_free(input[i].vm);
167 }
168 
/*
 * Verify that KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM rejects every invalid
 * source/destination pairing with EINVAL: SEV <-> SEV-ES mixing, vCPU
 * count mismatch for SEV-ES, an SEV-ES source whose VMSAs were never
 * encrypted, and a source with no SEV context at all.
 */
static void test_sev_migrate_parameters(void)
{
	struct kvm_vm *sev_vm, *sev_es_vm, *vm_no_vcpu, *vm_no_sev,
		*sev_es_vm_no_vmsa;
	int ret;

	sev_vm = sev_vm_create(/* es= */ false);
	sev_es_vm = sev_vm_create(/* es= */ true);
	vm_no_vcpu = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
	vm_no_sev = aux_vm_create(true);
	/* SEV-ES VM that has a vCPU but never ran LAUNCH_UPDATE_VMSA. */
	sev_es_vm_no_vmsa = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
	sev_ioctl(sev_es_vm_no_vmsa->fd, KVM_SEV_ES_INIT, NULL);
	vm_vcpu_add(sev_es_vm_no_vmsa, 1);

	/* Destination already has an SEV context — must be rejected. */
	ret = __sev_migrate_from(sev_vm->fd, sev_es_vm->fd);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"Should not be able migrate to SEV enabled VM. ret: %d, errno: %d\n",
		ret, errno);

	ret = __sev_migrate_from(sev_es_vm->fd, sev_vm->fd);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"Should not be able migrate to SEV-ES enabled VM. ret: %d, errno: %d\n",
		ret, errno);

	/* SEV-ES requires dst and src to have the same vCPU count. */
	ret = __sev_migrate_from(vm_no_vcpu->fd, sev_es_vm->fd);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"SEV-ES migrations require same number of vCPUS. ret: %d, errno: %d\n",
		ret, errno);

	/* SEV-ES source must have completed LAUNCH_UPDATE_VMSA. */
	ret = __sev_migrate_from(vm_no_vcpu->fd, sev_es_vm_no_vmsa->fd);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"SEV-ES migrations require UPDATE_VMSA. ret %d, errno: %d\n",
		ret, errno);

	/* A source without SEV enabled has nothing to migrate. */
	ret = __sev_migrate_from(vm_no_vcpu->fd, vm_no_sev->fd);
	TEST_ASSERT(ret == -1 && errno == EINVAL,
		    "Migrations require SEV enabled. ret %d, errno: %d\n", ret,
		    errno);

	kvm_vm_free(sev_vm);
	kvm_vm_free(sev_es_vm);
	kvm_vm_free(sev_es_vm_no_vmsa);
	kvm_vm_free(vm_no_vcpu);
	kvm_vm_free(vm_no_sev);
}
218 
219 static int __sev_mirror_create(int dst_fd, int src_fd)
220 {
221 	struct kvm_enable_cap cap = {
222 		.cap = KVM_CAP_VM_COPY_ENC_CONTEXT_FROM,
223 		.args = { src_fd }
224 	};
225 
226 	return ioctl(dst_fd, KVM_ENABLE_CAP, &cap);
227 }
228 
229 
/* Mirror and assert success. */
static void sev_mirror_create(int dst_fd, int src_fd)
{
	int rc = __sev_mirror_create(dst_fd, src_fd);

	TEST_ASSERT(!rc, "Copying context failed, ret: %d, errno: %d\n", rc, errno);
}
237 
238 static void verify_mirror_allowed_cmds(int vm_fd)
239 {
240 	struct kvm_sev_guest_status status;
241 
242 	for (int cmd_id = KVM_SEV_INIT; cmd_id < KVM_SEV_NR_MAX; ++cmd_id) {
243 		int ret;
244 		__u32 fw_error;
245 
246 		/*
247 		 * These commands are allowed for mirror VMs, all others are
248 		 * not.
249 		 */
250 		switch (cmd_id) {
251 		case KVM_SEV_LAUNCH_UPDATE_VMSA:
252 		case KVM_SEV_GUEST_STATUS:
253 		case KVM_SEV_DBG_DECRYPT:
254 		case KVM_SEV_DBG_ENCRYPT:
255 			continue;
256 		default:
257 			break;
258 		}
259 
260 		/*
261 		 * These commands should be disallowed before the data
262 		 * parameter is examined so NULL is OK here.
263 		 */
264 		ret = __sev_ioctl(vm_fd, cmd_id, NULL, &fw_error);
265 		TEST_ASSERT(
266 			ret == -1 && errno == EINVAL,
267 			"Should not be able call command: %d. ret: %d, errno: %d\n",
268 			cmd_id, ret, errno);
269 	}
270 
271 	sev_ioctl(vm_fd, KVM_SEV_GUEST_STATUS, &status);
272 }
273 
274 static void test_sev_mirror(bool es)
275 {
276 	struct kvm_vm *src_vm, *dst_vm;
277 	int i;
278 
279 	src_vm = sev_vm_create(es);
280 	dst_vm = aux_vm_create(false);
281 
282 	sev_mirror_create(dst_vm->fd, src_vm->fd);
283 
284 	/* Check that we can complete creation of the mirror VM.  */
285 	for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
286 		vm_vcpu_add(dst_vm, i);
287 
288 	if (es)
289 		sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
290 
291 	verify_mirror_allowed_cmds(dst_vm->fd);
292 
293 	kvm_vm_free(src_vm);
294 	kvm_vm_free(dst_vm);
295 }
296 
/*
 * Verify that KVM_CAP_VM_COPY_ENC_CONTEXT_FROM rejects every invalid
 * pairing with EINVAL: self-mirroring, copying onto a VM that already
 * has an SEV context, copying from a non-SEV source, and copying onto
 * a destination that already has vCPUs.
 */
static void test_sev_mirror_parameters(void)
{
	struct kvm_vm *sev_vm, *sev_es_vm, *vm_no_vcpu, *vm_with_vcpu;
	int ret;

	sev_vm = sev_vm_create(/* es= */ false);
	sev_es_vm = sev_vm_create(/* es= */ true);
	vm_with_vcpu = aux_vm_create(true);
	vm_no_vcpu = aux_vm_create(false);

	/* A VM cannot be its own mirror. */
	ret = __sev_mirror_create(sev_vm->fd, sev_vm->fd);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"Should not be able copy context to self. ret: %d, errno: %d\n",
		ret, errno);

	/* Destination must not already have an SEV context. */
	ret = __sev_mirror_create(sev_vm->fd, sev_es_vm->fd);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"Should not be able copy context to SEV enabled VM. ret: %d, errno: %d\n",
		ret, errno);

	ret = __sev_mirror_create(sev_es_vm->fd, sev_vm->fd);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"Should not be able copy context to SEV-ES enabled VM. ret: %d, errno: %d\n",
		ret, errno);

	/* Source must have an SEV context to copy. */
	ret = __sev_mirror_create(vm_no_vcpu->fd, vm_with_vcpu->fd);
	TEST_ASSERT(ret == -1 && errno == EINVAL,
		    "Copy context requires SEV enabled. ret %d, errno: %d\n", ret,
		    errno);

	/* Destination must not have created any vCPUs yet. */
	ret = __sev_mirror_create(vm_with_vcpu->fd, sev_vm->fd);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"SEV copy context requires no vCPUS on the destination. ret: %d, errno: %d\n",
		ret, errno);

	kvm_vm_free(sev_vm);
	kvm_vm_free(sev_es_vm);
	kvm_vm_free(vm_with_vcpu);
	kvm_vm_free(vm_no_vcpu);
}
341 
/*
 * Verify the interaction between moving (migrating) and copying
 * (mirroring) an SEV context: a VM that has live mirrors cannot be
 * migrated (EBUSY), a mirror itself can be migrated, and once all
 * mirrors are gone the owner becomes migratable again.
 */
static void test_sev_move_copy(void)
{
	struct kvm_vm *dst_vm, *sev_vm, *mirror_vm, *dst_mirror_vm;
	int ret;

	sev_vm = sev_vm_create(/* es= */ false);
	dst_vm = aux_vm_create(true);
	mirror_vm = aux_vm_create(false);
	dst_mirror_vm = aux_vm_create(false);

	/* With a mirror alive, migrating the owner must fail. */
	sev_mirror_create(mirror_vm->fd, sev_vm->fd);
	ret = __sev_migrate_from(dst_vm->fd, sev_vm->fd);
	TEST_ASSERT(ret == -1 && errno == EBUSY,
		    "Cannot migrate VM that has mirrors. ret %d, errno: %d\n", ret,
		    errno);

	/* The mirror itself can be migrated.  */
	sev_migrate_from(dst_mirror_vm->fd, mirror_vm->fd);
	ret = __sev_migrate_from(dst_vm->fd, sev_vm->fd);
	TEST_ASSERT(ret == -1 && errno == EBUSY,
		    "Cannot migrate VM that has mirrors. ret %d, errno: %d\n", ret,
		    errno);

	/*
	 * mirror_vm is not a mirror anymore, dst_mirror_vm is.  Thus,
	 * the owner can be copied as soon as dst_mirror_vm is gone.
	 */
	kvm_vm_free(dst_mirror_vm);
	sev_migrate_from(dst_vm->fd, sev_vm->fd);

	kvm_vm_free(mirror_vm);
	kvm_vm_free(dst_vm);
	kvm_vm_free(sev_vm);
}
376 
377 int main(int argc, char *argv[])
378 {
379 	if (kvm_check_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM)) {
380 		test_sev_migrate_from(/* es= */ false);
381 		test_sev_migrate_from(/* es= */ true);
382 		test_sev_migrate_locking();
383 		test_sev_migrate_parameters();
384 		if (kvm_check_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM))
385 			test_sev_move_copy();
386 	}
387 	if (kvm_check_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM)) {
388 		test_sev_mirror(/* es= */ false);
389 		test_sev_mirror(/* es= */ true);
390 		test_sev_mirror_parameters();
391 	}
392 	return 0;
393 }
394