xref: /illumos-gate/usr/src/uts/i86pc/ml/bios_call_src.S (revision 5d9d9091f564c198a760790b0bfa72c44e17912b)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27/*
28 * Copyright 2019 Joyent, Inc.
29 */
30
31#include <sys/segments.h>
32#include <sys/controlregs.h>
33
34/*
35 * Do a call into BIOS.  This goes down to 16 bit real mode and back again.
36 */
37
38/*
39 * instruction prefix to change operand size in instruction
40 */
41#define DATASZ	.byte 0x66;
42
43	.globl	_start
44_start:
45
46	/*
47	 * Save caller registers
48	 */
49	movq	%rbp, save_rbp
50	movq	%rsp, save_rsp
51	movq	%rbx, save_rbx
52	movq	%rsi, save_rsi
53	movq	%r12, save_r12
54	movq	%r13, save_r13
55	movq	%r14, save_r14
56	movq	%r15, save_r15
57
58	/* Switch to a low memory stack */
59	movq	$_start, %rsp
60
61	/* put interrupt number in %bl */
62	movq	%rdi, %rbx
63
64	/* allocate space for args on stack */
65	subq	$18, %rsp
66	movq	%rsp, %rdi
67
68	/* copy args from high memory to stack in low memory */
69	cld
70	movl	$18, %ecx
71	rep
72	movsb
73
74	/*
75	 * Save system registers
76	 */
77	sidt	save_idt
78	sgdt	save_gdt
79	str	save_tr
80	movw	%cs, save_cs
81	movw	%ds, save_ds
82	movw	%ss, save_ss
83	movw	%es, save_es
84	movw	%fs, save_fs
85	movw	%gs, save_gs
86	movq	%cr4, %rax
87	movq	%rax, save_cr4
88	movq	%cr3, %rax
89	movq	%rax, save_cr3
90	movq	%cr0, %rax
91	movq	%rax, save_cr0
92
93	/*
94	 * save/clear the extension parts of the fs/gs base registers and cr8
95	 */
96	movl	$MSR_AMD_FSBASE, %ecx
97	rdmsr
98	movl	%eax, save_fsbase
99	movl	%edx, save_fsbase + 4
100	xorl	%eax, %eax
101	xorl	%edx, %edx
102	wrmsr
103
104	movl	$MSR_AMD_GSBASE, %ecx
105	rdmsr
106	movl	%eax, save_gsbase
107	movl	%edx, save_gsbase + 4
108	xorl	%eax, %eax
109	xorl	%edx, %edx
110	wrmsr
111
112	movl	$MSR_AMD_KGSBASE, %ecx
113	rdmsr
114	movl	%eax, save_kgsbase
115	movl	%edx, save_kgsbase + 4
116	xorl	%eax, %eax
117	xorl	%edx, %edx
118	wrmsr
119
120	movq	%cr8, %rax
121	movq	%rax, save_cr8
122
123	/*
124	 * set offsets in 16 bit ljmp instructions below
125	 */
126	leaq	enter_real, %rax
127	movw	%ax, enter_real_ljmp
128
129	leaq	enter_protected, %rax
130	movw	%ax, enter_protected_ljmp
131
132	leaq	gdt_info, %rax
133	movw	%ax, gdt_info_load
134
135	/*
136	 * insert BIOS interrupt number into later instruction
137	 */
138	movb    %bl, int_instr+1
139	jmp     1f
1401:
141
142	/*
143	 * zero out all the registers to make sure they're 16 bit clean
144	 */
145	xorq	%r8, %r8
146	xorq	%r9, %r9
147	xorq	%r10, %r10
148	xorq	%r11, %r11
149	xorq	%r12, %r12
150	xorq	%r13, %r13
151	xorq	%r14, %r14
152	xorq	%r15, %r15
153	xorl	%eax, %eax
154	xorl	%ebx, %ebx
155	xorl	%ecx, %ecx
156	xorl	%edx, %edx
157	xorl	%ebp, %ebp
158	xorl	%esi, %esi
159	xorl	%edi, %edi
160
161	/*
162	 * Load our own GDT/IDT
163	 */
164	lgdt	gdt_info
165	lidt	idt_info
166
167	/*
168	 * Shut down 64 bit mode. First get into compatibility mode.
169	 */
170	movq	%rsp, %rax
171	pushq	$B32DATA_SEL
172	pushq	%rax
173	pushf
174	pushq	$B32CODE_SEL
175	pushq	$1f
176	iretq
1771:
178	.code32
179
180	/*
181	 * disable long mode by:
182	 * - shutting down paging (bit 31 of cr0)
183	 * - flushing the TLB
184	 * - disabling LME (long made enable) in EFER (extended feature reg)
185	 */
186	movl	%cr0, %eax
187	btcl	$31, %eax		/* disable paging */
188	movl	%eax, %cr0
189	ljmp	$B32CODE_SEL, $1f
1901:
191
192	xorl	%eax, %eax
193	movl	%eax, %cr3		/* flushes TLB */
194
195	movl	$MSR_AMD_EFER, %ecx	/* Extended Feature Enable */
196	rdmsr
197	btcl	$8, %eax		/* bit 8 Long Mode Enable bit */
198	wrmsr
199
200	/*
201	 * ok.. now enter 16 bit mode, so we can shut down protected mode
202	 *
203	 * We'll have to act like we're still in a 32 bit section.
204	 * So the code from this point has DATASZ in front of it to get 32 bit
205	 * operands. If DATASZ is missing the operands will be 16 bit.
206	 *
207	 * Now shut down paging and protected (ie. segmentation) modes.
208	 */
209	ljmp	$B16CODE_SEL, $enter_16_bit
210enter_16_bit:
211
212	/*
213	 * Make sure hidden parts of segment registers are 16 bit clean
214	 */
215	DATASZ	movl	$B16DATA_SEL, %eax
216		movw    %ax, %ss
217		movw    %ax, %ds
218		movw    %ax, %es
219		movw    %ax, %fs
220		movw    %ax, %gs
221
222
223	DATASZ	movl	$0x0, %eax	/* put us in real mode */
224	DATASZ	movl	%eax, %cr0
225	.byte	0xea			/* ljmp */
226enter_real_ljmp:
227	.value	0			/* addr (16 bit) */
228	.value	0x0			/* value for %cs */
229enter_real:
230
231	/*
232	 * zero out the remaining segment registers
233	 */
234	DATASZ	xorl	%eax, %eax
235		movw    %ax, %ss
236		movw    %ax, %ds
237		movw    %ax, %es
238		movw    %ax, %fs
239		movw    %ax, %gs
240
241	/*
242	 * load the arguments to the BIOS call from the stack
243	 */
244	popl	%eax	/* really executes a 16 bit pop */
245	popl	%ebx
246	popl	%ecx
247	popl	%edx
248	popl	%esi
249	popl	%edi
250	popl	%ebp
251	pop	%es
252	pop	%ds
253
254	/*
255	 * do the actual BIOS call
256	 */
257	sti
258int_instr:
259	int	$0x10		/* this int number is overwritten */
260	cli			/* ensure interrupts remain disabled */
261
262	/*
263	 * save results of the BIOS call
264	 */
265	pushf
266	push	%ds
267	push	%es
268	pushl	%ebp		/* still executes as 16 bit */
269	pushl	%edi
270	pushl	%esi
271	pushl	%edx
272	pushl	%ecx
273	pushl	%ebx
274	pushl	%eax
275
276	/*
277	 * Restore protected mode and 32 bit execution
278	 */
279	push	$0			/* make sure %ds is zero before lgdt */
280	pop	%ds
281	.byte	0x0f, 0x01, 0x16	/* lgdt */
282gdt_info_load:
283	.value	0	/* temp GDT in currently addressible mem */
284
285	DATASZ	movl	$0x1, %eax
286	DATASZ	movl	%eax, %cr0
287
288	.byte	0xea			/* ljmp */
289enter_protected_ljmp:
290	.value	0			/* addr (still in 16 bit) */
291	.value	B32CODE_SEL		/* %cs value */
292enter_protected:
293
294	/*
295	 * We are now back in a 32 bit code section, fix data/stack segments
296	 */
297	.code32
298	movw	$B32DATA_SEL, %ax
299	movw	%ax, %ds
300	movw	%ax, %ss
301
302	/*
303	 * Re-enable paging. Note we only use 32 bit mov's to restore these
304	 * control registers. That's OK as the upper 32 bits are always zero.
305	 */
306	movl	save_cr4, %eax
307	movl	%eax, %cr4
308	movl	save_cr3, %eax
309	movl	%eax, %cr3
310
311	/*
312	 * re-enable long mode
313	 */
314	movl	$MSR_AMD_EFER, %ecx
315	rdmsr
316	btsl	$8, %eax
317	wrmsr
318
319	movl	save_cr0, %eax
320	movl	%eax, %cr0
321	jmp	enter_paging
322enter_paging:
323
324
325	/*
326	 * transition back to 64 bit mode
327	 */
328	pushl	$B64CODE_SEL
329	pushl	$longmode
330	lret
331longmode:
332	.code64
333	/*
334	 * restore caller frame pointer and segment registers
335	 */
336	lgdt	save_gdt
337	lidt	save_idt
338
339	/*
340	 * Before loading the task register we need to reset the busy bit
341	 * in its corresponding GDT selector. The busy bit is the 2nd bit in
342	 * the 5th byte of the selector.
343	 */
344	movzwq	save_tr, %rax
345	addq	save_gdt+2, %rax
346	btcl	$1, 5(%rax)
347	ltr	save_tr
348	movw	save_ds, %ds
349	movw	save_ss, %ss
350	movw	save_es, %es
351	movw	save_fs, %fs
352	movw	save_gs, %gs
353
354	pushq	save_cs
355	pushq	$.newcs
356	lretq
357.newcs:
358
359	/*
360	 * restore the hidden kernel segment base register values
361	 */
362	movl	save_fsbase, %eax
363	movl	save_fsbase + 4, %edx
364	movl	$MSR_AMD_FSBASE, %ecx
365	wrmsr
366
367	movl	save_gsbase, %eax
368	movl	save_gsbase + 4, %edx
369	movl	$MSR_AMD_GSBASE, %ecx
370	wrmsr
371
372	movl	save_kgsbase, %eax
373	movl	save_kgsbase + 4, %edx
374	movl	$MSR_AMD_KGSBASE, %ecx
375	wrmsr
376
377	movq	save_cr8, %rax
378	cmpq	$0, %rax
379	je	1f
380	movq	%rax, %cr8
3811:
382
383	/*
384	 * copy results to caller's location, then restore remaining registers
385	 */
386	movq    save_rsi, %rdi
387	movq	%rsp, %rsi
388	movq	$18, %rcx
389	rep
390	movsb
391	movw	18(%rsp), %ax
392	andq	$0xffff, %rax
393	movq    save_r12, %r12
394	movq    save_r13, %r13
395	movq    save_r14, %r14
396	movq    save_r15, %r15
397	movq    save_rbx, %rbx
398	movq    save_rbp, %rbp
399	movq    save_rsp, %rsp
400	ret
401
402
403/*
404 * Caller's registers to restore
405 */
406	.align 4
407save_esi:
408	.long	0
409save_edi:
410	.long	0
411save_ebx:
412	.long	0
413save_ebp:
414	.long	0
415save_esp:
416	.long	0
417
418	.align 8
419save_rsi:
420	.quad	0
421save_rbx:
422	.quad	0
423save_rbp:
424	.quad	0
425save_rsp:
426	.quad	0
427save_r12:
428	.quad	0
429save_r13:
430	.quad	0
431save_r14:
432	.quad	0
433save_r15:
434	.quad	0
435save_kgsbase:
436	.quad	0
437save_gsbase:
438	.quad	0
439save_fsbase:
440	.quad	0
441save_cr8:
442	.quad	0
443
444save_idt:
445	.quad	0
446	.quad	0
447
448save_gdt:
449	.quad	0
450	.quad	0
451
452save_cr0:
453	.quad	0
454save_cr3:
455	.quad	0
456save_cr4:
457	.quad	0
458save_cs:
459	.quad	0
460save_ss:
461	.value	0
462save_ds:
463	.value	0
464save_es:
465	.value	0
466save_fs:
467	.value	0
468save_gs:
469	.value	0
470save_tr:
471	.value	0
472
473idt_info:
474	.value 0x3ff
475	.quad 0
476
477
478/*
479 * We need to trampoline thru a gdt we have in low memory.
480 */
481#include "../boot/boot_gdt.s"
482