/* xref: /linux/arch/powerpc/kernel/vmlinux.lds.S (revision ac84bac4062e7fc24f5e2c61c6a414b2a00a29ad) */
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * PROVIDE32(): on 64-bit builds the 32-bit compatibility symbols are
 * deliberately renamed to __unused__<sym> so they are defined (keeping the
 * script identical in shape) but never resolve as the real symbol; 32-bit
 * builds provide the symbol under its real name.
 */
#ifdef CONFIG_PPC64
#define PROVIDE32(x)	PROVIDE(__unused__##x)
#else
#define PROVIDE32(x)	PROVIDE(x)
#endif

/* Hooks consumed by asm-generic/vmlinux.lds.h (included below). */
#define BSS_FIRST_SECTIONS *(.bss.prominit)
#define EMITS_PT_NOTE
#define RO_EXCEPTION_TABLE_ALIGN	0

#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>

/* Alignment of __init_begin and _etext, from Kconfig-selected shifts. */
#define STRICT_ALIGN_SIZE	(1 << CONFIG_DATA_SHIFT)
#define ETEXT_ALIGN_SIZE	(1 << CONFIG_ETEXT_SHIFT)
ENTRY(_stext)

PHDRS {
	text PT_LOAD FLAGS(7); /* RWX */
	note PT_NOTE FLAGS(0);
}

#ifdef CONFIG_PPC64
OUTPUT_ARCH(powerpc:common64)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(powerpc:common)
/*
 * 32-bit: alias jiffies to the 32-bit half of the 64-bit jiffies_64
 * counter at offset 4 (NOTE(review): presumably the low word on this
 * big-endian layout — confirm against the generic jiffies wiring).
 */
jiffies = jiffies_64 + 4;
#endif
SECTIONS
{
	. = KERNELBASE;

/*
 * Text, read only data and other permanent read-only sections
 */

	_text = .;
	_stext = .;

	/*
	 * Head text.
	 * This needs to be in its own output section to avoid ld placing
	 * branch trampoline stubs randomly throughout the fixed sections,
	 * which it will do (even if the branch comes from another section)
	 * in order to optimize stub generation.
	 */
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {
#ifdef CONFIG_PPC64
		KEEP(*(.head.text.first_256B));
#ifdef CONFIG_PPC_BOOK3E
#else
		KEEP(*(.head.text.real_vectors));
		*(.head.text.real_trampolines);
		KEEP(*(.head.text.virt_vectors));
		*(.head.text.virt_trampolines);
# if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
		KEEP(*(.head.data.fwnmi_page));
# endif
#endif
#else /* !CONFIG_PPC64 */
		HEAD_TEXT
#endif
	} :text

	__head_end = .;

#ifdef CONFIG_PPC64
	/*
	 * ALIGN(0) overrides the default output section alignment because
	 * this needs to start right after .head.text in order for fixed
	 * section placement to work.
	 */
	.text ALIGN(0) : AT(ADDR(.text) - LOAD_OFFSET) {
#ifdef CONFIG_LD_HEAD_STUB_CATCH
		KEEP(*(.linker_stub_catch));
		. = . ;
#endif

#else
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		ALIGN_FUNCTION();
#endif
		/* careful! __ftr_alt_* sections need to be close to .text */
		*(.text.hot TEXT_MAIN .text.fixup .text.unlikely .fixup __ftr_alt_* .ref.text);
#ifdef CONFIG_PPC64
		*(.tramp.ftrace.text);
#endif
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		/*
		 * -Os builds call FP save/restore functions. The powerpc64
		 * linker generates those on demand in the .sfpr section.
		 * .sfpr gets placed at the beginning of a group of input
		 * sections, which can break start-of-text offset if it is
		 * included with the main text sections, so put it by itself.
		 */
		*(.sfpr);
		MEM_KEEP(init.text)
		MEM_KEEP(exit.text)

#ifdef CONFIG_PPC32
		*(.got1)
		__got2_start = .;
		*(.got2)
		__got2_end = .;
#endif /* CONFIG_PPC32 */

	} :text

	. = ALIGN(ETEXT_ALIGN_SIZE);
	_etext = .;
	PROVIDE32 (etext = .);

	/* Read-only data */
	RO_DATA(PAGE_SIZE)

	/*
	 * The fixup tables below are each bracketed by __start_*/__stop_*
	 * symbol pairs and kept 8-byte aligned; NOTE(review): presumably
	 * they are walked at boot by the feature/security patching code —
	 * confirm against arch/powerpc/lib/feature-fixups.c.
	 */
#ifdef CONFIG_PPC64
	. = ALIGN(8);
	__stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
		__start___stf_entry_barrier_fixup = .;
		*(__stf_entry_barrier_fixup)
		__stop___stf_entry_barrier_fixup = .;
	}

	. = ALIGN(8);
	__stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
		__start___stf_exit_barrier_fixup = .;
		*(__stf_exit_barrier_fixup)
		__stop___stf_exit_barrier_fixup = .;
	}

	. = ALIGN(8);
	__rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
		__start___rfi_flush_fixup = .;
		*(__rfi_flush_fixup)
		__stop___rfi_flush_fixup = .;
	}
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BARRIER_NOSPEC
	. = ALIGN(8);
	__spec_barrier_fixup : AT(ADDR(__spec_barrier_fixup) - LOAD_OFFSET) {
		__start___barrier_nospec_fixup = .;
		*(__barrier_nospec_fixup)
		__stop___barrier_nospec_fixup = .;
	}
#endif /* CONFIG_PPC_BARRIER_NOSPEC */

#ifdef CONFIG_PPC_FSL_BOOK3E
	. = ALIGN(8);
	__spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) {
		__start__btb_flush_fixup = .;
		*(__btb_flush_fixup)
		__stop__btb_flush_fixup = .;
	}
#endif

/*
 * Init sections discarded at runtime
 */
	. = ALIGN(STRICT_ALIGN_SIZE);
	__init_begin = .;
	. = ALIGN(PAGE_SIZE);
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
		_sinittext = .;
		INIT_TEXT
		_einittext = .;
#ifdef CONFIG_PPC64
		*(.tramp.ftrace.init);
#endif
	} :text

	/* .exit.text is discarded at runtime, not link time,
	 * to deal with references from __bug_table
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
		INIT_DATA
	}

	.init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
		INIT_SETUP(16)
	}

	.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
		INIT_CALLS
	}

	.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
		CON_INITCALL
	}

	. = ALIGN(8);
	__ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) {
		__start___ftr_fixup = .;
		KEEP(*(__ftr_fixup))
		__stop___ftr_fixup = .;
	}
	. = ALIGN(8);
	__mmu_ftr_fixup : AT(ADDR(__mmu_ftr_fixup) - LOAD_OFFSET) {
		__start___mmu_ftr_fixup = .;
		KEEP(*(__mmu_ftr_fixup))
		__stop___mmu_ftr_fixup = .;
	}
	. = ALIGN(8);
	__lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) {
		__start___lwsync_fixup = .;
		KEEP(*(__lwsync_fixup))
		__stop___lwsync_fixup = .;
	}
#ifdef CONFIG_PPC64
	. = ALIGN(8);
	__fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) {
		__start___fw_ftr_fixup = .;
		KEEP(*(__fw_ftr_fixup))
		__stop___fw_ftr_fixup = .;
	}
#endif
	.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
		INIT_RAM_FS
	}

	PERCPU_SECTION(L1_CACHE_BYTES)

	. = ALIGN(8);
	.machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
		__machine_desc_start = . ;
		KEEP(*(.machine.desc))
		__machine_desc_end = . ;
	}
#ifdef CONFIG_RELOCATABLE
	. = ALIGN(8);
	.dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET)
	{
#ifdef CONFIG_PPC32
		__dynamic_symtab = .;
#endif
		*(.dynsym)
	}
	.dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) }
	.dynamic : AT(ADDR(.dynamic) - LOAD_OFFSET)
	{
		__dynamic_start = .;
		*(.dynamic)
	}
	.hash : AT(ADDR(.hash) - LOAD_OFFSET) { *(.hash) }
	.gnu.hash : AT(ADDR(.gnu.hash) - LOAD_OFFSET) { *(.gnu.hash) }
	.interp : AT(ADDR(.interp) - LOAD_OFFSET) { *(.interp) }
	.rela.dyn : AT(ADDR(.rela.dyn) - LOAD_OFFSET)
	{
		__rela_dyn_start = .;
		*(.rela*)
	}
#endif
	/* .exit.data is discarded at runtime, not link time,
	 * to deal with references from .exit.text
	 */
	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

	/* freed after init ends here */
	. = ALIGN(PAGE_SIZE);
	__init_end = .;

/*
 * And now the various read/write data
 */

	. = ALIGN(PAGE_SIZE);
	_sdata = .;

#ifdef CONFIG_PPC32
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		DATA_DATA
#ifdef CONFIG_UBSAN
		*(.data..Lubsan_data*)
		*(.data..Lubsan_type*)
#endif
		*(.data.rel*)
		*(SDATA_MAIN)
		*(.sdata2)
		*(.got.plt) *(.got)
		*(.plt)
		*(.branch_lt)
	}
#else
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		DATA_DATA
		*(.data.rel*)
		*(.toc1)
		*(.branch_lt)
	}

	.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
		__start_opd = .;
		KEEP(*(.opd))
		__end_opd = .;
	}

	. = ALIGN(256);
	.got : AT(ADDR(.got) - LOAD_OFFSET) {
		__toc_start = .;
#ifndef CONFIG_RELOCATABLE
		__prom_init_toc_start = .;
		arch/powerpc/kernel/prom_init.o*(.toc .got)
		__prom_init_toc_end = .;
#endif
		*(.got)
		*(.toc)
	}
#endif

	/* The initial task and kernel stack */
	INIT_TASK_DATA_SECTION(THREAD_ALIGN)

	.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
		PAGE_ALIGNED_DATA(PAGE_SIZE)
	}

	.data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
	}

	.data..read_mostly : AT(ADDR(.data..read_mostly) - LOAD_OFFSET) {
		READ_MOSTLY_DATA(L1_CACHE_BYTES)
	}

	. = ALIGN(PAGE_SIZE);
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}

	BUG_TABLE

	. = ALIGN(PAGE_SIZE);
	_edata  =  .;
	PROVIDE32 (edata = .);

/*
 * And finally the bss
 */

	BSS_SECTION(0, 0, 0)

	. = ALIGN(PAGE_SIZE);
	_end = . ;
	PROVIDE32 (end = .);

	STABS_DEBUG

	DWARF_DEBUG

	/* Input sections deliberately dropped from the final image. */
	DISCARDS
	/DISCARD/ : {
		*(*.EMB.apuinfo)
		*(.glink .iplt .plt .rela* .comment)
		*(.gnu.version*)
		*(.gnu.attributes)
		*(.eh_frame)
	}
}
375