xref: /linux/arch/arm/include/asm/proc-fns.h (revision 3bdab16c55f57a24245c97d707241dd9b48d1a91)
1 /*
2  *  arch/arm/include/asm/proc-fns.h
3  *
4  *  Copyright (C) 1997-1999 Russell King
5  *  Copyright (C) 2000 Deep Blue Solutions Ltd
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11 #ifndef __ASM_PROCFNS_H
12 #define __ASM_PROCFNS_H
13 
14 #ifdef __KERNEL__
15 
16 #include <asm/glue-proc.h>
17 #include <asm/page.h>
18 
19 #ifndef __ASSEMBLY__
20 
21 struct mm_struct;
22 
23 /*
24  * Don't change this structure - ASM code relies on it.
25  */
26 struct processor {
27 	/* MISC
28 	 * get data abort address/flags for the faulting instruction at 'pc'
29 	 */
30 	void (*_data_abort)(unsigned long pc);
31 	/*
32 	 * Retrieve prefetch fault address given the aborted 'lr'
33 	 */
34 	unsigned long (*_prefetch_abort)(unsigned long lr);
35 	/*
36 	 * Set up any processor specifics
37 	 */
38 	void (*_proc_init)(void);
39 	/*
40 	 * Check for processor bugs and apply workarounds; called per CPU
	 * via the cpu_check_bugs macro below.  NOTE(review): the actual
	 * mitigations are implementation-specific -- see the proc-*.S code.
41 	 */
42 	void (*check_bugs)(void);
43 	/*
44 	 * Disable any processor specifics
45 	 */
46 	void (*_proc_fin)(void);
47 	/*
48 	 * Special stuff for a reset: transfer control to 'addr'; never
	 * returns (noreturn).  'hvc' presumably selects performing the
	 * branch via an HVC call -- verify against the cpu_*_reset code.
49 	 */
50 	void (*reset)(unsigned long addr, bool hvc) __attribute__((noreturn));
51 	/*
52 	 * Idle the processor; returns an error code on failure
53 	 */
54 	int (*_do_idle)(void);
55 	/*
56 	 * Processor architecture specific
57 	 */
58 	/*
59 	 * clean a virtual address range from the
60 	 * D-cache without flushing the cache.
61 	 */
62 	void (*dcache_clean_area)(void *addr, int size);
63 
64 	/*
65 	 * Set the page table: install the tables whose physical address
	 * is 'pgd_phys' for address space 'mm'.
66 	 */
67 	void (*switch_mm)(phys_addr_t pgd_phys, struct mm_struct *mm);
68 	/*
69 	 * Set a possibly extended PTE.  Non-extended PTEs should
70 	 * ignore 'ext'.  (LPAE PTEs carry no separate extension word,
	 * hence the two-argument form below.)
71 	 */
72 #ifdef CONFIG_ARM_LPAE
73 	void (*set_pte_ext)(pte_t *ptep, pte_t pte);
74 #else
75 	void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
76 #endif
77 
78 	/* Suspend/resume: suspend_size is the size of the state area handed
	 * to do_suspend/do_resume (presumably bytes -- confirm in
	 * arch/arm/kernel/suspend.c, which these hooks are private to).
	 */
79 	unsigned int suspend_size;
80 	void (*do_suspend)(void *);
81 	void (*do_resume)(void *);
82 };
83 
84 #ifndef MULTI_CPU
/*
 * Single-CPU-type build: each cpu_* entry point below binds directly to the
 * one processor implementation at link time, so there is no vtable to fill
 * in and init_proc_vtable() is a no-op.
 */
85 static inline void init_proc_vtable(const struct processor *p)
86 {
87 }
88 
89 extern void cpu_proc_init(void);
90 extern void cpu_proc_fin(void);
91 extern int cpu_do_idle(void);
92 extern void cpu_dcache_clean_area(void *, int);
93 extern void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
/* PTE setter signature mirrors struct processor::set_pte_ext above. */
94 #ifdef CONFIG_ARM_LPAE
95 extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte);
96 #else
97 extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
98 #endif
99 extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));
100 
101 /* These three are private to arch/arm/kernel/suspend.c */
102 extern void cpu_do_suspend(void *);
103 extern void cpu_do_resume(void *);
104 #else
105 
/* Multi-CPU-type build: dispatch through a method table filled in at boot. */
106 extern struct processor processor;
107 #if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
108 #include <linux/smp.h>
109 /*
110  * This can't be a per-cpu variable because we need to access it before
111  * per-cpu has been initialised.  We have a couple of functions that are
112  * called in a pre-emptible context, and so can't use smp_processor_id()
113  * there, hence PROC_TABLE().  We insist in init_proc_vtable() that the
114  * function pointers for these are identical across all CPUs.
115  */
116 extern struct processor *cpu_vtable[];
117 #define PROC_VTABLE(f)			cpu_vtable[smp_processor_id()]->f
118 #define PROC_TABLE(f)			cpu_vtable[0]->f
119 static inline void init_proc_vtable(const struct processor *p)
120 {
	/* Install this CPU's methods into its private vtable slot. */
121 	unsigned int cpu = smp_processor_id();
122 	*cpu_vtable[cpu] = *p;
	/*
	 * PROC_TABLE() always reads cpu_vtable[0], so the entries it can
	 * reach must be identical on every CPU -- warn if a secondary CPU
	 * disagrees with the boot CPU.
	 */
123 	WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
124 		     cpu_vtable[0]->dcache_clean_area);
125 	WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
126 		     cpu_vtable[0]->set_pte_ext);
127 }
128 #else
/* All CPUs share the single global 'processor' method table. */
129 #define PROC_VTABLE(f)			processor.f
130 #define PROC_TABLE(f)			processor.f
131 static inline void init_proc_vtable(const struct processor *p)
132 {
133 	processor = *p;
134 }
135 #endif
136 
/* Map the cpu_* API onto the vtable entries declared in struct processor. */
137 #define cpu_proc_init			PROC_VTABLE(_proc_init)
138 #define cpu_check_bugs			PROC_VTABLE(check_bugs)
139 #define cpu_proc_fin			PROC_VTABLE(_proc_fin)
140 #define cpu_reset			PROC_VTABLE(reset)
141 #define cpu_do_idle			PROC_VTABLE(_do_idle)
142 #define cpu_dcache_clean_area		PROC_TABLE(dcache_clean_area)
143 #define cpu_set_pte_ext			PROC_TABLE(set_pte_ext)
144 #define cpu_do_switch_mm		PROC_VTABLE(switch_mm)
145 
146 /* These two are private to arch/arm/kernel/suspend.c */
147 #define cpu_do_suspend			PROC_VTABLE(do_suspend)
148 #define cpu_do_resume			PROC_VTABLE(do_resume)
149 #endif
150 
/* Resume entry point after suspend (NOTE(review): implemented in assembly
 * alongside the suspend.c hooks above -- confirm exact location). */
151 extern void cpu_resume(void);
152 
153 #include <asm/memory.h>
154 
155 #ifdef CONFIG_MMU
156 
/* Switch to 'pgd' for 'mm'; the low-level code takes the pgd's PHYSICAL
 * address, hence the virt_to_phys() conversion here. */
157 #define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
158 
159 #ifdef CONFIG_ARM_LPAE
160 
/* Read the 64-bit TTBRn as a u64 via an MRRC of CP15 c2 (LPAE form). */
161 #define cpu_get_ttbr(nr)					\
162 	({							\
163 		u64 ttbr;					\
164 		__asm__("mrrc	p15, " #nr ", %Q0, %R0, c2"	\
165 			: "=r" (ttbr));				\
166 		ttbr;						\
167 	})
168 
/* Current pgd: TTBR0 with the bits below the table's natural alignment
 * masked off, converted back to a kernel virtual address. */
169 #define cpu_get_pgd()	\
170 	({						\
171 		u64 pg = cpu_get_ttbr(0);		\
172 		pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1);	\
173 		(pgd_t *)phys_to_virt(pg);		\
174 	})
175 #else
/* Classic (non-LPAE) MMU: read the 32-bit TTBR0 via MRC of CP15 c2 and
 * mask to the 16KiB-aligned first-level table base (~0x3fff). */
176 #define cpu_get_pgd()	\
177 	({						\
178 		unsigned long pg;			\
179 		__asm__("mrc	p15, 0, %0, c2, c0, 0"	\
180 			 : "=r" (pg) : : "cc");		\
181 		pg &= ~0x3fff;				\
182 		(pgd_t *)phys_to_virt(pg);		\
183 	})
184 #endif
185 
186 #else	/*!CONFIG_MMU */
187 
/* No MMU: there is no page table to switch; make this a no-op. */
188 #define cpu_switch_mm(pgd,mm)	{ }
189 
190 #endif
191 
192 #endif /* __ASSEMBLY__ */
193 #endif /* __KERNEL__ */
194 #endif /* __ASM_PROCFNS_H */
195