xref: /linux/arch/powerpc/include/asm/hw_irq.h (revision 6ed7ffddcf61f668114edb676417e5fb33773b59)
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

#ifdef CONFIG_PPC64

/*
 * PACA flags in paca->irq_happened.
 *
 * These bits are set when interrupts occur while soft-disabled
 * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
 * is set whenever we manually hard disable.
 */
#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_DBELL		0x02
#define PACA_IRQ_EE		0x04
#define PACA_IRQ_DEC		0x08 /* Or FIT */
#define PACA_IRQ_EE_EDGE	0x10 /* BookE only */
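
/*
 * Illustrative note (not part of the original header): the replay machinery
 * that consumes these flags lives in arch/powerpc/kernel/irq.c. Roughly, an
 * interrupt taken while soft-disabled records its type here, hard-disables
 * and returns; a later arch_local_irq_restore(1) maps the pending flag back
 * to a vector and hands it to __replay_interrupt(). Sketch only, vector
 * numbers shown for Book3S server CPUs:
 *
 *	if (happened & PACA_IRQ_DEC)
 *		__replay_interrupt(0x900);	// decrementer
 *	else if (happened & PACA_IRQ_EE)
 *		__replay_interrupt(0x500);	// external interrupt
 */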

#endif /* CONFIG_PPC64 */

#ifndef __ASSEMBLY__

extern void __replay_interrupt(unsigned int vector);

extern void timer_interrupt(struct pt_regs *);
extern void performance_monitor_exception(struct pt_regs *regs);
extern void WatchdogException(struct pt_regs *regs);
extern void unknown_exception(struct pt_regs *regs);

#ifdef CONFIG_PPC64
#include <asm/paca.h>

static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;

	asm volatile(
		"lbz %0,%1(13)"
		: "=r" (flags)
		: "i" (offsetof(struct paca_struct, soft_enabled)));

	return flags;
}

static inline unsigned long arch_local_irq_disable(void)
{
	unsigned long flags, zero;

	asm volatile(
		"li %1,0; lbz %0,%2(13); stb %1,%2(13)"
		: "=r" (flags), "=&r" (zero)
		: "i" (offsetof(struct paca_struct, soft_enabled))
		: "memory");

	return flags;
}
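
/*
 * Both accessors above implement "lazy" (soft) masking: they only touch the
 * soft_enabled byte in the PACA and leave MSR[EE] alone, so hardware
 * interrupts can still occur and are then recorded in paca->irq_happened for
 * a later replay. The asm reaches the PACA through r13 directly; in plain C
 * the disable path is roughly equivalent to (sketch only, not a drop-in
 * replacement):
 *
 *	flags = local_paca->soft_enabled;
 *	local_paca->soft_enabled = 0;
 *	return flags;
 */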

extern void arch_local_irq_restore(unsigned long);

static inline void arch_local_irq_enable(void)
{
	arch_local_irq_restore(1);
}

static inline unsigned long arch_local_irq_save(void)
{
	return arch_local_irq_disable();
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return flags == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}
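
/*
 * Usage sketch (the generic local_irq_save()/local_irq_restore() wrappers in
 * <linux/irqflags.h> end up here on 64-bit): a flags value of 0 means
 * soft-disabled, non-zero means enabled.
 *
 *	unsigned long flags = arch_local_irq_save();	// soft-disable only
 *	// ... critical section: hardware interrupts may still arrive and
 *	//     get recorded in paca->irq_happened ...
 *	arch_local_irq_restore(flags);			// replays anything pending
 */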

#ifdef CONFIG_PPC_BOOK3E
#define __hard_irq_enable()	asm volatile("wrteei 1" : : : "memory")
#define __hard_irq_disable()	asm volatile("wrteei 0" : : : "memory")
#else
#define __hard_irq_enable()	__mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
#define __hard_irq_disable()	__mtmsrd(local_paca->kernel_msr, 1)
#endif

static inline void hard_irq_disable(void)
{
	__hard_irq_disable();
	get_paca()->soft_enabled = 0;
	get_paca()->irq_happened |= PACA_IRQ_HARD_DIS;
}
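
/*
 * Note (illustrative): setting PACA_IRQ_HARD_DIS here is what lets a later
 * arch_local_irq_restore() know that MSR[EE] is really off and must be
 * turned back on. A caller that needs interrupts fully off would do, e.g.:
 *
 *	hard_irq_disable();		// MSR[EE] off + soft-disabled
 *	// ... code that must not be interrupted at all ...
 *	local_irq_enable();		// hard-enables and replays
 */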

/* include/linux/interrupt.h needs hard_irq_disable to be a macro */
#define hard_irq_disable	hard_irq_disable

static inline bool lazy_irq_pending(void)
{
	return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
}

/*
 * This is called by handlers of asynchronous interrupts to conditionally
 * re-enable hard interrupts, when soft-disabled, after having cleared
 * the source of the interrupt.
 */
static inline void may_hard_irq_enable(void)
{
	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
	if (!(get_paca()->irq_happened & PACA_IRQ_EE))
		__hard_irq_enable();
}
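
/*
 * Usage sketch (roughly what the timer path in arch/powerpc/kernel/time.c
 * does): the handler first quiesces its own interrupt source, then lets
 * unrelated hardware interrupts back in for the rest of the handler. The
 * names below are illustrative:
 *
 *	set_dec(DECREMENTER_MAX);	// stop our source re-firing first
 *	may_hard_irq_enable();
 */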

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !regs->softe;
}

extern bool prep_irq_for_idle(void);
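
/*
 * prep_irq_for_idle() (defined in arch/powerpc/kernel/irq.c) hard-disables,
 * bails out if lazy_irq_pending(), and otherwise marks interrupts as
 * soft-enabled so the low power state can be woken by them. A platform idle
 * loop would use it roughly like this (power7_nap() is just one example of
 * a low power entry point):
 *
 *	if (prep_irq_for_idle())
 *		power7_nap();
 */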

#else /* CONFIG_PPC64 */

#define SET_MSR_EE(x)	mtmsr(x)

static inline unsigned long arch_local_save_flags(void)
{
	return mfmsr();
}

static inline void arch_local_irq_restore(unsigned long flags)
{
#if defined(CONFIG_BOOKE)
	asm volatile("wrtee %0" : : "r" (flags) : "memory");
#else
	mtmsr(flags);
#endif
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 0" : : : "memory");
#else
	SET_MSR_EE(flags & ~MSR_EE);
#endif
	return flags;
}

static inline void arch_local_irq_disable(void)
{
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 0" : : : "memory");
#else
	arch_local_irq_save();
#endif
}

static inline void arch_local_irq_enable(void)
{
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 1" : : : "memory");
#else
	unsigned long msr = mfmsr();
	SET_MSR_EE(msr | MSR_EE);
#endif
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & MSR_EE) == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}
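
/*
 * On 32-bit there is no lazy masking: the flags value is simply the MSR and
 * disabling is a direct MSR[EE] (or wrteei) toggle, so a save/restore pair
 * reduces to roughly (sketch):
 *
 *	flags = mfmsr();
 *	mtmsr(flags & ~MSR_EE);		// "wrteei 0" on BookE
 *	// ... critical section ...
 *	mtmsr(flags);			// "wrtee flags" on BookE
 */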

#define hard_irq_disable()		arch_local_irq_disable()

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !(regs->msr & MSR_EE);
}

static inline void may_hard_irq_enable(void) { }

#endif /* CONFIG_PPC64 */

#define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST

/*
 * interrupt-retrigger: should we handle this via lost interrupts and IPIs
 * or should we not care like we do now ? --BenH.
 */
struct irq_chip;

#endif  /* __ASSEMBLY__ */
#endif	/* __KERNEL__ */
#endif	/* _ASM_POWERPC_HW_IRQ_H */