/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PARISC_MMU_CONTEXT_H
#define __PARISC_MMU_CONTEXT_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <asm-generic/mm_hooks.h>

/* on PA-RISC, we actually have enough contexts to justify an allocator
 * for them.  prumpf */

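/*
 * A space ID ("sid") is the value loaded into a PA-RISC space register to
 * select one virtual address space; each user mm owns a sid for its whole
 * lifetime.  alloc_sid()/free_sid() hand IDs out of and back into the
 * global pool.
 */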
extern unsigned long alloc_sid(void);
extern void free_sid(unsigned long);

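/*
 * A fresh mm is given its space ID as soon as it is created; the BUG_ON
 * below documents the assumption that nothing can be sharing the mm yet.
 */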
#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_users) != 1);

	mm->context.space_id = alloc_sid();
	return 0;
}

#define destroy_context destroy_context
static inline void
destroy_context(struct mm_struct *mm)
{
	free_sid(mm->context.space_id);
	mm->context.space_id = 0;
}

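/*
 * Convert a space ID into the matching protection ID.  Bit 0 of a
 * protection-ID register is the write-disable bit, so the ID has to land
 * one bit higher; both branches below amount to "sid << 1" and only differ
 * in how context.space_id is stored relative to SPACEID_SHIFT.
 */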
static inline unsigned long __space_to_prot(mm_context_t context)
{
#if SPACEID_SHIFT == 0
	return context.space_id << 1;
#else
	return context.space_id >> (SPACEID_SHIFT - 1);
#endif
}

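/*
 * Make a context current: point the user space register at the new space
 * and load the matching protection ID into CR8 so hardware access checks
 * are performed against the new address space.
 */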
static inline void load_context(mm_context_t context)
{
	mtsp(context.space_id, SR_USER);
	mtctl(__space_to_prot(context), 8);
}

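/*
 * The real context switch: CR25 carries the physical address of the new
 * page directory for the software TLB miss handlers, and load_context()
 * switches the space/protection IDs.  The caller must already have
 * interrupts disabled, hence the _irqs_off suffix.
 */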
static inline void switch_mm_irqs_off(struct mm_struct *prev,
		struct mm_struct *next, struct task_struct *tsk)
{
	if (prev != next) {
#ifdef CONFIG_TLB_PTLOCK
		/* put physical address of page_table_lock in cr28 (tr4)
		   for TLB faults */
		spinlock_t *pgd_lock = &next->page_table_lock;
		mtctl(__pa(__ldcw_align(&pgd_lock->rlock.raw_lock)), 28);
#endif
		mtctl(__pa(next->pgd), 25);
		load_context(next->context);
	}
}

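/*
 * Interrupt-safe wrapper: the prev == next test is repeated here so the
 * common "same mm" case returns before paying for the irq save/restore.
 */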
static inline void switch_mm(struct mm_struct *prev,
		struct mm_struct *next, struct task_struct *tsk)
{
	unsigned long flags;

	if (prev == next)
		return;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * Activate_mm is our one chance to allocate a space id
	 * for a new mm created in the exec path. There's also
	 * some lazy tlb stuff, which is currently dead code, but
	 * we only allocate a space id if one hasn't been allocated
	 * already, so we should be OK.
	 */

	BUG_ON(next == &init_mm); /* Should never happen */

	if (next->context.space_id == 0)
		next->context.space_id = alloc_sid();

	switch_mm(prev, next, current);
}

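/*
 * The "#define foo foo" lines above tell asm-generic/mmu_context.h which
 * hooks this file overrides; anything not defined here (e.g.
 * enter_lazy_tlb()) falls back to the generic version.
 */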
#include <asm-generic/mmu_context.h>

#endif