xref: /linux/arch/loongarch/include/asm/cmpxchg.h (revision 42874e4eb35bdfc54f8514685e50434098ba4f6c)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/bits.h>
#include <linux/build_bug.h>
#include <asm/barrier.h>

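/*
 * __xchg_asm() expands to a single amswap_db.w/amswap_db.d instruction,
 * i.e. an atomic swap whose "_db" form also carries barrier semantics.
 * The "+ZB" constraint keeps *m addressable through a plain base
 * register with zero offset, as the AM* instructions require, and "Jr"
 * lets the compiler pass either a register or constant zero ($zero).
 */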
#define __xchg_asm(amswap_db, m, val)		\
({						\
		__typeof(val) __ret;		\
						\
		__asm__ __volatile__ (		\
		" "amswap_db" %1, %z2, %0 \n"	\
		: "+ZB" (*m), "=&r" (__ret)	\
		: "Jr" (val)			\
		: "memory");			\
						\
		__ret;				\
})

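/*
 * 8- and 16-bit xchg is emulated on top of 32-bit LL/SC: the value is
 * shifted into position within the naturally aligned 32-bit word that
 * contains it and merged in with a ll.w/sc.w retry loop, leaving the
 * neighbouring bytes untouched.
 */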
static inline unsigned int __xchg_small(volatile void *ptr, unsigned int val,
					unsigned int size)
{
	unsigned int shift;
	u32 old32, mask, temp;
	volatile u32 *ptr32;

	/* Mask value to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	val &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * exchange within the naturally aligned 4 byte integer that
	 * includes it.
	 */
	shift = (unsigned long)ptr & 0x3;
	shift *= BITS_PER_BYTE;
	mask <<= shift;
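	/*
	 * E.g. exchanging a u16 at byte offset 2 within its word
	 * (LoongArch is little-endian) gives shift = 16 and
	 * mask = 0xffff0000, placing the new value in bits 31:16 of the
	 * containing word.
	 */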

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);

	asm volatile (
	"1:	ll.w		%0, %3		\n"
	"	andn		%1, %0, %z4	\n"
	"	or		%1, %1, %z5	\n"
	"	sc.w		%1, %2		\n"
	"	beqz		%1, 1b		\n"
	: "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32)
	: "ZC" (*ptr32), "Jr" (mask), "Jr" (val << shift)
	: "memory");

	return (old32 & mask) >> shift;
}

static __always_inline unsigned long
__arch_xchg(volatile void *ptr, unsigned long x, int size)
{
	switch (size) {
	case 1:
	case 2:
		return __xchg_small(ptr, x, size);

	case 4:
		return __xchg_asm("amswap_db.w", (volatile u32 *)ptr, (u32)x);

	case 8:
		return __xchg_asm("amswap_db.d", (volatile u64 *)ptr, (u64)x);

	default:
		BUILD_BUG();
	}

	return 0;
}

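/*
 * arch_xchg() funnels all sizes through __arch_xchg() as an unsigned
 * long and casts the result back, so callers keep the type of *ptr
 * while the size dispatch above is resolved at compile time
 * (BUILD_BUG() rejects unsupported sizes).
 */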
#define arch_xchg(ptr, x)						\
({									\
	__typeof__(*(ptr)) __res;					\
									\
	__res = (__typeof__(*(ptr)))					\
		__arch_xchg((ptr), (unsigned long)(x), sizeof(*(ptr)));	\
									\
	__res;								\
})

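/*
 * LL/SC compare-and-exchange.  $t0 is used as a scratch register for
 * the store-conditional attempt (hence the "t0" clobber), and
 * __WEAK_LLSC_MB supplies the ordering needed on the early-exit path
 * taken when the comparison fails and no sc is executed.
 */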
#define __cmpxchg_asm(ld, st, m, old, new)				\
({									\
	__typeof(old) __ret;						\
									\
	__asm__ __volatile__(						\
	"1:	" ld "	%0, %2		# __cmpxchg_asm \n"		\
	"	bne	%0, %z3, 2f			\n"		\
	"	move	$t0, %z4			\n"		\
	"	" st "	$t0, %1				\n"		\
	"	beqz	$t0, 1b				\n"		\
	"2:						\n"		\
	__WEAK_LLSC_MB							\
	: "=&r" (__ret), "=ZB"(*m)					\
	: "ZB"(*m), "Jr" (old), "Jr" (new)				\
	: "t0", "memory");						\
									\
	__ret;								\
})

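/*
 * As with __xchg_small(), 8- and 16-bit cmpxchg is emulated with a
 * 32-bit ll.w/sc.w loop on the containing aligned word; a failed
 * compare of the masked bytes leaves the loop through the
 * __WEAK_LLSC_MB barrier without attempting a store.
 */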
static inline unsigned int __cmpxchg_small(volatile void *ptr, unsigned int old,
					   unsigned int new, unsigned int size)
{
	unsigned int shift;
	u32 old32, mask, temp;
	volatile u32 *ptr32;

	/* Mask inputs to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	old &= mask;
	new &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * compare & exchange within the naturally aligned 4 byte integer
	 * that includes it.
	 */
	shift = (unsigned long)ptr & 0x3;
	shift *= BITS_PER_BYTE;
	old <<= shift;
	new <<= shift;
	mask <<= shift;

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);

	asm volatile (
	"1:	ll.w		%0, %3		\n"
	"	and		%1, %0, %z4	\n"
	"	bne		%1, %z5, 2f	\n"
	"	andn		%1, %0, %z4	\n"
	"	or		%1, %1, %z6	\n"
	"	sc.w		%1, %2		\n"
	"	beqz		%1, 1b		\n"
	"	b		3f		\n"
	"2:					\n"
	__WEAK_LLSC_MB
	"3:					\n"
	: "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32)
	: "ZC" (*ptr32), "Jr" (mask), "Jr" (old), "Jr" (new)
	: "memory");

	return (old32 & mask) >> shift;
}

static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, unsigned int size)
{
	switch (size) {
	case 1:
	case 2:
		return __cmpxchg_small(ptr, old, new, size);

	case 4:
		return __cmpxchg_asm("ll.w", "sc.w", (volatile u32 *)ptr,
				     (u32)old, new);

	case 8:
		return __cmpxchg_asm("ll.d", "sc.d", (volatile u64 *)ptr,
				     (u64)old, new);

	default:
		BUILD_BUG();
	}

	return 0;
}

#define arch_cmpxchg_local(ptr, old, new)				\
	((__typeof__(*(ptr)))						\
		__cmpxchg((ptr),					\
			  (unsigned long)(__typeof__(*(ptr)))(old),	\
			  (unsigned long)(__typeof__(*(ptr)))(new),	\
			  sizeof(*(ptr))))

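/*
 * The fully ordered arch_cmpxchg() is simply arch_cmpxchg_local(): no
 * extra barriers are added, the LL/SC sequences above (with
 * __WEAK_LLSC_MB on the failure path) being relied on for the required
 * ordering.
 */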
#define arch_cmpxchg(ptr, old, new)					\
({									\
	__typeof__(*(ptr)) __res;					\
									\
	__res = arch_cmpxchg_local((ptr), (old), (new));		\
									\
	__res;								\
})

#ifdef CONFIG_64BIT
#define arch_cmpxchg64_local(ptr, o, n)					\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg_local((ptr), (o), (n));				\
  })

#define arch_cmpxchg64(ptr, o, n)					\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg((ptr), (o), (n));					\
  })
#else
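/*
 * Without CONFIG_64BIT, 64-bit cmpxchg falls back to the generic
 * emulation, which is only atomic with respect to the local CPU: it
 * disables interrupts around a plain compare and store.
 */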
#include <asm-generic/cmpxchg-local.h>
#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
#endif

#endif /* __ASM_CMPXCHG_H */