xref: /linux/arch/s390/lib/uaccess.c (revision 06ed6aa56ffac9241e03a24649e8d048f8f1b10c)
// SPDX-License-Identifier: GPL-2.0
/*
 *  Standard user space access functions based on mvcp/mvcs and doing
 *  interesting things in the secondary space mode.
 *
 *    Copyright IBM Corp. 2006,2014
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/jump_label.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>

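/*
 * MVCOS (move with optional specifications, facility 27) lets the kernel
 * access user memory without changing the address space mode.  On machines
 * that may lack the facility a static key records whether it is present;
 * kernels built for z10 or later assume it is always there.
 */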
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
static DEFINE_STATIC_KEY_FALSE(have_mvcos);

static int __init uaccess_init(void)
{
	if (test_facility(27))
		static_branch_enable(&have_mvcos);
	return 0;
}
early_initcall(uaccess_init);

static inline int copy_with_mvcos(void)
{
	if (static_branch_likely(&have_mvcos))
		return 1;
	return 0;
}
#else
static inline int copy_with_mvcos(void)
{
	return 1;
}
#endif

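/*
 * set_fs() selects the address space for subsequent user accesses:
 * USER_DS loads the user ASCE into control register 1 (primary space),
 * everything else loads the kernel ASCE.  The *_SACF variants (bit 0 set)
 * additionally set up control register 7 (secondary space) for the
 * mvcp/mvcs/mvc based access functions.  The CIF_ASCE_* flags track
 * control registers that need to be reloaded before returning to user
 * space.
 */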
void set_fs(mm_segment_t fs)
{
	current->thread.mm_segment = fs;
	if (fs == USER_DS) {
		__ctl_load(S390_lowcore.user_asce, 1, 1);
		clear_cpu_flag(CIF_ASCE_PRIMARY);
	} else {
		__ctl_load(S390_lowcore.kernel_asce, 1, 1);
		set_cpu_flag(CIF_ASCE_PRIMARY);
	}
	if (fs & 1) {
		if (fs == USER_DS_SACF)
			__ctl_load(S390_lowcore.user_asce, 7, 7);
		else
			__ctl_load(S390_lowcore.kernel_asce, 7, 7);
		set_cpu_flag(CIF_ASCE_SECONDARY);
	}
}
EXPORT_SYMBOL(set_fs);

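/*
 * Switch the current task into SACF mode for the mvcp/mvcs/mvc based
 * access functions: control register 1 (primary space) gets the kernel
 * ASCE, control register 7 (secondary space) gets the user ASCE for
 * USER_DS or the kernel ASCE otherwise.  Interrupts are disabled to
 * protect against a concurrent page table upgrade.  Returns the previous
 * mm_segment so the caller can undo the switch with disable_sacf_uaccess().
 */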
mm_segment_t enable_sacf_uaccess(void)
{
	mm_segment_t old_fs;
	unsigned long asce, cr;
	unsigned long flags;

	old_fs = current->thread.mm_segment;
	if (old_fs & 1)
		return old_fs;
	/* protect against a concurrent page table upgrade */
	local_irq_save(flags);
	current->thread.mm_segment |= 1;
	asce = S390_lowcore.kernel_asce;
	if (likely(old_fs == USER_DS)) {
		__ctl_store(cr, 1, 1);
		if (cr != S390_lowcore.kernel_asce) {
			__ctl_load(S390_lowcore.kernel_asce, 1, 1);
			set_cpu_flag(CIF_ASCE_PRIMARY);
		}
		asce = S390_lowcore.user_asce;
	}
	__ctl_store(cr, 7, 7);
	if (cr != asce) {
		__ctl_load(asce, 7, 7);
		set_cpu_flag(CIF_ASCE_SECONDARY);
	}
	local_irq_restore(flags);
	return old_fs;
}
EXPORT_SYMBOL(enable_sacf_uaccess);

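/*
 * Leave SACF mode again: restore the saved mm_segment and, for USER_DS
 * with MVCOS available, put the user ASCE back into control register 1.
 */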
void disable_sacf_uaccess(mm_segment_t old_fs)
{
	current->thread.mm_segment = old_fs;
	if (old_fs == USER_DS && test_facility(27)) {
		__ctl_load(S390_lowcore.user_asce, 1, 1);
		clear_cpu_flag(CIF_ASCE_PRIMARY);
	}
}
EXPORT_SYMBOL(disable_sacf_uaccess);

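/*
 * Copy from user space with MVCOS.  The operand-access controls in
 * register 0 select the user address space for the source operand, so no
 * address space switch is needed.  The loop processes the copy in 4K
 * steps; on a fault the copy is retried with the length clamped to the
 * current source page and the number of bytes not copied is returned.
 */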
static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
						 unsigned long size)
{
	register unsigned long reg0 asm("0") = 0x01UL;
	unsigned long tmp1, tmp2;

	tmp1 = -4096UL;
	asm volatile(
		"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
		"6: jz    4f\n"
		"1: algr  %0,%3\n"
		"   slgr  %1,%3\n"
		"   slgr  %2,%3\n"
		"   j     0b\n"
		"2: la    %4,4095(%1)\n"/* %4 = ptr + 4095 */
		"   nr    %4,%3\n"	/* %4 = (ptr + 4095) & -4096 */
		"   slgr  %4,%1\n"
		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
		"   jnh   5f\n"
		"3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
		"7: slgr  %0,%4\n"
		"   j     5f\n"
		"4: slgr  %0,%0\n"
		"5:\n"
		EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return size;
}

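/*
 * Fallback used when MVCOS is not available: MVCP moves data from the
 * secondary address space (the user ASCE installed in control register 7
 * by enable_sacf_uaccess()) to the primary address space (kernel ASCE).
 * "sacf 0" switches to primary-space mode, which mvcp/mvcs require, and
 * "sacf 768" switches back to home-space mode.  Data is moved in 256 byte
 * blocks; after a fault the copy is retried with a reduced length so that
 * the number of bytes not copied can be returned.
 */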
static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
						unsigned long size)
{
	unsigned long tmp1, tmp2;
	mm_segment_t old_fs;

	old_fs = enable_sacf_uaccess();
	tmp1 = -256UL;
	asm volatile(
		"   sacf  0\n"
		"0: mvcp  0(%0,%2),0(%1),%3\n"
		"7: jz    5f\n"
		"1: algr  %0,%3\n"
		"   la    %1,256(%1)\n"
		"   la    %2,256(%2)\n"
		"2: mvcp  0(%0,%2),0(%1),%3\n"
		"8: jnz   1b\n"
		"   j     5f\n"
		"3: la    %4,255(%1)\n"	/* %4 = ptr + 255 */
		"   lghi  %3,-4096\n"
		"   nr    %4,%3\n"	/* %4 = (ptr + 255) & -4096 */
		"   slgr  %4,%1\n"
		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
		"   jnh   6f\n"
		"4: mvcp  0(%4,%2),0(%1),%3\n"
		"9: slgr  %0,%4\n"
		"   j     6f\n"
		"5: slgr  %0,%0\n"
		"6: sacf  768\n"
		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
		EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: : "cc", "memory");
	disable_sacf_uaccess(old_fs);
	return size;
}

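/*
 * raw_copy_from_user() and the raw_copy_to_user()/raw_copy_in_user()
 * variants below dispatch to the MVCOS implementation when the facility
 * is available and to the SACF based fallback otherwise.  All of them
 * return the number of bytes that could not be copied.
 */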
unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (copy_with_mvcos())
		return copy_from_user_mvcos(to, from, n);
	return copy_from_user_mvcp(to, from, n);
}
EXPORT_SYMBOL(raw_copy_from_user);

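/*
 * Copy to user space with MVCOS.  Same structure as copy_from_user_mvcos(),
 * but the operand-access controls in register 0 now select the user
 * address space for the destination operand.
 */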
static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
					       unsigned long size)
{
	register unsigned long reg0 asm("0") = 0x010000UL;
	unsigned long tmp1, tmp2;

	tmp1 = -4096UL;
	asm volatile(
		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
		"6: jz    4f\n"
		"1: algr  %0,%3\n"
		"   slgr  %1,%3\n"
		"   slgr  %2,%3\n"
		"   j     0b\n"
		"2: la    %4,4095(%1)\n"/* %4 = ptr + 4095 */
		"   nr    %4,%3\n"	/* %4 = (ptr + 4095) & -4096 */
		"   slgr  %4,%1\n"
		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
		"   jnh   5f\n"
		"3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
		"7: slgr  %0,%4\n"
		"   j     5f\n"
		"4: slgr  %0,%0\n"
		"5:\n"
		EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return size;
}

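/*
 * SACF based fallback for copying to user space: MVCS moves data from
 * the primary address space (kernel ASCE) to the secondary address space
 * (user ASCE).  Otherwise mirrors copy_from_user_mvcp().
 */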
static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
					      unsigned long size)
{
	unsigned long tmp1, tmp2;
	mm_segment_t old_fs;

	old_fs = enable_sacf_uaccess();
	tmp1 = -256UL;
	asm volatile(
		"   sacf  0\n"
		"0: mvcs  0(%0,%1),0(%2),%3\n"
		"7: jz    5f\n"
		"1: algr  %0,%3\n"
		"   la    %1,256(%1)\n"
		"   la    %2,256(%2)\n"
		"2: mvcs  0(%0,%1),0(%2),%3\n"
		"8: jnz   1b\n"
		"   j     5f\n"
		"3: la    %4,255(%1)\n" /* %4 = ptr + 255 */
		"   lghi  %3,-4096\n"
		"   nr    %4,%3\n"	/* %4 = (ptr + 255) & -4096 */
		"   slgr  %4,%1\n"
		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
		"   jnh   6f\n"
		"4: mvcs  0(%4,%1),0(%2),%3\n"
		"9: slgr  %0,%4\n"
		"   j     6f\n"
		"5: slgr  %0,%0\n"
		"6: sacf  768\n"
		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
		EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: : "cc", "memory");
	disable_sacf_uaccess(old_fs);
	return size;
}

unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (copy_with_mvcos())
		return copy_to_user_mvcos(to, from, n);
	return copy_to_user_mvcs(to, from, n);
}
EXPORT_SYMBOL(raw_copy_to_user);

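/*
 * Copy within user space with MVCOS: the operand-access controls select
 * the user address space for both operands.  Unlike the other MVCOS
 * helpers the fixup code does not retry with a reduced length, it only
 * reports the remaining byte count (see the FIXME below).
 */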
static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
					       unsigned long size)
{
	register unsigned long reg0 asm("0") = 0x010001UL;
	unsigned long tmp1, tmp2;

	tmp1 = -4096UL;
	/* FIXME: copy with reduced length. */
	asm volatile(
		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
		"   jz	  2f\n"
		"1: algr  %0,%3\n"
		"   slgr  %1,%3\n"
		"   slgr  %2,%3\n"
		"   j	  0b\n"
		"2: slgr  %0,%0\n"
		"3: \n"
		EX_TABLE(0b,3b)
		: "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return size;
}

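/*
 * SACF based fallback for copying within user space: "sacf 256" switches
 * to secondary-space mode, so a plain MVC operates on user addresses for
 * both operands.  The copy runs in 256 byte blocks with an EXECUTE of the
 * single byte MVC for the remainder; after a fault it falls back to a
 * byte-at-a-time loop so that the number of bytes not copied is returned.
 */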
static inline unsigned long copy_in_user_mvc(void __user *to, const void __user *from,
					     unsigned long size)
{
	mm_segment_t old_fs;
	unsigned long tmp1;

	old_fs = enable_sacf_uaccess();
	asm volatile(
		"   sacf  256\n"
		"   aghi  %0,-1\n"
		"   jo	  5f\n"
		"   bras  %3,3f\n"
		"0: aghi  %0,257\n"
		"1: mvc	  0(1,%1),0(%2)\n"
		"   la	  %1,1(%1)\n"
		"   la	  %2,1(%2)\n"
		"   aghi  %0,-1\n"
		"   jnz	  1b\n"
		"   j	  5f\n"
		"2: mvc	  0(256,%1),0(%2)\n"
		"   la	  %1,256(%1)\n"
		"   la	  %2,256(%2)\n"
		"3: aghi  %0,-256\n"
		"   jnm	  2b\n"
		"4: ex	  %0,1b-0b(%3)\n"
		"5: slgr  %0,%0\n"
		"6: sacf  768\n"
		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
		: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
		: : "cc", "memory");
	disable_sacf_uaccess(old_fs);
	return size;
}

unsigned long raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	if (copy_with_mvcos())
		return copy_in_user_mvcos(to, from, n);
	return copy_in_user_mvc(to, from, n);
}
EXPORT_SYMBOL(raw_copy_in_user);

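/*
 * Clear user space memory with MVCOS by copying from empty_zero_page,
 * with the operand-access controls selecting the user address space for
 * the destination.  Uses the same 4K stepping and page boundary fixup
 * pattern as copy_to_user_mvcos().
 */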
static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
{
	register unsigned long reg0 asm("0") = 0x010000UL;
	unsigned long tmp1, tmp2;

	tmp1 = -4096UL;
	asm volatile(
		"0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
		"   jz	  4f\n"
		"1: algr  %0,%2\n"
		"   slgr  %1,%2\n"
		"   j	  0b\n"
		"2: la	  %3,4095(%1)\n"/* %3 = to + 4095 */
		"   nr	  %3,%2\n"	/* %3 = (to + 4095) & -4096 */
		"   slgr  %3,%1\n"
		"   clgr  %0,%3\n"	/* copy crosses next page boundary? */
		"   jnh	  5f\n"
		"3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
		"   slgr  %0,%3\n"
		"   j	  5f\n"
		"4: slgr  %0,%0\n"
		"5:\n"
		EX_TABLE(0b,2b) EX_TABLE(3b,5b)
		: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
		: "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
	return size;
}

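/*
 * SACF based fallback for clearing user space memory: in secondary-space
 * mode an XC of the destination with itself zeroes it.  The clear runs in
 * 256 byte blocks with an EXECUTE of the single byte XC for the remainder;
 * faults are handled via the exception table fixups.
 */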
static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
{
	mm_segment_t old_fs;
	unsigned long tmp1, tmp2;

	old_fs = enable_sacf_uaccess();
	asm volatile(
		"   sacf  256\n"
		"   aghi  %0,-1\n"
		"   jo    5f\n"
		"   bras  %3,3f\n"
		"   xc    0(1,%1),0(%1)\n"
		"0: aghi  %0,257\n"
		"   la    %2,255(%1)\n" /* %2 = ptr + 255 */
		"   srl   %2,12\n"
		"   sll   %2,12\n"	/* %2 = (ptr + 255) & -4096 */
		"   slgr  %2,%1\n"
		"   clgr  %0,%2\n"	/* clear crosses next page boundary? */
		"   jnh   5f\n"
		"   aghi  %2,-1\n"
		"1: ex    %2,0(%3)\n"
		"   aghi  %2,1\n"
		"   slgr  %0,%2\n"
		"   j     5f\n"
		"2: xc    0(256,%1),0(%1)\n"
		"   la    %1,256(%1)\n"
		"3: aghi  %0,-256\n"
		"   jnm   2b\n"
		"4: ex    %0,0(%3)\n"
		"5: slgr  %0,%0\n"
		"6: sacf  768\n"
		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
		: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
		: : "cc", "memory");
	disable_sacf_uaccess(old_fs);
	return size;
}

unsigned long __clear_user(void __user *to, unsigned long size)
{
	if (copy_with_mvcos())
		return clear_user_mvcos(to, size);
	return clear_user_xc(to, size);
}
EXPORT_SYMBOL(__clear_user);

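/*
 * Find the length of a string in user space with SRST (search string):
 * register 0 holds the character to search for (the terminating NUL),
 * %2 the start and %3 the end of the searched range.  SRST sets condition
 * code 3 when it stops early, so the "jo" resumes the search.  "sacf 256"
 * switches to secondary-space mode; the secondary ASCE has been set up by
 * the caller via enable_sacf_uaccess().
 */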
static inline unsigned long strnlen_user_srst(const char __user *src,
					      unsigned long size)
{
	register unsigned long reg0 asm("0") = 0;
	unsigned long tmp1, tmp2;

	asm volatile(
		"   la    %2,0(%1)\n"
		"   la    %3,0(%0,%1)\n"
		"   slgr  %0,%0\n"
		"   sacf  256\n"
		"0: srst  %3,%2\n"
		"   jo    0b\n"
		"   la    %0,1(%3)\n"	/* strnlen_user result includes the \0 */
		"   slgr  %0,%1\n"
		"1: sacf  768\n"
		EX_TABLE(0b,1b)
		: "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return size;
}

unsigned long __strnlen_user(const char __user *src, unsigned long size)
{
	mm_segment_t old_fs;
	unsigned long len;

	if (unlikely(!size))
		return 0;
	old_fs = enable_sacf_uaccess();
	len = strnlen_user_srst(src, size);
	disable_sacf_uaccess(old_fs);
	return len;
}
EXPORT_SYMBOL(__strnlen_user);

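/*
 * Copy a NUL terminated string from user space.  The string is copied in
 * cache line sized chunks with copy_from_user() and scanned for the
 * terminating NUL with strnlen(), so the copy never reads past the cache
 * line that holds the terminator.  Returns the length of the copied
 * string (not counting the NUL) or -EFAULT on a fault.
 */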
long __strncpy_from_user(char *dst, const char __user *src, long size)
{
	size_t done, len, offset, len_str;

	if (unlikely(size <= 0))
		return 0;
	done = 0;
	do {
		offset = (size_t)src & (L1_CACHE_BYTES - 1);
		len = min(size - done, L1_CACHE_BYTES - offset);
		if (copy_from_user(dst, src, len))
			return -EFAULT;
		len_str = strnlen(dst, len);
		done += len_str;
		src += len_str;
		dst += len_str;
	} while ((len_str == len) && (done < size));
	return done;
}
EXPORT_SYMBOL(__strncpy_from_user);
449