// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/helper_value_access.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

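/* Map fixtures used by the tests below: hash maps with 16-byte
 * (struct other_val), 48-byte (struct test_val) and 8-byte (long long)
 * values, all keyed by a long long.
 */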
struct other_val {
	long long foo;
	long long bar;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, struct other_val);
} map_hash_16b SEC(".maps");

#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, struct test_val);
} map_hash_48b SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, long long);
} map_hash_8b SEC(".maps");

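/* First group: a map value pointer is passed to a helper together with a
 * constant size in r2. The verifier accepts sizes that stay within the
 * 48-byte value and rejects zero, out-of-bound and negative sizes.
 */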
SEC("tracepoint")
__description("helper access to map: full range")
__success
__naked void access_to_map_full_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r2 = %[sizeof_test_val];			\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(sizeof_test_val, sizeof(struct test_val))
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to map: partial range")
__success
__naked void access_to_map_partial_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r2 = 8;						\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to map: empty range")
__failure __msg("invalid access to map value, value_size=48 off=0 size=0")
__naked void access_to_map_empty_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r2 = 0;						\
	call %[bpf_trace_printk];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_trace_printk),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to map: out-of-bound range")
__failure __msg("invalid access to map value, value_size=48 off=0 size=56")
__naked void map_out_of_bound_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r2 = %[__imm_0];				\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, sizeof(struct test_val) + 8)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to map: negative range")
__failure __msg("R2 min value is negative")
__naked void access_to_map_negative_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r2 = -8;					\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}

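/* Second group: the value pointer is first advanced by a constant
 * immediate, offsetof(struct test_val, foo), so the range the helper may
 * touch shrinks by the adjustment. A rough C equivalent of the full-range
 * test (a sketch, not part of the suite):
 *
 *	struct test_val *v = bpf_map_lookup_elem(&map_hash_48b, &key);
 *	if (v)
 *		bpf_probe_read_kernel(v->foo,
 *				      sizeof(*v) - offsetof(struct test_val, foo),
 *				      NULL);
 */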
SEC("tracepoint")
__description("helper access to adjusted map (via const imm): full range")
__success
__naked void via_const_imm_full_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r1 += %[test_val_foo];				\
	r2 = %[__imm_0];				\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo)),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to adjusted map (via const imm): partial range")
__success
__naked void via_const_imm_partial_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r1 += %[test_val_foo];				\
	r2 = 8;						\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to adjusted map (via const imm): empty range")
__failure __msg("invalid access to map value, value_size=48 off=4 size=0")
__naked void via_const_imm_empty_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r1 += %[test_val_foo];				\
	r2 = 0;						\
	call %[bpf_trace_printk];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_trace_printk),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to adjusted map (via const imm): out-of-bound range")
__failure __msg("invalid access to map value, value_size=48 off=4 size=52")
__naked void imm_out_of_bound_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r1 += %[test_val_foo];				\
	r2 = %[__imm_0];				\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo) + 8),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to adjusted map (via const imm): negative range (> adjustment)")
__failure __msg("R2 min value is negative")
__naked void const_imm_negative_range_adjustment_1(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r1 += %[test_val_foo];				\
	r2 = -8;					\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to adjusted map (via const imm): negative range (< adjustment)")
__failure __msg("R2 min value is negative")
__naked void const_imm_negative_range_adjustment_2(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r1 += %[test_val_foo];				\
	r2 = -1;					\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

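/* As above, but the constant offset goes through a register
 * (r3 = const; r1 += r3), exercising the verifier's tracking of known
 * scalar values rather than immediate operands.
 */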
SEC("tracepoint")
__description("helper access to adjusted map (via const reg): full range")
__success
__naked void via_const_reg_full_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = %[test_val_foo];				\
	r1 += r3;					\
	r2 = %[__imm_0];				\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo)),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to adjusted map (via const reg): partial range")
__success
__naked void via_const_reg_partial_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = %[test_val_foo];				\
	r1 += r3;					\
	r2 = 8;						\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to adjusted map (via const reg): empty range")
__failure __msg("R1 min value is outside of the allowed memory range")
__naked void via_const_reg_empty_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = 0;						\
	r1 += r3;					\
	r2 = 0;						\
	call %[bpf_trace_printk];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_trace_printk),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to adjusted map (via const reg): out-of-bound range")
__failure __msg("invalid access to map value, value_size=48 off=4 size=52")
__naked void reg_out_of_bound_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = %[test_val_foo];				\
	r1 += r3;					\
	r2 = %[__imm_0];				\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo) + 8),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to adjusted map (via const reg): negative range (> adjustment)")
__failure __msg("R2 min value is negative")
__naked void const_reg_negative_range_adjustment_1(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = %[test_val_foo];				\
	r1 += r3;					\
	r2 = -8;					\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to adjusted map (via const reg): negative range (< adjustment)")
__failure __msg("R2 min value is negative")
__naked void const_reg_negative_range_adjustment_2(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = %[test_val_foo];				\
	r1 += r3;					\
	r2 = -1;					\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

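/* Here the offset is loaded from the map value itself, so the verifier
 * only knows a range for it: the helper call is accepted only after an
 * upper-bound check on the offset. A rough C equivalent of the pattern
 * (a sketch):
 *
 *	struct test_val *v = bpf_map_lookup_elem(&map_hash_48b, &key);
 *	if (v && v->index <= offsetof(struct test_val, foo))
 *		bpf_probe_read_kernel((char *)v + v->index,
 *				      sizeof(*v) - offsetof(struct test_val, foo),
 *				      NULL);
 */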
SEC("tracepoint")
__description("helper access to adjusted map (via variable): full range")
__success
__naked void map_via_variable_full_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	if r3 > %[test_val_foo] goto l0_%=;		\
	r1 += r3;					\
	r2 = %[__imm_0];				\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo)),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to adjusted map (via variable): partial range")
__success
__naked void map_via_variable_partial_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	if r3 > %[test_val_foo] goto l0_%=;		\
	r1 += r3;					\
	r2 = 8;						\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to adjusted map (via variable): empty range")
__failure __msg("R1 min value is outside of the allowed memory range")
__naked void map_via_variable_empty_range(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	if r3 > %[test_val_foo] goto l0_%=;		\
	r1 += r3;					\
	r2 = 0;						\
	call %[bpf_trace_printk];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_trace_printk),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to adjusted map (via variable): no max check")
__failure __msg("R1 unbounded memory access")
__naked void via_variable_no_max_check_1(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	r1 += r3;					\
	r2 = 1;						\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to adjusted map (via variable): wrong max check")
__failure __msg("invalid access to map value, value_size=48 off=4 size=45")
__naked void via_variable_wrong_max_check_1(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	if r3 > %[test_val_foo] goto l0_%=;		\
	r1 += r3;					\
	r2 = %[__imm_0];				\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo) + 1),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

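/* Direct (non-helper) stores through a variable offset: "<" and "<="
 * against a constant give the verifier an upper bound on the unsigned
 * offset. In the "bad access" variants the store is also reachable on
 * the path where the offset is unbounded, so the program is rejected.
 */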
SEC("tracepoint")
__description("helper access to map: bounds check using <, good access")
__success
__naked void bounds_check_using_good_access_1(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	if r3 < 32 goto l1_%=;				\
	r0 = 0;						\
l0_%=:	exit;						\
l1_%=:	r1 += r3;					\
	r0 = 0;						\
	*(u8*)(r1 + 0) = r0;				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to map: bounds check using <, bad access")
__failure __msg("R1 unbounded memory access")
__naked void bounds_check_using_bad_access_1(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	if r3 < 32 goto l1_%=;				\
	r1 += r3;					\
l0_%=:	r0 = 0;						\
	*(u8*)(r1 + 0) = r0;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to map: bounds check using <=, good access")
__success
__naked void bounds_check_using_good_access_2(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	if r3 <= 32 goto l1_%=;				\
	r0 = 0;						\
l0_%=:	exit;						\
l1_%=:	r1 += r3;					\
	r0 = 0;						\
	*(u8*)(r1 + 0) = r0;				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to map: bounds check using <=, bad access")
__failure __msg("R1 unbounded memory access")
__naked void bounds_check_using_bad_access_2(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	if r3 <= 32 goto l1_%=;				\
	r1 += r3;					\
l0_%=:	r0 = 0;						\
	*(u8*)(r1 + 0) = r0;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}

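/* Signed comparisons only bound the offset from above, so a second check
 * is needed before the access. In the "good" variants the offset comes
 * from a u32 load and is therefore zero-extended, never negative; the
 * "bad" variants load a u64, where genuinely negative values survive the
 * "s<" / "s<=" checks and the verifier reports a negative minimum.
 */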
SEC("tracepoint")
__description("helper access to map: bounds check using s<, good access")
__success
__naked void check_using_s_good_access_1(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	if r3 s< 32 goto l1_%=;				\
l2_%=:	r0 = 0;						\
l0_%=:	exit;						\
l1_%=:	if r3 s< 0 goto l2_%=;				\
	r1 += r3;					\
	r0 = 0;						\
	*(u8*)(r1 + 0) = r0;				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to map: bounds check using s<, good access 2")
__success
__naked void using_s_good_access_2_1(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	if r3 s< 32 goto l1_%=;				\
l2_%=:	r0 = 0;						\
l0_%=:	exit;						\
l1_%=:	if r3 s< -3 goto l2_%=;				\
	r1 += r3;					\
	r0 = 0;						\
	*(u8*)(r1 + 0) = r0;				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to map: bounds check using s<, bad access")
__failure __msg("R1 min value is negative")
__naked void check_using_s_bad_access_1(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u64*)(r0 + 0);				\
	if r3 s< 32 goto l1_%=;				\
l2_%=:	r0 = 0;						\
l0_%=:	exit;						\
l1_%=:	if r3 s< -3 goto l2_%=;				\
	r1 += r3;					\
	r0 = 0;						\
	*(u8*)(r1 + 0) = r0;				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to map: bounds check using s<=, good access")
__success
__naked void check_using_s_good_access_2(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	if r3 s<= 32 goto l1_%=;			\
l2_%=:	r0 = 0;						\
l0_%=:	exit;						\
l1_%=:	if r3 s<= 0 goto l2_%=;				\
	r1 += r3;					\
	r0 = 0;						\
	*(u8*)(r1 + 0) = r0;				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to map: bounds check using s<=, good access 2")
__success
__naked void using_s_good_access_2_2(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	if r3 s<= 32 goto l1_%=;			\
l2_%=:	r0 = 0;						\
l0_%=:	exit;						\
l1_%=:	if r3 s<= -3 goto l2_%=;			\
	r1 += r3;					\
	r0 = 0;						\
	*(u8*)(r1 + 0) = r0;				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to map: bounds check using s<=, bad access")
__failure __msg("R1 min value is negative")
__naked void check_using_s_bad_access_2(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r3 = *(u64*)(r0 + 0);				\
	if r3 s<= 32 goto l1_%=;			\
l2_%=:	r0 = 0;						\
l0_%=:	exit;						\
l1_%=:	if r3 s<= -3 goto l2_%=;			\
	r1 += r3;					\
	r0 = 0;						\
	*(u8*)(r1 + 0) = r0;				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}

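/* A map value pointer can also be passed back into map helpers as the
 * key or value argument; it is then checked against that map's key and
 * value sizes.
 */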
SEC("tracepoint")
__description("map lookup helper access to map")
__success
__naked void lookup_helper_access_to_map(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r2 = r0;					\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_16b)
	: __clobber_all);
}

SEC("tracepoint")
__description("map update helper access to map")
__success
__naked void update_helper_access_to_map(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r4 = 0;						\
	r3 = r0;					\
	r2 = r0;					\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_update_elem];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_map_update_elem),
	  __imm_addr(map_hash_16b)
	: __clobber_all);
}

SEC("tracepoint")
__description("map update helper access to map: wrong size")
__failure __msg("invalid access to map value, value_size=8 off=0 size=16")
__naked void access_to_map_wrong_size(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r4 = 0;						\
	r3 = r0;					\
	r2 = r0;					\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_update_elem];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_map_update_elem),
	  __imm_addr(map_hash_16b),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

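/* Final group: "value + offset" from one lookup is reused as the key for
 * another lookup into map_hash_16b (8-byte keys), which works as long as
 * offset + 8 stays inside the 16-byte value, whether the offset is an
 * immediate, a constant register, or a bounded variable.
 */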
SEC("tracepoint")
__description("map helper access to adjusted map (via const imm)")
__success
__naked void adjusted_map_via_const_imm(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r2 = r0;					\
	r2 += %[other_val_bar];				\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_16b),
	  __imm_const(other_val_bar, offsetof(struct other_val, bar))
	: __clobber_all);
}

SEC("tracepoint")
__description("map helper access to adjusted map (via const imm): out-of-bound 1")
__failure __msg("invalid access to map value, value_size=16 off=12 size=8")
__naked void imm_out_of_bound_1(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r2 = r0;					\
	r2 += %[__imm_0];				\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_16b),
	  __imm_const(__imm_0, sizeof(struct other_val) - 4)
	: __clobber_all);
}

SEC("tracepoint")
__description("map helper access to adjusted map (via const imm): out-of-bound 2")
__failure __msg("invalid access to map value, value_size=16 off=-4 size=8")
__naked void imm_out_of_bound_2(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r2 = r0;					\
	r2 += -4;					\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_16b)
	: __clobber_all);
}

SEC("tracepoint")
__description("map helper access to adjusted map (via const reg)")
__success
__naked void adjusted_map_via_const_reg(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r2 = r0;					\
	r3 = %[other_val_bar];				\
	r2 += r3;					\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_16b),
	  __imm_const(other_val_bar, offsetof(struct other_val, bar))
	: __clobber_all);
}

SEC("tracepoint")
__description("map helper access to adjusted map (via const reg): out-of-bound 1")
__failure __msg("invalid access to map value, value_size=16 off=12 size=8")
__naked void reg_out_of_bound_1(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r2 = r0;					\
	r3 = %[__imm_0];				\
	r2 += r3;					\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_16b),
	  __imm_const(__imm_0, sizeof(struct other_val) - 4)
	: __clobber_all);
}

SEC("tracepoint")
__description("map helper access to adjusted map (via const reg): out-of-bound 2")
__failure __msg("invalid access to map value, value_size=16 off=-4 size=8")
__naked void reg_out_of_bound_2(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r2 = r0;					\
	r3 = -4;					\
	r2 += r3;					\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_16b)
	: __clobber_all);
}

SEC("tracepoint")
__description("map helper access to adjusted map (via variable)")
__success
__naked void to_adjusted_map_via_variable(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r2 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	if r3 > %[other_val_bar] goto l0_%=;		\
	r2 += r3;					\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_16b),
	  __imm_const(other_val_bar, offsetof(struct other_val, bar))
	: __clobber_all);
}

SEC("tracepoint")
__description("map helper access to adjusted map (via variable): no max check")
__failure
__msg("R2 unbounded memory access, make sure to bounds check any such access")
__naked void via_variable_no_max_check_2(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r2 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	r2 += r3;					\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_16b)
	: __clobber_all);
}

SEC("tracepoint")
__description("map helper access to adjusted map (via variable): wrong max check")
__failure __msg("invalid access to map value, value_size=16 off=9 size=8")
__naked void via_variable_wrong_max_check_2(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r2 = r0;					\
	r3 = *(u32*)(r0 + 0);				\
	if r3 > %[__imm_0] goto l0_%=;			\
	r2 += r3;					\
	r1 = %[map_hash_16b] ll;			\
	call %[bpf_map_lookup_elem];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_16b),
	  __imm_const(__imm_0, offsetof(struct other_val, bar) + 1)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";