xref: /linux/tools/include/uapi/linux/bpf.h (revision 19d0070a2792181f79df01277fe00b83b9f7eda7)
1 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of version 2 of the GNU General Public
6  * License as published by the Free Software Foundation.
7  */
8 #ifndef _UAPI__LINUX_BPF_H__
9 #define _UAPI__LINUX_BPF_H__
10 
11 #include <linux/types.h>
12 #include <linux/bpf_common.h>
13 
14 /* Extended instruction set based on top of classic BPF */
15 
16 /* instruction classes */
17 #define BPF_JMP32	0x06	/* jmp mode in word width */
18 #define BPF_ALU64	0x07	/* alu mode in double word width */
19 
20 /* ld/ldx fields */
21 #define BPF_DW		0x18	/* double word (64-bit) */
22 #define BPF_XADD	0xc0	/* exclusive add */
23 
24 /* alu/jmp fields */
25 #define BPF_MOV		0xb0	/* mov reg to reg */
26 #define BPF_ARSH	0xc0	/* sign extending arithmetic shift right */
27 
28 /* change endianness of a register */
29 #define BPF_END		0xd0	/* flags for endianness conversion: */
30 #define BPF_TO_LE	0x00	/* convert to little-endian */
31 #define BPF_TO_BE	0x08	/* convert to big-endian */
32 #define BPF_FROM_LE	BPF_TO_LE
33 #define BPF_FROM_BE	BPF_TO_BE
34 
35 /* jmp encodings */
36 #define BPF_JNE		0x50	/* jump != */
37 #define BPF_JLT		0xa0	/* LT is unsigned, '<' */
38 #define BPF_JLE		0xb0	/* LE is unsigned, '<=' */
39 #define BPF_JSGT	0x60	/* SGT is signed '>', GT in x86 */
40 #define BPF_JSGE	0x70	/* SGE is signed '>=', GE in x86 */
41 #define BPF_JSLT	0xc0	/* SLT is signed, '<' */
42 #define BPF_JSLE	0xd0	/* SLE is signed, '<=' */
43 #define BPF_CALL	0x80	/* function call */
44 #define BPF_EXIT	0x90	/* function return */
45 
46 /* Register numbers */
47 enum {
48 	BPF_REG_0 = 0,
49 	BPF_REG_1,
50 	BPF_REG_2,
51 	BPF_REG_3,
52 	BPF_REG_4,
53 	BPF_REG_5,
54 	BPF_REG_6,
55 	BPF_REG_7,
56 	BPF_REG_8,
57 	BPF_REG_9,
58 	BPF_REG_10,
59 	__MAX_BPF_REG,
60 };
61 
62 /* BPF has 10 general purpose 64-bit registers and stack frame. */
63 #define MAX_BPF_REG	__MAX_BPF_REG
64 
65 struct bpf_insn {
66 	__u8	code;		/* opcode */
67 	__u8	dst_reg:4;	/* dest register */
68 	__u8	src_reg:4;	/* source register */
69 	__s16	off;		/* signed offset */
70 	__s32	imm;		/* signed immediate constant */
71 };
72 
73 /* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */
74 struct bpf_lpm_trie_key {
75 	__u32	prefixlen;	/* up to 32 for AF_INET, 128 for AF_INET6 */
76 	__u8	data[0];	/* Arbitrary size */
77 };
78 
79 struct bpf_cgroup_storage_key {
80 	__u64	cgroup_inode_id;	/* cgroup inode id */
81 	__u32	attach_type;		/* program attach type */
82 };
83 
84 /* BPF syscall commands, see bpf(2) man-page for details. */
85 enum bpf_cmd {
86 	BPF_MAP_CREATE,
87 	BPF_MAP_LOOKUP_ELEM,
88 	BPF_MAP_UPDATE_ELEM,
89 	BPF_MAP_DELETE_ELEM,
90 	BPF_MAP_GET_NEXT_KEY,
91 	BPF_PROG_LOAD,
92 	BPF_OBJ_PIN,
93 	BPF_OBJ_GET,
94 	BPF_PROG_ATTACH,
95 	BPF_PROG_DETACH,
96 	BPF_PROG_TEST_RUN,
97 	BPF_PROG_GET_NEXT_ID,
98 	BPF_MAP_GET_NEXT_ID,
99 	BPF_PROG_GET_FD_BY_ID,
100 	BPF_MAP_GET_FD_BY_ID,
101 	BPF_OBJ_GET_INFO_BY_FD,
102 	BPF_PROG_QUERY,
103 	BPF_RAW_TRACEPOINT_OPEN,
104 	BPF_BTF_LOAD,
105 	BPF_BTF_GET_FD_BY_ID,
106 	BPF_TASK_FD_QUERY,
107 	BPF_MAP_LOOKUP_AND_DELETE_ELEM,
108 	BPF_MAP_FREEZE,
109 	BPF_BTF_GET_NEXT_ID,
110 	BPF_MAP_LOOKUP_BATCH,
111 	BPF_MAP_LOOKUP_AND_DELETE_BATCH,
112 	BPF_MAP_UPDATE_BATCH,
113 	BPF_MAP_DELETE_BATCH,
114 	BPF_LINK_CREATE,
115 	BPF_LINK_UPDATE,
116 	BPF_LINK_GET_FD_BY_ID,
117 	BPF_LINK_GET_NEXT_ID,
118 	BPF_ENABLE_STATS,
119 	BPF_ITER_CREATE,
120 };
121 
122 enum bpf_map_type {
123 	BPF_MAP_TYPE_UNSPEC,
124 	BPF_MAP_TYPE_HASH,
125 	BPF_MAP_TYPE_ARRAY,
126 	BPF_MAP_TYPE_PROG_ARRAY,
127 	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
128 	BPF_MAP_TYPE_PERCPU_HASH,
129 	BPF_MAP_TYPE_PERCPU_ARRAY,
130 	BPF_MAP_TYPE_STACK_TRACE,
131 	BPF_MAP_TYPE_CGROUP_ARRAY,
132 	BPF_MAP_TYPE_LRU_HASH,
133 	BPF_MAP_TYPE_LRU_PERCPU_HASH,
134 	BPF_MAP_TYPE_LPM_TRIE,
135 	BPF_MAP_TYPE_ARRAY_OF_MAPS,
136 	BPF_MAP_TYPE_HASH_OF_MAPS,
137 	BPF_MAP_TYPE_DEVMAP,
138 	BPF_MAP_TYPE_SOCKMAP,
139 	BPF_MAP_TYPE_CPUMAP,
140 	BPF_MAP_TYPE_XSKMAP,
141 	BPF_MAP_TYPE_SOCKHASH,
142 	BPF_MAP_TYPE_CGROUP_STORAGE,
143 	BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
144 	BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
145 	BPF_MAP_TYPE_QUEUE,
146 	BPF_MAP_TYPE_STACK,
147 	BPF_MAP_TYPE_SK_STORAGE,
148 	BPF_MAP_TYPE_DEVMAP_HASH,
149 	BPF_MAP_TYPE_STRUCT_OPS,
150 	BPF_MAP_TYPE_RINGBUF,
151 };
152 
153 /* Note that tracing related programs such as
154  * BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT}
155  * are not subject to a stable API since kernel internal data
156  * structures can change from release to release and may
157  * therefore break existing tracing BPF programs. Tracing BPF
158  * programs correspond to /a/ specific kernel which is to be
159  * analyzed, and not /a/ specific kernel /and/ all future ones.
160  */
161 enum bpf_prog_type {
162 	BPF_PROG_TYPE_UNSPEC,
163 	BPF_PROG_TYPE_SOCKET_FILTER,
164 	BPF_PROG_TYPE_KPROBE,
165 	BPF_PROG_TYPE_SCHED_CLS,
166 	BPF_PROG_TYPE_SCHED_ACT,
167 	BPF_PROG_TYPE_TRACEPOINT,
168 	BPF_PROG_TYPE_XDP,
169 	BPF_PROG_TYPE_PERF_EVENT,
170 	BPF_PROG_TYPE_CGROUP_SKB,
171 	BPF_PROG_TYPE_CGROUP_SOCK,
172 	BPF_PROG_TYPE_LWT_IN,
173 	BPF_PROG_TYPE_LWT_OUT,
174 	BPF_PROG_TYPE_LWT_XMIT,
175 	BPF_PROG_TYPE_SOCK_OPS,
176 	BPF_PROG_TYPE_SK_SKB,
177 	BPF_PROG_TYPE_CGROUP_DEVICE,
178 	BPF_PROG_TYPE_SK_MSG,
179 	BPF_PROG_TYPE_RAW_TRACEPOINT,
180 	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
181 	BPF_PROG_TYPE_LWT_SEG6LOCAL,
182 	BPF_PROG_TYPE_LIRC_MODE2,
183 	BPF_PROG_TYPE_SK_REUSEPORT,
184 	BPF_PROG_TYPE_FLOW_DISSECTOR,
185 	BPF_PROG_TYPE_CGROUP_SYSCTL,
186 	BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
187 	BPF_PROG_TYPE_CGROUP_SOCKOPT,
188 	BPF_PROG_TYPE_TRACING,
189 	BPF_PROG_TYPE_STRUCT_OPS,
190 	BPF_PROG_TYPE_EXT,
191 	BPF_PROG_TYPE_LSM,
192 };
193 
194 enum bpf_attach_type {
195 	BPF_CGROUP_INET_INGRESS,
196 	BPF_CGROUP_INET_EGRESS,
197 	BPF_CGROUP_INET_SOCK_CREATE,
198 	BPF_CGROUP_SOCK_OPS,
199 	BPF_SK_SKB_STREAM_PARSER,
200 	BPF_SK_SKB_STREAM_VERDICT,
201 	BPF_CGROUP_DEVICE,
202 	BPF_SK_MSG_VERDICT,
203 	BPF_CGROUP_INET4_BIND,
204 	BPF_CGROUP_INET6_BIND,
205 	BPF_CGROUP_INET4_CONNECT,
206 	BPF_CGROUP_INET6_CONNECT,
207 	BPF_CGROUP_INET4_POST_BIND,
208 	BPF_CGROUP_INET6_POST_BIND,
209 	BPF_CGROUP_UDP4_SENDMSG,
210 	BPF_CGROUP_UDP6_SENDMSG,
211 	BPF_LIRC_MODE2,
212 	BPF_FLOW_DISSECTOR,
213 	BPF_CGROUP_SYSCTL,
214 	BPF_CGROUP_UDP4_RECVMSG,
215 	BPF_CGROUP_UDP6_RECVMSG,
216 	BPF_CGROUP_GETSOCKOPT,
217 	BPF_CGROUP_SETSOCKOPT,
218 	BPF_TRACE_RAW_TP,
219 	BPF_TRACE_FENTRY,
220 	BPF_TRACE_FEXIT,
221 	BPF_MODIFY_RETURN,
222 	BPF_LSM_MAC,
223 	BPF_TRACE_ITER,
224 	BPF_CGROUP_INET4_GETPEERNAME,
225 	BPF_CGROUP_INET6_GETPEERNAME,
226 	BPF_CGROUP_INET4_GETSOCKNAME,
227 	BPF_CGROUP_INET6_GETSOCKNAME,
228 	BPF_XDP_DEVMAP,
229 	__MAX_BPF_ATTACH_TYPE
230 };
231 
232 #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
233 
234 enum bpf_link_type {
235 	BPF_LINK_TYPE_UNSPEC = 0,
236 	BPF_LINK_TYPE_RAW_TRACEPOINT = 1,
237 	BPF_LINK_TYPE_TRACING = 2,
238 	BPF_LINK_TYPE_CGROUP = 3,
239 	BPF_LINK_TYPE_ITER = 4,
240 	BPF_LINK_TYPE_NETNS = 5,
241 
242 	MAX_BPF_LINK_TYPE,
243 };
244 
245 /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
246  *
247  * NONE(default): No further bpf programs allowed in the subtree.
248  *
249  * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program,
250  * the program in this cgroup yields to sub-cgroup program.
251  *
252  * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program,
253  * that cgroup program gets run in addition to the program in this cgroup.
254  *
255  * Only one program is allowed to be attached to a cgroup with
256  * NONE or BPF_F_ALLOW_OVERRIDE flag.
257  * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will
258  * release old program and attach the new one. Attach flags has to match.
259  *
260  * Multiple programs are allowed to be attached to a cgroup with
261  * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order
262  * (those that were attached first, run first)
263  * The programs of sub-cgroup are executed first, then programs of
264  * this cgroup and then programs of parent cgroup.
265  * When children program makes decision (like picking TCP CA or sock bind)
266  * parent program has a chance to override it.
267  *
268  * With BPF_F_ALLOW_MULTI a new program is added to the end of the list of
269  * programs for a cgroup. Though it's possible to replace an old program at
270  * any position by also specifying BPF_F_REPLACE flag and position itself in
271  * replace_bpf_fd attribute. Old program at this position will be released.
272  *
273  * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
274  * A cgroup with NONE doesn't allow any programs in sub-cgroups.
275  * Ex1:
276  * cgrp1 (MULTI progs A, B) ->
277  *    cgrp2 (OVERRIDE prog C) ->
278  *      cgrp3 (MULTI prog D) ->
279  *        cgrp4 (OVERRIDE prog E) ->
280  *          cgrp5 (NONE prog F)
281  * the event in cgrp5 triggers execution of F,D,A,B in that order.
282  * if prog F is detached, the execution is E,D,A,B
283  * if prog F and D are detached, the execution is E,A,B
284  * if prog F, E and D are detached, the execution is C,A,B
285  *
286  * All eligible programs are executed regardless of return code from
287  * earlier programs.
288  */
289 #define BPF_F_ALLOW_OVERRIDE	(1U << 0)
290 #define BPF_F_ALLOW_MULTI	(1U << 1)
291 #define BPF_F_REPLACE		(1U << 2)
292 
293 /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
294  * verifier will perform strict alignment checking as if the kernel
295  * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
296  * and NET_IP_ALIGN defined to 2.
297  */
298 #define BPF_F_STRICT_ALIGNMENT	(1U << 0)
299 
300 /* If BPF_F_ANY_ALIGNMENT is used in BPF_PROF_LOAD command, the
301  * verifier will allow any alignment whatsoever.  On platforms
302  * with strict alignment requirements for loads ands stores (such
303  * as sparc and mips) the verifier validates that all loads and
304  * stores provably follow this requirement.  This flag turns that
305  * checking and enforcement off.
306  *
307  * It is mostly used for testing when we want to validate the
308  * context and memory access aspects of the verifier, but because
309  * of an unaligned access the alignment check would trigger before
310  * the one we are interested in.
311  */
312 #define BPF_F_ANY_ALIGNMENT	(1U << 1)
313 
314 /* BPF_F_TEST_RND_HI32 is used in BPF_PROG_LOAD command for testing purpose.
315  * Verifier does sub-register def/use analysis and identifies instructions whose
316  * def only matters for low 32-bit, high 32-bit is never referenced later
317  * through implicit zero extension. Therefore verifier notifies JIT back-ends
318  * that it is safe to ignore clearing high 32-bit for these instructions. This
319  * saves some back-ends a lot of code-gen. However such optimization is not
320  * necessary on some arches, for example x86_64, arm64 etc, whose JIT back-ends
321  * hence hasn't used verifier's analysis result. But, we really want to have a
322  * way to be able to verify the correctness of the described optimization on
323  * x86_64 on which testsuites are frequently exercised.
324  *
325  * So, this flag is introduced. Once it is set, verifier will randomize high
326  * 32-bit for those instructions who has been identified as safe to ignore them.
327  * Then, if verifier is not doing correct analysis, such randomization will
328  * regress tests to expose bugs.
329  */
330 #define BPF_F_TEST_RND_HI32	(1U << 2)
331 
332 /* The verifier internal test flag. Behavior is undefined */
333 #define BPF_F_TEST_STATE_FREQ	(1U << 3)
334 
335 /* When BPF ldimm64's insn[0].src_reg != 0 then this can have
336  * two extensions:
337  *
338  * insn[0].src_reg:  BPF_PSEUDO_MAP_FD   BPF_PSEUDO_MAP_VALUE
339  * insn[0].imm:      map fd              map fd
340  * insn[1].imm:      0                   offset into value
341  * insn[0].off:      0                   0
342  * insn[1].off:      0                   0
343  * ldimm64 rewrite:  address of map      address of map[0]+offset
344  * verifier type:    CONST_PTR_TO_MAP    PTR_TO_MAP_VALUE
345  */
346 #define BPF_PSEUDO_MAP_FD	1
347 #define BPF_PSEUDO_MAP_VALUE	2
348 
349 /* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
350  * offset to another bpf function
351  */
352 #define BPF_PSEUDO_CALL		1
353 
354 /* flags for BPF_MAP_UPDATE_ELEM command */
355 enum {
356 	BPF_ANY		= 0, /* create new element or update existing */
357 	BPF_NOEXIST	= 1, /* create new element if it didn't exist */
358 	BPF_EXIST	= 2, /* update existing element */
359 	BPF_F_LOCK	= 4, /* spin_lock-ed map_lookup/map_update */
360 };
361 
362 /* flags for BPF_MAP_CREATE command */
363 enum {
364 	BPF_F_NO_PREALLOC	= (1U << 0),
365 /* Instead of having one common LRU list in the
366  * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
367  * which can scale and perform better.
368  * Note, the LRU nodes (including free nodes) cannot be moved
369  * across different LRU lists.
370  */
371 	BPF_F_NO_COMMON_LRU	= (1U << 1),
372 /* Specify numa node during map creation */
373 	BPF_F_NUMA_NODE		= (1U << 2),
374 
375 /* Flags for accessing BPF object from syscall side. */
376 	BPF_F_RDONLY		= (1U << 3),
377 	BPF_F_WRONLY		= (1U << 4),
378 
379 /* Flag for stack_map, store build_id+offset instead of pointer */
380 	BPF_F_STACK_BUILD_ID	= (1U << 5),
381 
382 /* Zero-initialize hash function seed. This should only be used for testing. */
383 	BPF_F_ZERO_SEED		= (1U << 6),
384 
385 /* Flags for accessing BPF object from program side. */
386 	BPF_F_RDONLY_PROG	= (1U << 7),
387 	BPF_F_WRONLY_PROG	= (1U << 8),
388 
389 /* Clone map from listener for newly accepted socket */
390 	BPF_F_CLONE		= (1U << 9),
391 
392 /* Enable memory-mapping BPF map */
393 	BPF_F_MMAPABLE		= (1U << 10),
394 };
395 
396 /* Flags for BPF_PROG_QUERY. */
397 
398 /* Query effective (directly attached + inherited from ancestor cgroups)
399  * programs that will be executed for events within a cgroup.
400  * attach_flags with this flag are returned only for directly attached programs.
401  */
402 #define BPF_F_QUERY_EFFECTIVE	(1U << 0)
403 
404 /* type for BPF_ENABLE_STATS */
405 enum bpf_stats_type {
406 	/* enabled run_time_ns and run_cnt */
407 	BPF_STATS_RUN_TIME = 0,
408 };
409 
410 enum bpf_stack_build_id_status {
411 	/* user space need an empty entry to identify end of a trace */
412 	BPF_STACK_BUILD_ID_EMPTY = 0,
413 	/* with valid build_id and offset */
414 	BPF_STACK_BUILD_ID_VALID = 1,
415 	/* couldn't get build_id, fallback to ip */
416 	BPF_STACK_BUILD_ID_IP = 2,
417 };
418 
419 #define BPF_BUILD_ID_SIZE 20
420 struct bpf_stack_build_id {
421 	__s32		status;
422 	unsigned char	build_id[BPF_BUILD_ID_SIZE];
423 	union {
424 		__u64	offset;
425 		__u64	ip;
426 	};
427 };
428 
429 #define BPF_OBJ_NAME_LEN 16U
430 
431 union bpf_attr {
432 	struct { /* anonymous struct used by BPF_MAP_CREATE command */
433 		__u32	map_type;	/* one of enum bpf_map_type */
434 		__u32	key_size;	/* size of key in bytes */
435 		__u32	value_size;	/* size of value in bytes */
436 		__u32	max_entries;	/* max number of entries in a map */
437 		__u32	map_flags;	/* BPF_MAP_CREATE related
438 					 * flags defined above.
439 					 */
440 		__u32	inner_map_fd;	/* fd pointing to the inner map */
441 		__u32	numa_node;	/* numa node (effective only if
442 					 * BPF_F_NUMA_NODE is set).
443 					 */
444 		char	map_name[BPF_OBJ_NAME_LEN];
445 		__u32	map_ifindex;	/* ifindex of netdev to create on */
446 		__u32	btf_fd;		/* fd pointing to a BTF type data */
447 		__u32	btf_key_type_id;	/* BTF type_id of the key */
448 		__u32	btf_value_type_id;	/* BTF type_id of the value */
449 		__u32	btf_vmlinux_value_type_id;/* BTF type_id of a kernel-
450 						   * struct stored as the
451 						   * map value
452 						   */
453 	};
454 
455 	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
456 		__u32		map_fd;
457 		__aligned_u64	key;
458 		union {
459 			__aligned_u64 value;
460 			__aligned_u64 next_key;
461 		};
462 		__u64		flags;
463 	};
464 
465 	struct { /* struct used by BPF_MAP_*_BATCH commands */
466 		__aligned_u64	in_batch;	/* start batch,
467 						 * NULL to start from beginning
468 						 */
469 		__aligned_u64	out_batch;	/* output: next start batch */
470 		__aligned_u64	keys;
471 		__aligned_u64	values;
472 		__u32		count;		/* input/output:
473 						 * input: # of key/value
474 						 * elements
475 						 * output: # of filled elements
476 						 */
477 		__u32		map_fd;
478 		__u64		elem_flags;
479 		__u64		flags;
480 	} batch;
481 
482 	struct { /* anonymous struct used by BPF_PROG_LOAD command */
483 		__u32		prog_type;	/* one of enum bpf_prog_type */
484 		__u32		insn_cnt;
485 		__aligned_u64	insns;
486 		__aligned_u64	license;
487 		__u32		log_level;	/* verbosity level of verifier */
488 		__u32		log_size;	/* size of user buffer */
489 		__aligned_u64	log_buf;	/* user supplied buffer */
490 		__u32		kern_version;	/* not used */
491 		__u32		prog_flags;
492 		char		prog_name[BPF_OBJ_NAME_LEN];
493 		__u32		prog_ifindex;	/* ifindex of netdev to prep for */
494 		/* For some prog types expected attach type must be known at
495 		 * load time to verify attach type specific parts of prog
496 		 * (context accesses, allowed helpers, etc).
497 		 */
498 		__u32		expected_attach_type;
499 		__u32		prog_btf_fd;	/* fd pointing to BTF type data */
500 		__u32		func_info_rec_size;	/* userspace bpf_func_info size */
501 		__aligned_u64	func_info;	/* func info */
502 		__u32		func_info_cnt;	/* number of bpf_func_info records */
503 		__u32		line_info_rec_size;	/* userspace bpf_line_info size */
504 		__aligned_u64	line_info;	/* line info */
505 		__u32		line_info_cnt;	/* number of bpf_line_info records */
506 		__u32		attach_btf_id;	/* in-kernel BTF type id to attach to */
507 		__u32		attach_prog_fd; /* 0 to attach to vmlinux */
508 	};
509 
510 	struct { /* anonymous struct used by BPF_OBJ_* commands */
511 		__aligned_u64	pathname;
512 		__u32		bpf_fd;
513 		__u32		file_flags;
514 	};
515 
516 	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
517 		__u32		target_fd;	/* container object to attach to */
518 		__u32		attach_bpf_fd;	/* eBPF program to attach */
519 		__u32		attach_type;
520 		__u32		attach_flags;
521 		__u32		replace_bpf_fd;	/* previously attached eBPF
522 						 * program to replace if
523 						 * BPF_F_REPLACE is used
524 						 */
525 	};
526 
527 	struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
528 		__u32		prog_fd;
529 		__u32		retval;
530 		__u32		data_size_in;	/* input: len of data_in */
531 		__u32		data_size_out;	/* input/output: len of data_out
532 						 *   returns ENOSPC if data_out
533 						 *   is too small.
534 						 */
535 		__aligned_u64	data_in;
536 		__aligned_u64	data_out;
537 		__u32		repeat;
538 		__u32		duration;
539 		__u32		ctx_size_in;	/* input: len of ctx_in */
540 		__u32		ctx_size_out;	/* input/output: len of ctx_out
541 						 *   returns ENOSPC if ctx_out
542 						 *   is too small.
543 						 */
544 		__aligned_u64	ctx_in;
545 		__aligned_u64	ctx_out;
546 	} test;
547 
548 	struct { /* anonymous struct used by BPF_*_GET_*_ID */
549 		union {
550 			__u32		start_id;
551 			__u32		prog_id;
552 			__u32		map_id;
553 			__u32		btf_id;
554 			__u32		link_id;
555 		};
556 		__u32		next_id;
557 		__u32		open_flags;
558 	};
559 
560 	struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
561 		__u32		bpf_fd;
562 		__u32		info_len;
563 		__aligned_u64	info;
564 	} info;
565 
566 	struct { /* anonymous struct used by BPF_PROG_QUERY command */
567 		__u32		target_fd;	/* container object to query */
568 		__u32		attach_type;
569 		__u32		query_flags;
570 		__u32		attach_flags;
571 		__aligned_u64	prog_ids;
572 		__u32		prog_cnt;
573 	} query;
574 
575 	struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
576 		__u64 name;
577 		__u32 prog_fd;
578 	} raw_tracepoint;
579 
580 	struct { /* anonymous struct for BPF_BTF_LOAD */
581 		__aligned_u64	btf;
582 		__aligned_u64	btf_log_buf;
583 		__u32		btf_size;
584 		__u32		btf_log_size;
585 		__u32		btf_log_level;
586 	};
587 
588 	struct {
589 		__u32		pid;		/* input: pid */
590 		__u32		fd;		/* input: fd */
591 		__u32		flags;		/* input: flags */
592 		__u32		buf_len;	/* input/output: buf len */
593 		__aligned_u64	buf;		/* input/output:
594 						 *   tp_name for tracepoint
595 						 *   symbol for kprobe
596 						 *   filename for uprobe
597 						 */
598 		__u32		prog_id;	/* output: prod_id */
599 		__u32		fd_type;	/* output: BPF_FD_TYPE_* */
600 		__u64		probe_offset;	/* output: probe_offset */
601 		__u64		probe_addr;	/* output: probe_addr */
602 	} task_fd_query;
603 
604 	struct { /* struct used by BPF_LINK_CREATE command */
605 		__u32		prog_fd;	/* eBPF program to attach */
606 		__u32		target_fd;	/* object to attach to */
607 		__u32		attach_type;	/* attach type */
608 		__u32		flags;		/* extra flags */
609 	} link_create;
610 
611 	struct { /* struct used by BPF_LINK_UPDATE command */
612 		__u32		link_fd;	/* link fd */
613 		/* new program fd to update link with */
614 		__u32		new_prog_fd;
615 		__u32		flags;		/* extra flags */
616 		/* expected link's program fd; is specified only if
617 		 * BPF_F_REPLACE flag is set in flags */
618 		__u32		old_prog_fd;
619 	} link_update;
620 
621 	struct { /* struct used by BPF_ENABLE_STATS command */
622 		__u32		type;
623 	} enable_stats;
624 
625 	struct { /* struct used by BPF_ITER_CREATE command */
626 		__u32		link_fd;
627 		__u32		flags;
628 	} iter_create;
629 
630 } __attribute__((aligned(8)));
631 
632 /* The description below is an attempt at providing documentation to eBPF
633  * developers about the multiple available eBPF helper functions. It can be
634  * parsed and used to produce a manual page. The workflow is the following,
635  * and requires the rst2man utility:
636  *
637  *     $ ./scripts/bpf_helpers_doc.py \
638  *             --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst
639  *     $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7
640  *     $ man /tmp/bpf-helpers.7
641  *
642  * Note that in order to produce this external documentation, some RST
643  * formatting is used in the descriptions to get "bold" and "italics" in
644  * manual pages. Also note that the few trailing white spaces are
645  * intentional, removing them would break paragraphs for rst2man.
646  *
647  * Start of BPF helper function descriptions:
648  *
649  * void *bpf_map_lookup_elem(struct bpf_map *map, const void *key)
650  * 	Description
651  * 		Perform a lookup in *map* for an entry associated to *key*.
652  * 	Return
653  * 		Map value associated to *key*, or **NULL** if no entry was
654  * 		found.
655  *
656  * int bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags)
657  * 	Description
658  * 		Add or update the value of the entry associated to *key* in
659  * 		*map* with *value*. *flags* is one of:
660  *
661  * 		**BPF_NOEXIST**
662  * 			The entry for *key* must not exist in the map.
663  * 		**BPF_EXIST**
664  * 			The entry for *key* must already exist in the map.
665  * 		**BPF_ANY**
666  * 			No condition on the existence of the entry for *key*.
667  *
668  * 		Flag value **BPF_NOEXIST** cannot be used for maps of types
669  * 		**BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY**  (all
670  * 		elements always exist), the helper would return an error.
671  * 	Return
672  * 		0 on success, or a negative error in case of failure.
673  *
674  * int bpf_map_delete_elem(struct bpf_map *map, const void *key)
675  * 	Description
676  * 		Delete entry with *key* from *map*.
677  * 	Return
678  * 		0 on success, or a negative error in case of failure.
679  *
680  * int bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr)
681  * 	Description
682  * 		For tracing programs, safely attempt to read *size* bytes from
683  * 		kernel space address *unsafe_ptr* and store the data in *dst*.
684  *
685  * 		Generally, use **bpf_probe_read_user**\ () or
686  * 		**bpf_probe_read_kernel**\ () instead.
687  * 	Return
688  * 		0 on success, or a negative error in case of failure.
689  *
690  * u64 bpf_ktime_get_ns(void)
691  * 	Description
692  * 		Return the time elapsed since system boot, in nanoseconds.
693  * 		Does not include time the system was suspended.
694  * 		See: **clock_gettime**\ (**CLOCK_MONOTONIC**)
695  * 	Return
696  * 		Current *ktime*.
697  *
698  * int bpf_trace_printk(const char *fmt, u32 fmt_size, ...)
699  * 	Description
700  * 		This helper is a "printk()-like" facility for debugging. It
701  * 		prints a message defined by format *fmt* (of size *fmt_size*)
702  * 		to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if
703  * 		available. It can take up to three additional **u64**
704  * 		arguments (as an eBPF helpers, the total number of arguments is
705  * 		limited to five).
706  *
707  * 		Each time the helper is called, it appends a line to the trace.
708  * 		Lines are discarded while *\/sys/kernel/debug/tracing/trace* is
709  * 		open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this.
710  * 		The format of the trace is customizable, and the exact output
711  * 		one will get depends on the options set in
712  * 		*\/sys/kernel/debug/tracing/trace_options* (see also the
713  * 		*README* file under the same directory). However, it usually
714  * 		defaults to something like:
715  *
716  * 		::
717  *
718  * 			telnet-470   [001] .N.. 419421.045894: 0x00000001: <formatted msg>
719  *
720  * 		In the above:
721  *
722  * 			* ``telnet`` is the name of the current task.
723  * 			* ``470`` is the PID of the current task.
724  * 			* ``001`` is the CPU number on which the task is
725  * 			  running.
726  * 			* In ``.N..``, each character refers to a set of
727  * 			  options (whether irqs are enabled, scheduling
728  * 			  options, whether hard/softirqs are running, level of
729  * 			  preempt_disabled respectively). **N** means that
730  * 			  **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED**
731  * 			  are set.
732  * 			* ``419421.045894`` is a timestamp.
733  * 			* ``0x00000001`` is a fake value used by BPF for the
734  * 			  instruction pointer register.
735  * 			* ``<formatted msg>`` is the message formatted with
736  * 			  *fmt*.
737  *
738  * 		The conversion specifiers supported by *fmt* are similar, but
739  * 		more limited than for printk(). They are **%d**, **%i**,
740  * 		**%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**,
741  * 		**%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size
742  * 		of field, padding with zeroes, etc.) is available, and the
743  * 		helper will return **-EINVAL** (but print nothing) if it
744  * 		encounters an unknown specifier.
745  *
746  * 		Also, note that **bpf_trace_printk**\ () is slow, and should
747  * 		only be used for debugging purposes. For this reason, a notice
748  * 		bloc (spanning several lines) is printed to kernel logs and
749  * 		states that the helper should not be used "for production use"
750  * 		the first time this helper is used (or more precisely, when
751  * 		**trace_printk**\ () buffers are allocated). For passing values
752  * 		to user space, perf events should be preferred.
753  * 	Return
754  * 		The number of bytes written to the buffer, or a negative error
755  * 		in case of failure.
756  *
757  * u32 bpf_get_prandom_u32(void)
758  * 	Description
759  * 		Get a pseudo-random number.
760  *
761  * 		From a security point of view, this helper uses its own
762  * 		pseudo-random internal state, and cannot be used to infer the
763  * 		seed of other random functions in the kernel. However, it is
764  * 		essential to note that the generator used by the helper is not
765  * 		cryptographically secure.
766  * 	Return
767  * 		A random 32-bit unsigned value.
768  *
769  * u32 bpf_get_smp_processor_id(void)
770  * 	Description
771  * 		Get the SMP (symmetric multiprocessing) processor id. Note that
772  * 		all programs run with preemption disabled, which means that the
773  * 		SMP processor id is stable during all the execution of the
774  * 		program.
775  * 	Return
776  * 		The SMP id of the processor running the program.
777  *
778  * int bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
779  * 	Description
780  * 		Store *len* bytes from address *from* into the packet
781  * 		associated to *skb*, at *offset*. *flags* are a combination of
782  * 		**BPF_F_RECOMPUTE_CSUM** (automatically recompute the
783  * 		checksum for the packet after storing the bytes) and
784  * 		**BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\
785  * 		**->swhash** and *skb*\ **->l4hash** to 0).
786  *
787  * 		A call to this helper is susceptible to change the underlying
788  * 		packet buffer. Therefore, at load time, all checks on pointers
789  * 		previously done by the verifier are invalidated and must be
790  * 		performed again, if the helper is used in combination with
791  * 		direct packet access.
792  * 	Return
793  * 		0 on success, or a negative error in case of failure.
794  *
795  * int bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size)
796  * 	Description
797  * 		Recompute the layer 3 (e.g. IP) checksum for the packet
798  * 		associated to *skb*. Computation is incremental, so the helper
799  * 		must know the former value of the header field that was
800  * 		modified (*from*), the new value of this field (*to*), and the
801  * 		number of bytes (2 or 4) for this field, stored in *size*.
802  * 		Alternatively, it is possible to store the difference between
803  * 		the previous and the new values of the header field in *to*, by
804  * 		setting *from* and *size* to 0. For both methods, *offset*
805  * 		indicates the location of the IP checksum within the packet.
806  *
807  * 		This helper works in combination with **bpf_csum_diff**\ (),
808  * 		which does not update the checksum in-place, but offers more
809  * 		flexibility and can handle sizes larger than 2 or 4 for the
810  * 		checksum to update.
811  *
812  * 		A call to this helper is susceptible to change the underlying
813  * 		packet buffer. Therefore, at load time, all checks on pointers
814  * 		previously done by the verifier are invalidated and must be
815  * 		performed again, if the helper is used in combination with
816  * 		direct packet access.
817  * 	Return
818  * 		0 on success, or a negative error in case of failure.
819  *
820  * int bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags)
821  * 	Description
822  * 		Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the
823  * 		packet associated to *skb*. Computation is incremental, so the
824  * 		helper must know the former value of the header field that was
825  * 		modified (*from*), the new value of this field (*to*), and the
826  * 		number of bytes (2 or 4) for this field, stored on the lowest
827  * 		four bits of *flags*. Alternatively, it is possible to store
828  * 		the difference between the previous and the new values of the
829  * 		header field in *to*, by setting *from* and the four lowest
830  * 		bits of *flags* to 0. For both methods, *offset* indicates the
831  * 		location of the IP checksum within the packet. In addition to
832  * 		the size of the field, *flags* can be added (bitwise OR) actual
833  * 		flags. With **BPF_F_MARK_MANGLED_0**, a null checksum is left
834  * 		untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and
835  * 		for updates resulting in a null checksum the value is set to
836  * 		**CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates
837  * 		the checksum is to be computed against a pseudo-header.
838  *
839  * 		This helper works in combination with **bpf_csum_diff**\ (),
840  * 		which does not update the checksum in-place, but offers more
841  * 		flexibility and can handle sizes larger than 2 or 4 for the
842  * 		checksum to update.
843  *
844  * 		A call to this helper is susceptible to change the underlying
845  * 		packet buffer. Therefore, at load time, all checks on pointers
846  * 		previously done by the verifier are invalidated and must be
847  * 		performed again, if the helper is used in combination with
848  * 		direct packet access.
849  * 	Return
850  * 		0 on success, or a negative error in case of failure.
851  *
852  * int bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index)
853  * 	Description
854  * 		This special helper is used to trigger a "tail call", or in
855  * 		other words, to jump into another eBPF program. The same stack
856  * 		frame is used (but values on stack and in registers for the
857  * 		caller are not accessible to the callee). This mechanism allows
858  * 		for program chaining, either for raising the maximum number of
859  * 		available eBPF instructions, or to execute given programs in
860  * 		conditional blocks. For security reasons, there is an upper
861  * 		limit to the number of successive tail calls that can be
862  * 		performed.
863  *
864  * 		Upon call of this helper, the program attempts to jump into a
865  * 		program referenced at index *index* in *prog_array_map*, a
866  * 		special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes
867  * 		*ctx*, a pointer to the context.
868  *
869  * 		If the call succeeds, the kernel immediately runs the first
870  * 		instruction of the new program. This is not a function call,
871  * 		and it never returns to the previous program. If the call
872  * 		fails, then the helper has no effect, and the caller continues
873  * 		to run its subsequent instructions. A call can fail if the
874  * 		destination program for the jump does not exist (i.e. *index*
875  * 		is superior to the number of entries in *prog_array_map*), or
876  * 		if the maximum number of tail calls has been reached for this
877  * 		chain of programs. This limit is defined in the kernel by the
878  * 		macro **MAX_TAIL_CALL_CNT** (not accessible to user space),
879  * 		which is currently set to 32.
880  * 	Return
881  * 		0 on success, or a negative error in case of failure.
882  *
883  * int bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags)
884  * 	Description
885  * 		Clone and redirect the packet associated to *skb* to another
886  * 		net device of index *ifindex*. Both ingress and egress
887  * 		interfaces can be used for redirection. The **BPF_F_INGRESS**
888  * 		value in *flags* is used to make the distinction (ingress path
889  * 		is selected if the flag is present, egress path otherwise).
890  * 		This is the only flag supported for now.
891  *
892  * 		In comparison with **bpf_redirect**\ () helper,
893  * 		**bpf_clone_redirect**\ () has the associated cost of
894  * 		duplicating the packet buffer, but this can be executed out of
895  * 		the eBPF program. Conversely, **bpf_redirect**\ () is more
896  * 		efficient, but it is handled through an action code where the
897  * 		redirection happens only after the eBPF program has returned.
898  *
899  * 		A call to this helper is susceptible to change the underlying
900  * 		packet buffer. Therefore, at load time, all checks on pointers
901  * 		previously done by the verifier are invalidated and must be
902  * 		performed again, if the helper is used in combination with
903  * 		direct packet access.
904  * 	Return
905  * 		0 on success, or a negative error in case of failure.
906  *
907  * u64 bpf_get_current_pid_tgid(void)
908  * 	Return
909  * 		A 64-bit integer containing the current tgid and pid, and
910  * 		created as such:
911  * 		*current_task*\ **->tgid << 32 \|**
912  * 		*current_task*\ **->pid**.
913  *
914  * u64 bpf_get_current_uid_gid(void)
915  * 	Return
916  * 		A 64-bit integer containing the current GID and UID, and
917  * 		created as such: *current_gid* **<< 32 \|** *current_uid*.
918  *
919  * int bpf_get_current_comm(void *buf, u32 size_of_buf)
920  * 	Description
921  * 		Copy the **comm** attribute of the current task into *buf* of
922  * 		*size_of_buf*. The **comm** attribute contains the name of
923  * 		the executable (excluding the path) for the current task. The
924  * 		*size_of_buf* must be strictly positive. On success, the
925  * 		helper makes sure that the *buf* is NUL-terminated. On failure,
926  * 		it is filled with zeroes.
927  * 	Return
928  * 		0 on success, or a negative error in case of failure.
929  *
930  * u32 bpf_get_cgroup_classid(struct sk_buff *skb)
931  * 	Description
932  * 		Retrieve the classid for the current task, i.e. for the net_cls
933  * 		cgroup to which *skb* belongs.
934  *
935  * 		This helper can be used on TC egress path, but not on ingress.
936  *
937  * 		The net_cls cgroup provides an interface to tag network packets
938  * 		based on a user-provided identifier for all traffic coming from
939  * 		the tasks belonging to the related cgroup. See also the related
940  * 		kernel documentation, available from the Linux sources in file
941  * 		*Documentation/admin-guide/cgroup-v1/net_cls.rst*.
942  *
943  * 		The Linux kernel has two versions for cgroups: there are
944  * 		cgroups v1 and cgroups v2. Both are available to users, who can
945  * 		use a mixture of them, but note that the net_cls cgroup is for
946  * 		cgroup v1 only. This makes it incompatible with BPF programs
947  * 		run on cgroups, which is a cgroup-v2-only feature (a socket can
948  * 		only hold data for one version of cgroups at a time).
949  *
950  * 		This helper is only available is the kernel was compiled with
951  * 		the **CONFIG_CGROUP_NET_CLASSID** configuration option set to
952  * 		"**y**" or to "**m**".
953  * 	Return
954  * 		The classid, or 0 for the default unconfigured classid.
955  *
956  * int bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
957  * 	Description
958  * 		Push a *vlan_tci* (VLAN tag control information) of protocol
959  * 		*vlan_proto* to the packet associated to *skb*, then update
960  * 		the checksum. Note that if *vlan_proto* is different from
961  * 		**ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to
962  * 		be **ETH_P_8021Q**.
963  *
964  * 		A call to this helper is susceptible to change the underlying
965  * 		packet buffer. Therefore, at load time, all checks on pointers
966  * 		previously done by the verifier are invalidated and must be
967  * 		performed again, if the helper is used in combination with
968  * 		direct packet access.
969  * 	Return
970  * 		0 on success, or a negative error in case of failure.
971  *
972  * int bpf_skb_vlan_pop(struct sk_buff *skb)
973  * 	Description
974  * 		Pop a VLAN header from the packet associated to *skb*.
975  *
976  * 		A call to this helper is susceptible to change the underlying
977  * 		packet buffer. Therefore, at load time, all checks on pointers
978  * 		previously done by the verifier are invalidated and must be
979  * 		performed again, if the helper is used in combination with
980  * 		direct packet access.
981  * 	Return
982  * 		0 on success, or a negative error in case of failure.
983  *
984  * int bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
985  * 	Description
986  * 		Get tunnel metadata. This helper takes a pointer *key* to an
987  * 		empty **struct bpf_tunnel_key** of **size**, that will be
988  * 		filled with tunnel metadata for the packet associated to *skb*.
989  * 		The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which
990  * 		indicates that the tunnel is based on IPv6 protocol instead of
991  * 		IPv4.
992  *
993  * 		The **struct bpf_tunnel_key** is an object that generalizes the
994  * 		principal parameters used by various tunneling protocols into a
995  * 		single struct. This way, it can be used to easily make a
996  * 		decision based on the contents of the encapsulation header,
997  * 		"summarized" in this struct. In particular, it holds the IP
998  * 		address of the remote end (IPv4 or IPv6, depending on the case)
999  * 		in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also,
1000  * 		this struct exposes the *key*\ **->tunnel_id**, which is
1001  * 		generally mapped to a VNI (Virtual Network Identifier), making
1002  * 		it programmable together with the **bpf_skb_set_tunnel_key**\
1003  * 		() helper.
1004  *
1005  * 		Let's imagine that the following code is part of a program
1006  * 		attached to the TC ingress interface, on one end of a GRE
1007  * 		tunnel, and is supposed to filter out all messages coming from
1008  * 		remote ends with IPv4 address other than 10.0.0.1:
1009  *
1010  * 		::
1011  *
1012  * 			int ret;
1013  * 			struct bpf_tunnel_key key = {};
1014  *
1015  * 			ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
1016  * 			if (ret < 0)
1017  * 				return TC_ACT_SHOT;	// drop packet
1018  *
1019  * 			if (key.remote_ipv4 != 0x0a000001)
1020  * 				return TC_ACT_SHOT;	// drop packet
1021  *
1022  * 			return TC_ACT_OK;		// accept packet
1023  *
1024  * 		This interface can also be used with all encapsulation devices
1025  * 		that can operate in "collect metadata" mode: instead of having
1026  * 		one network device per specific configuration, the "collect
1027  * 		metadata" mode only requires a single device where the
1028  * 		configuration can be extracted from this helper.
1029  *
1030  * 		This can be used together with various tunnels such as VXLan,
1031  * 		Geneve, GRE or IP in IP (IPIP).
1032  * 	Return
1033  * 		0 on success, or a negative error in case of failure.
1034  *
1035  * int bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
1036  * 	Description
1037  * 		Populate tunnel metadata for packet associated to *skb.* The
1038  * 		tunnel metadata is set to the contents of *key*, of *size*. The
1039  * 		*flags* can be set to a combination of the following values:
1040  *
1041  * 		**BPF_F_TUNINFO_IPV6**
1042  * 			Indicate that the tunnel is based on IPv6 protocol
1043  * 			instead of IPv4.
1044  * 		**BPF_F_ZERO_CSUM_TX**
1045  * 			For IPv4 packets, add a flag to tunnel metadata
1046  * 			indicating that checksum computation should be skipped
1047  * 			and checksum set to zeroes.
1048  * 		**BPF_F_DONT_FRAGMENT**
1049  * 			Add a flag to tunnel metadata indicating that the
1050  * 			packet should not be fragmented.
1051  * 		**BPF_F_SEQ_NUMBER**
1052  * 			Add a flag to tunnel metadata indicating that a
1053  * 			sequence number should be added to tunnel header before
1054  * 			sending the packet. This flag was added for GRE
1055  * 			encapsulation, but might be used with other protocols
1056  * 			as well in the future.
1057  *
1058  * 		Here is a typical usage on the transmit path:
1059  *
1060  * 		::
1061  *
1062  * 			struct bpf_tunnel_key key;
1063  * 			     populate key ...
1064  * 			bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
1065  * 			bpf_clone_redirect(skb, vxlan_dev_ifindex, 0);
1066  *
1067  * 		See also the description of the **bpf_skb_get_tunnel_key**\ ()
1068  * 		helper for additional information.
1069  * 	Return
1070  * 		0 on success, or a negative error in case of failure.
1071  *
1072  * u64 bpf_perf_event_read(struct bpf_map *map, u64 flags)
1073  * 	Description
1074  * 		Read the value of a perf event counter. This helper relies on a
1075  * 		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of
1076  * 		the perf event counter is selected when *map* is updated with
1077  * 		perf event file descriptors. The *map* is an array whose size
1078  * 		is the number of available CPUs, and each cell contains a value
1079  * 		relative to one CPU. The value to retrieve is indicated by
1080  * 		*flags*, that contains the index of the CPU to look up, masked
1081  * 		with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
1082  * 		**BPF_F_CURRENT_CPU** to indicate that the value for the
1083  * 		current CPU should be retrieved.
1084  *
1085  * 		Note that before Linux 4.13, only hardware perf event can be
1086  * 		retrieved.
1087  *
1088  * 		Also, be aware that the newer helper
1089  * 		**bpf_perf_event_read_value**\ () is recommended over
1090  * 		**bpf_perf_event_read**\ () in general. The latter has some ABI
1091  * 		quirks where error and counter value are used as a return code
1092  * 		(which is wrong to do since ranges may overlap). This issue is
1093  * 		fixed with **bpf_perf_event_read_value**\ (), which at the same
1094  * 		time provides more features over the **bpf_perf_event_read**\
1095  * 		() interface. Please refer to the description of
1096  * 		**bpf_perf_event_read_value**\ () for details.
1097  * 	Return
1098  * 		The value of the perf event counter read from the map, or a
1099  * 		negative error code in case of failure.
1100  *
1101  * int bpf_redirect(u32 ifindex, u64 flags)
1102  * 	Description
1103  * 		Redirect the packet to another net device of index *ifindex*.
1104  * 		This helper is somewhat similar to **bpf_clone_redirect**\
1105  * 		(), except that the packet is not cloned, which provides
1106  * 		increased performance.
1107  *
1108  * 		Except for XDP, both ingress and egress interfaces can be used
1109  * 		for redirection. The **BPF_F_INGRESS** value in *flags* is used
1110  * 		to make the distinction (ingress path is selected if the flag
1111  * 		is present, egress path otherwise). Currently, XDP only
1112  * 		supports redirection to the egress interface, and accepts no
1113  * 		flag at all.
1114  *
1115  * 		The same effect can also be attained with the more generic
1116  * 		**bpf_redirect_map**\ (), which uses a BPF map to store the
1117  * 		redirect target instead of providing it directly to the helper.
1118  * 	Return
1119  * 		For XDP, the helper returns **XDP_REDIRECT** on success or
1120  * 		**XDP_ABORTED** on error. For other program types, the values
1121  * 		are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on
1122  * 		error.
1123  *
1124  * u32 bpf_get_route_realm(struct sk_buff *skb)
1125  * 	Description
1126  * 		Retrieve the realm or the route, that is to say the
1127  * 		**tclassid** field of the destination for the *skb*. The
1128  * 		indentifier retrieved is a user-provided tag, similar to the
1129  * 		one used with the net_cls cgroup (see description for
1130  * 		**bpf_get_cgroup_classid**\ () helper), but here this tag is
1131  * 		held by a route (a destination entry), not by a task.
1132  *
1133  * 		Retrieving this identifier works with the clsact TC egress hook
1134  * 		(see also **tc-bpf(8)**), or alternatively on conventional
1135  * 		classful egress qdiscs, but not on TC ingress path. In case of
1136  * 		clsact TC egress hook, this has the advantage that, internally,
1137  * 		the destination entry has not been dropped yet in the transmit
1138  * 		path. Therefore, the destination entry does not need to be
1139  * 		artificially held via **netif_keep_dst**\ () for a classful
1140  * 		qdisc until the *skb* is freed.
1141  *
1142  * 		This helper is available only if the kernel was compiled with
1143  * 		**CONFIG_IP_ROUTE_CLASSID** configuration option.
1144  * 	Return
1145  * 		The realm of the route for the packet associated to *skb*, or 0
1146  * 		if none was found.
1147  *
1148  * int bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
1149  * 	Description
1150  * 		Write raw *data* blob into a special BPF perf event held by
1151  * 		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
1152  * 		event must have the following attributes: **PERF_SAMPLE_RAW**
1153  * 		as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
1154  * 		**PERF_COUNT_SW_BPF_OUTPUT** as **config**.
1155  *
1156  * 		The *flags* are used to indicate the index in *map* for which
1157  * 		the value must be put, masked with **BPF_F_INDEX_MASK**.
1158  * 		Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
1159  * 		to indicate that the index of the current CPU core should be
1160  * 		used.
1161  *
1162  * 		The value to write, of *size*, is passed through eBPF stack and
1163  * 		pointed by *data*.
1164  *
1165  * 		The context of the program *ctx* needs also be passed to the
1166  * 		helper.
1167  *
1168  * 		On user space, a program willing to read the values needs to
1169  * 		call **perf_event_open**\ () on the perf event (either for
1170  * 		one or for all CPUs) and to store the file descriptor into the
1171  * 		*map*. This must be done before the eBPF program can send data
1172  * 		into it. An example is available in file
1173  * 		*samples/bpf/trace_output_user.c* in the Linux kernel source
1174  * 		tree (the eBPF program counterpart is in
1175  * 		*samples/bpf/trace_output_kern.c*).
1176  *
1177  * 		**bpf_perf_event_output**\ () achieves better performance
1178  * 		than **bpf_trace_printk**\ () for sharing data with user
1179  * 		space, and is much better suitable for streaming data from eBPF
1180  * 		programs.
1181  *
1182  * 		Note that this helper is not restricted to tracing use cases
1183  * 		and can be used with programs attached to TC or XDP as well,
1184  * 		where it allows for passing data to user space listeners. Data
1185  * 		can be:
1186  *
1187  * 		* Only custom structs,
1188  * 		* Only the packet payload, or
1189  * 		* A combination of both.
1190  * 	Return
1191  * 		0 on success, or a negative error in case of failure.
1192  *
1193  * int bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len)
1194  * 	Description
1195  * 		This helper was provided as an easy way to load data from a
1196  * 		packet. It can be used to load *len* bytes from *offset* from
1197  * 		the packet associated to *skb*, into the buffer pointed by
1198  * 		*to*.
1199  *
1200  * 		Since Linux 4.7, usage of this helper has mostly been replaced
1201  * 		by "direct packet access", enabling packet data to be
1202  * 		manipulated with *skb*\ **->data** and *skb*\ **->data_end**
1203  * 		pointing respectively to the first byte of packet data and to
1204  * 		the byte after the last byte of packet data. However, it
1205  * 		remains useful if one wishes to read large quantities of data
1206  * 		at once from a packet into the eBPF stack.
1207  * 	Return
1208  * 		0 on success, or a negative error in case of failure.
1209  *
1210  * int bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags)
1211  * 	Description
1212  * 		Walk a user or a kernel stack and return its id. To achieve
1213  * 		this, the helper needs *ctx*, which is a pointer to the context
1214  * 		on which the tracing program is executed, and a pointer to a
1215  * 		*map* of type **BPF_MAP_TYPE_STACK_TRACE**.
1216  *
1217  * 		The last argument, *flags*, holds the number of stack frames to
1218  * 		skip (from 0 to 255), masked with
1219  * 		**BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
1220  * 		a combination of the following flags:
1221  *
1222  * 		**BPF_F_USER_STACK**
1223  * 			Collect a user space stack instead of a kernel stack.
1224  * 		**BPF_F_FAST_STACK_CMP**
1225  * 			Compare stacks by hash only.
1226  * 		**BPF_F_REUSE_STACKID**
1227  * 			If two different stacks hash into the same *stackid*,
1228  * 			discard the old one.
1229  *
1230  * 		The stack id retrieved is a 32 bit long integer handle which
1231  * 		can be further combined with other data (including other stack
1232  * 		ids) and used as a key into maps. This can be useful for
1233  * 		generating a variety of graphs (such as flame graphs or off-cpu
1234  * 		graphs).
1235  *
1236  * 		For walking a stack, this helper is an improvement over
1237  * 		**bpf_probe_read**\ (), which can be used with unrolled loops
1238  * 		but is not efficient and consumes a lot of eBPF instructions.
1239  * 		Instead, **bpf_get_stackid**\ () can collect up to
1240  * 		**PERF_MAX_STACK_DEPTH** both kernel and user frames. Note that
1241  * 		this limit can be controlled with the **sysctl** program, and
1242  * 		that it should be manually increased in order to profile long
1243  * 		user stacks (such as stacks for Java programs). To do so, use:
1244  *
1245  * 		::
1246  *
1247  * 			# sysctl kernel.perf_event_max_stack=<new value>
1248  * 	Return
1249  * 		The positive or null stack id on success, or a negative error
1250  * 		in case of failure.
1251  *
1252  * s64 bpf_csum_diff(__be32 *from, u32 from_size, __be32 *to, u32 to_size, __wsum seed)
1253  * 	Description
1254  * 		Compute a checksum difference, from the raw buffer pointed by
1255  * 		*from*, of length *from_size* (that must be a multiple of 4),
1256  * 		towards the raw buffer pointed by *to*, of size *to_size*
1257  * 		(same remark). An optional *seed* can be added to the value
1258  * 		(this can be cascaded, the seed may come from a previous call
1259  * 		to the helper).
1260  *
1261  * 		This is flexible enough to be used in several ways:
1262  *
1263  * 		* With *from_size* == 0, *to_size* > 0 and *seed* set to
1264  * 		  checksum, it can be used when pushing new data.
1265  * 		* With *from_size* > 0, *to_size* == 0 and *seed* set to
1266  * 		  checksum, it can be used when removing data from a packet.
1267  * 		* With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it
1268  * 		  can be used to compute a diff. Note that *from_size* and
1269  * 		  *to_size* do not need to be equal.
1270  *
1271  * 		This helper can be used in combination with
1272  * 		**bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to
1273  * 		which one can feed in the difference computed with
1274  * 		**bpf_csum_diff**\ ().
1275  * 	Return
1276  * 		The checksum result, or a negative error code in case of
1277  * 		failure.
1278  *
1279  * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
1280  * 	Description
1281  * 		Retrieve tunnel options metadata for the packet associated to
1282  * 		*skb*, and store the raw tunnel option data to the buffer *opt*
1283  * 		of *size*.
1284  *
1285  * 		This helper can be used with encapsulation devices that can
1286  * 		operate in "collect metadata" mode (please refer to the related
1287  * 		note in the description of **bpf_skb_get_tunnel_key**\ () for
1288  * 		more details). A particular example where this can be used is
1289  * 		in combination with the Geneve encapsulation protocol, where it
1290  * 		allows for pushing (with **bpf_skb_get_tunnel_opt**\ () helper)
1291  * 		and retrieving arbitrary TLVs (Type-Length-Value headers) from
1292  * 		the eBPF program. This allows for full customization of these
1293  * 		headers.
1294  * 	Return
1295  * 		The size of the option data retrieved.
1296  *
1297  * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
1298  * 	Description
1299  * 		Set tunnel options metadata for the packet associated to *skb*
1300  * 		to the option data contained in the raw buffer *opt* of *size*.
1301  *
1302  * 		See also the description of the **bpf_skb_get_tunnel_opt**\ ()
1303  * 		helper for additional information.
1304  * 	Return
1305  * 		0 on success, or a negative error in case of failure.
1306  *
1307  * int bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags)
1308  * 	Description
1309  * 		Change the protocol of the *skb* to *proto*. Currently
1310  * 		supported are transition from IPv4 to IPv6, and from IPv6 to
1311  * 		IPv4. The helper takes care of the groundwork for the
1312  * 		transition, including resizing the socket buffer. The eBPF
1313  * 		program is expected to fill the new headers, if any, via
1314  * 		**skb_store_bytes**\ () and to recompute the checksums with
1315  * 		**bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\
1316  * 		(). The main case for this helper is to perform NAT64
1317  * 		operations out of an eBPF program.
1318  *
1319  * 		Internally, the GSO type is marked as dodgy so that headers are
1320  * 		checked and segments are recalculated by the GSO/GRO engine.
1321  * 		The size for GSO target is adapted as well.
1322  *
1323  * 		All values for *flags* are reserved for future usage, and must
1324  * 		be left at zero.
1325  *
1326  * 		A call to this helper is susceptible to change the underlying
1327  * 		packet buffer. Therefore, at load time, all checks on pointers
1328  * 		previously done by the verifier are invalidated and must be
1329  * 		performed again, if the helper is used in combination with
1330  * 		direct packet access.
1331  * 	Return
1332  * 		0 on success, or a negative error in case of failure.
1333  *
1334  * int bpf_skb_change_type(struct sk_buff *skb, u32 type)
1335  * 	Description
1336  * 		Change the packet type for the packet associated to *skb*. This
1337  * 		comes down to setting *skb*\ **->pkt_type** to *type*, except
1338  * 		the eBPF program does not have a write access to *skb*\
1339  * 		**->pkt_type** beside this helper. Using a helper here allows
1340  * 		for graceful handling of errors.
1341  *
1342  * 		The major use case is to change incoming *skb*s to
1343  * 		**PACKET_HOST** in a programmatic way instead of having to
1344  * 		recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for
1345  * 		example.
1346  *
1347  * 		Note that *type* only allows certain values. At this time, they
1348  * 		are:
1349  *
1350  * 		**PACKET_HOST**
1351  * 			Packet is for us.
1352  * 		**PACKET_BROADCAST**
1353  * 			Send packet to all.
1354  * 		**PACKET_MULTICAST**
1355  * 			Send packet to group.
1356  * 		**PACKET_OTHERHOST**
1357  * 			Send packet to someone else.
1358  * 	Return
1359  * 		0 on success, or a negative error in case of failure.
1360  *
1361  * int bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index)
1362  * 	Description
1363  * 		Check whether *skb* is a descendant of the cgroup2 held by
1364  * 		*map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
1365  * 	Return
1366  * 		The return value depends on the result of the test, and can be:
1367  *
1368  * 		* 0, if the *skb* failed the cgroup2 descendant test.
1369  * 		* 1, if the *skb* succeeded the cgroup2 descendant test.
1370  * 		* A negative error code, if an error occurred.
1371  *
1372  * u32 bpf_get_hash_recalc(struct sk_buff *skb)
1373  * 	Description
1374  * 		Retrieve the hash of the packet, *skb*\ **->hash**. If it is
1375  * 		not set, in particular if the hash was cleared due to mangling,
1376  * 		recompute this hash. Later accesses to the hash can be done
1377  * 		directly with *skb*\ **->hash**.
1378  *
1379  * 		Calling **bpf_set_hash_invalid**\ (), changing a packet
1380  * 		prototype with **bpf_skb_change_proto**\ (), or calling
1381  * 		**bpf_skb_store_bytes**\ () with the
1382  * 		**BPF_F_INVALIDATE_HASH** are actions susceptible to clear
1383  * 		the hash and to trigger a new computation for the next call to
1384  * 		**bpf_get_hash_recalc**\ ().
1385  * 	Return
1386  * 		The 32-bit hash.
1387  *
1388  * u64 bpf_get_current_task(void)
1389  * 	Return
1390  * 		A pointer to the current task struct.
1391  *
1392  * int bpf_probe_write_user(void *dst, const void *src, u32 len)
1393  * 	Description
1394  * 		Attempt in a safe way to write *len* bytes from the buffer
1395  * 		*src* to *dst* in memory. It only works for threads that are in
1396  * 		user context, and *dst* must be a valid user space address.
1397  *
1398  * 		This helper should not be used to implement any kind of
1399  * 		security mechanism because of TOC-TOU attacks, but rather to
1400  * 		debug, divert, and manipulate execution of semi-cooperative
1401  * 		processes.
1402  *
1403  * 		Keep in mind that this feature is meant for experiments, and it
1404  * 		has a risk of crashing the system and running programs.
1405  * 		Therefore, when an eBPF program using this helper is attached,
1406  * 		a warning including PID and process name is printed to kernel
1407  * 		logs.
1408  * 	Return
1409  * 		0 on success, or a negative error in case of failure.
1410  *
1411  * int bpf_current_task_under_cgroup(struct bpf_map *map, u32 index)
1412  * 	Description
1413  * 		Check whether the probe is being run is the context of a given
1414  * 		subset of the cgroup2 hierarchy. The cgroup2 to test is held by
1415  * 		*map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
1416  * 	Return
1417  * 		The return value depends on the result of the test, and can be:
1418  *
1419  * 		* 0, if the *skb* task belongs to the cgroup2.
1420  * 		* 1, if the *skb* task does not belong to the cgroup2.
1421  * 		* A negative error code, if an error occurred.
1422  *
1423  * int bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
1424  * 	Description
1425  * 		Resize (trim or grow) the packet associated to *skb* to the
1426  * 		new *len*. The *flags* are reserved for future usage, and must
1427  * 		be left at zero.
1428  *
1429  * 		The basic idea is that the helper performs the needed work to
1430  * 		change the size of the packet, then the eBPF program rewrites
1431  * 		the rest via helpers like **bpf_skb_store_bytes**\ (),
1432  * 		**bpf_l3_csum_replace**\ (), **bpf_l3_csum_replace**\ ()
1433  * 		and others. This helper is a slow path utility intended for
1434  * 		replies with control messages. And because it is targeted for
1435  * 		slow path, the helper itself can afford to be slow: it
1436  * 		implicitly linearizes, unclones and drops offloads from the
1437  * 		*skb*.
1438  *
1439  * 		A call to this helper is susceptible to change the underlying
1440  * 		packet buffer. Therefore, at load time, all checks on pointers
1441  * 		previously done by the verifier are invalidated and must be
1442  * 		performed again, if the helper is used in combination with
1443  * 		direct packet access.
1444  * 	Return
1445  * 		0 on success, or a negative error in case of failure.
1446  *
1447  * int bpf_skb_pull_data(struct sk_buff *skb, u32 len)
1448  * 	Description
1449  * 		Pull in non-linear data in case the *skb* is non-linear and not
1450  * 		all of *len* are part of the linear section. Make *len* bytes
1451  * 		from *skb* readable and writable. If a zero value is passed for
1452  * 		*len*, then the whole length of the *skb* is pulled.
1453  *
1454  * 		This helper is only needed for reading and writing with direct
1455  * 		packet access.
1456  *
1457  * 		For direct packet access, testing that offsets to access
1458  * 		are within packet boundaries (test on *skb*\ **->data_end**) is
1459  * 		susceptible to fail if offsets are invalid, or if the requested
1460  * 		data is in non-linear parts of the *skb*. On failure the
1461  * 		program can just bail out, or in the case of a non-linear
1462  * 		buffer, use a helper to make the data available. The
1463  * 		**bpf_skb_load_bytes**\ () helper is a first solution to access
1464  * 		the data. Another one consists in using **bpf_skb_pull_data**
1465  * 		to pull in once the non-linear parts, then retesting and
1466  * 		eventually access the data.
1467  *
1468  * 		At the same time, this also makes sure the *skb* is uncloned,
1469  * 		which is a necessary condition for direct write. As this needs
1470  * 		to be an invariant for the write part only, the verifier
1471  * 		detects writes and adds a prologue that is calling
1472  * 		**bpf_skb_pull_data()** to effectively unclone the *skb* from
1473  * 		the very beginning in case it is indeed cloned.
1474  *
1475  * 		A call to this helper is susceptible to change the underlying
1476  * 		packet buffer. Therefore, at load time, all checks on pointers
1477  * 		previously done by the verifier are invalidated and must be
1478  * 		performed again, if the helper is used in combination with
1479  * 		direct packet access.
1480  * 	Return
1481  * 		0 on success, or a negative error in case of failure.
1482  *
1483  * s64 bpf_csum_update(struct sk_buff *skb, __wsum csum)
1484  * 	Description
1485  * 		Add the checksum *csum* into *skb*\ **->csum** in case the
1486  * 		driver has supplied a checksum for the entire packet into that
1487  * 		field. Return an error otherwise. This helper is intended to be
1488  * 		used in combination with **bpf_csum_diff**\ (), in particular
1489  * 		when the checksum needs to be updated after data has been
1490  * 		written into the packet through direct packet access.
1491  * 	Return
1492  * 		The checksum on success, or a negative error code in case of
1493  * 		failure.
1494  *
1495  * void bpf_set_hash_invalid(struct sk_buff *skb)
1496  * 	Description
1497  * 		Invalidate the current *skb*\ **->hash**. It can be used after
1498  * 		mangling on headers through direct packet access, in order to
1499  * 		indicate that the hash is outdated and to trigger a
1500  * 		recalculation the next time the kernel tries to access this
1501  * 		hash or when the **bpf_get_hash_recalc**\ () helper is called.
1502  *
1503  * int bpf_get_numa_node_id(void)
1504  * 	Description
1505  * 		Return the id of the current NUMA node. The primary use case
1506  * 		for this helper is the selection of sockets for the local NUMA
1507  * 		node, when the program is attached to sockets using the
1508  * 		**SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**),
1509  * 		but the helper is also available to other eBPF program types,
1510  * 		similarly to **bpf_get_smp_processor_id**\ ().
1511  * 	Return
1512  * 		The id of current NUMA node.
1513  *
1514  * int bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags)
1515  * 	Description
1516  * 		Grows headroom of packet associated to *skb* and adjusts the
1517  * 		offset of the MAC header accordingly, adding *len* bytes of
1518  * 		space. It automatically extends and reallocates memory as
1519  * 		required.
1520  *
1521  * 		This helper can be used on a layer 3 *skb* to push a MAC header
1522  * 		for redirection into a layer 2 device.
1523  *
1524  * 		All values for *flags* are reserved for future usage, and must
1525  * 		be left at zero.
1526  *
1527  * 		A call to this helper is susceptible to change the underlying
1528  * 		packet buffer. Therefore, at load time, all checks on pointers
1529  * 		previously done by the verifier are invalidated and must be
1530  * 		performed again, if the helper is used in combination with
1531  * 		direct packet access.
1532  * 	Return
1533  * 		0 on success, or a negative error in case of failure.
1534  *
1535  * int bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta)
1536  * 	Description
1537  * 		Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that
1538  * 		it is possible to use a negative value for *delta*. This helper
1539  * 		can be used to prepare the packet for pushing or popping
1540  * 		headers.
1541  *
1542  * 		A call to this helper is susceptible to change the underlying
1543  * 		packet buffer. Therefore, at load time, all checks on pointers
1544  * 		previously done by the verifier are invalidated and must be
1545  * 		performed again, if the helper is used in combination with
1546  * 		direct packet access.
1547  * 	Return
1548  * 		0 on success, or a negative error in case of failure.
1549  *
1550  * int bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr)
1551  * 	Description
1552  * 		Copy a NUL terminated string from an unsafe kernel address
1553  * 		*unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for
1554  * 		more details.
1555  *
1556  * 		Generally, use **bpf_probe_read_user_str**\ () or
1557  * 		**bpf_probe_read_kernel_str**\ () instead.
1558  * 	Return
1559  * 		On success, the strictly positive length of the string,
1560  * 		including the trailing NUL character. On error, a negative
1561  * 		value.
1562  *
1563  * u64 bpf_get_socket_cookie(struct sk_buff *skb)
1564  * 	Description
1565  * 		If the **struct sk_buff** pointed by *skb* has a known socket,
1566  * 		retrieve the cookie (generated by the kernel) of this socket.
1567  * 		If no cookie has been set yet, generate a new cookie. Once
1568  * 		generated, the socket cookie remains stable for the life of the
1569  * 		socket. This helper can be useful for monitoring per socket
1570  * 		networking traffic statistics as it provides a global socket
1571  * 		identifier that can be assumed unique.
1572  * 	Return
1573  * 		A 8-byte long non-decreasing number on success, or 0 if the
1574  * 		socket field is missing inside *skb*.
1575  *
1576  * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
1577  * 	Description
1578  * 		Equivalent to bpf_get_socket_cookie() helper that accepts
1579  * 		*skb*, but gets socket from **struct bpf_sock_addr** context.
1580  * 	Return
1581  * 		A 8-byte long non-decreasing number.
1582  *
1583  * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
1584  * 	Description
1585  * 		Equivalent to **bpf_get_socket_cookie**\ () helper that accepts
1586  * 		*skb*, but gets socket from **struct bpf_sock_ops** context.
1587  * 	Return
1588  * 		A 8-byte long non-decreasing number.
1589  *
1590  * u32 bpf_get_socket_uid(struct sk_buff *skb)
1591  * 	Return
1592  * 		The owner UID of the socket associated to *skb*. If the socket
1593  * 		is **NULL**, or if it is not a full socket (i.e. if it is a
1594  * 		time-wait or a request socket instead), **overflowuid** value
1595  * 		is returned (note that **overflowuid** might also be the actual
1596  * 		UID value for the socket).
1597  *
1598  * u32 bpf_set_hash(struct sk_buff *skb, u32 hash)
1599  * 	Description
1600  * 		Set the full hash for *skb* (set the field *skb*\ **->hash**)
1601  * 		to value *hash*.
1602  * 	Return
1603  * 		0
1604  *
1605  * int bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
1606  * 	Description
1607  * 		Emulate a call to **setsockopt()** on the socket associated to
1608  * 		*bpf_socket*, which must be a full socket. The *level* at
1609  * 		which the option resides and the name *optname* of the option
1610  * 		must be specified, see **setsockopt(2)** for more information.
1611  * 		The option value of length *optlen* is pointed by *optval*.
1612  *
1613  * 		*bpf_socket* should be one of the following:
1614  *
1615  * 		* **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
1616  * 		* **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
1617  * 		  and **BPF_CGROUP_INET6_CONNECT**.
1618  *
1619  * 		This helper actually implements a subset of **setsockopt()**.
1620  * 		It supports the following *level*\ s:
1621  *
1622  * 		* **SOL_SOCKET**, which supports the following *optname*\ s:
1623  * 		  **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**,
1624  * 		  **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**.
1625  * 		* **IPPROTO_TCP**, which supports the following *optname*\ s:
1626  * 		  **TCP_CONGESTION**, **TCP_BPF_IW**,
1627  * 		  **TCP_BPF_SNDCWND_CLAMP**.
1628  * 		* **IPPROTO_IP**, which supports *optname* **IP_TOS**.
1629  * 		* **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
1630  * 	Return
1631  * 		0 on success, or a negative error in case of failure.
1632  *
1633  * int bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags)
1634  * 	Description
1635  * 		Grow or shrink the room for data in the packet associated to
1636  * 		*skb* by *len_diff*, and according to the selected *mode*.
1637  *
1638  * 		By default, the helper will reset any offloaded checksum
1639  * 		indicator of the skb to CHECKSUM_NONE. This can be avoided
1640  * 		by the following flag:
1641  *
1642  * 		* **BPF_F_ADJ_ROOM_NO_CSUM_RESET**: Do not reset offloaded
1643  * 		  checksum data of the skb to CHECKSUM_NONE.
1644  *
1645  *		There are two supported modes at this time:
1646  *
1647  *		* **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer
1648  *		  (room space is added or removed below the layer 2 header).
1649  *
1650  * 		* **BPF_ADJ_ROOM_NET**: Adjust room at the network layer
1651  * 		  (room space is added or removed below the layer 3 header).
1652  *
1653  *		The following flags are supported at this time:
1654  *
1655  *		* **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size.
1656  *		  Adjusting mss in this way is not allowed for datagrams.
1657  *
1658  *		* **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**,
1659  *		  **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**:
1660  *		  Any new space is reserved to hold a tunnel header.
1661  *		  Configure skb offsets and other fields accordingly.
1662  *
1663  *		* **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**,
1664  *		  **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**:
1665  *		  Use with ENCAP_L3 flags to further specify the tunnel type.
1666  *
1667  *		* **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*):
1668  *		  Use with ENCAP_L3/L4 flags to further specify the tunnel
1669  *		  type; *len* is the length of the inner MAC header.
1670  *
1671  * 		A call to this helper is susceptible to change the underlying
1672  * 		packet buffer. Therefore, at load time, all checks on pointers
1673  * 		previously done by the verifier are invalidated and must be
1674  * 		performed again, if the helper is used in combination with
1675  * 		direct packet access.
1676  * 	Return
1677  * 		0 on success, or a negative error in case of failure.
1678  *
1679  * int bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags)
1680  * 	Description
1681  * 		Redirect the packet to the endpoint referenced by *map* at
1682  * 		index *key*. Depending on its type, this *map* can contain
1683  * 		references to net devices (for forwarding packets through other
1684  * 		ports), or to CPUs (for redirecting XDP frames to another CPU;
1685  * 		but this is only implemented for native XDP (with driver
1686  * 		support) as of this writing).
1687  *
1688  * 		The lower two bits of *flags* are used as the return code if
1689  * 		the map lookup fails. This is so that the return value can be
1690  * 		one of the XDP program return codes up to **XDP_TX**, as chosen
1691  * 		by the caller. Any higher bits in the *flags* argument must be
1692  * 		unset.
1693  *
1694  * 		See also **bpf_redirect**\ (), which only supports redirecting
1695  * 		to an ifindex, but doesn't require a map to do so.
1696  * 	Return
1697  * 		**XDP_REDIRECT** on success, or the value of the two lower bits
1698  * 		of the *flags* argument on error.
1699  *
1700  * int bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
1701  * 	Description
1702  * 		Redirect the packet to the socket referenced by *map* (of type
1703  * 		**BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
1704  * 		egress interfaces can be used for redirection. The
1705  * 		**BPF_F_INGRESS** value in *flags* is used to make the
1706  * 		distinction (ingress path is selected if the flag is present,
1707  * 		egress path otherwise). This is the only flag supported for now.
1708  * 	Return
1709  * 		**SK_PASS** on success, or **SK_DROP** on error.
1710  *
1711  * int bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
1712  * 	Description
1713  * 		Add an entry to, or update a *map* referencing sockets. The
1714  * 		*skops* is used as a new value for the entry associated to
1715  * 		*key*. *flags* is one of:
1716  *
1717  * 		**BPF_NOEXIST**
1718  * 			The entry for *key* must not exist in the map.
1719  * 		**BPF_EXIST**
1720  * 			The entry for *key* must already exist in the map.
1721  * 		**BPF_ANY**
1722  * 			No condition on the existence of the entry for *key*.
1723  *
1724  * 		If the *map* has eBPF programs (parser and verdict), those will
1725  * 		be inherited by the socket being added. If the socket is
1726  * 		already attached to eBPF programs, this results in an error.
1727  * 	Return
1728  * 		0 on success, or a negative error in case of failure.
1729  *
1730  * int bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta)
1731  * 	Description
1732  * 		Adjust the address pointed by *xdp_md*\ **->data_meta** by
1733  * 		*delta* (which can be positive or negative). Note that this
1734  * 		operation modifies the address stored in *xdp_md*\ **->data**,
1735  * 		so the latter must be loaded only after the helper has been
1736  * 		called.
1737  *
1738  * 		The use of *xdp_md*\ **->data_meta** is optional and programs
1739  * 		are not required to use it. The rationale is that when the
1740  * 		packet is processed with XDP (e.g. as DoS filter), it is
1741  * 		possible to push further meta data along with it before passing
1742  * 		to the stack, and to give the guarantee that an ingress eBPF
1743  * 		program attached as a TC classifier on the same device can pick
1744  * 		this up for further post-processing. Since TC works with socket
1745  * 		buffers, it remains possible to set from XDP the **mark** or
1746  * 		**priority** pointers, or other pointers for the socket buffer.
1747  * 		Having this scratch space generic and programmable allows for
1748  * 		more flexibility as the user is free to store whatever meta
1749  * 		data they need.
1750  *
1751  * 		A call to this helper is susceptible to change the underlying
1752  * 		packet buffer. Therefore, at load time, all checks on pointers
1753  * 		previously done by the verifier are invalidated and must be
1754  * 		performed again, if the helper is used in combination with
1755  * 		direct packet access.
1756  * 	Return
1757  * 		0 on success, or a negative error in case of failure.
1758  *
1759  * int bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size)
1760  * 	Description
1761  * 		Read the value of a perf event counter, and store it into *buf*
1762  * 		of size *buf_size*. This helper relies on a *map* of type
1763  * 		**BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event
1764  * 		counter is selected when *map* is updated with perf event file
1765  * 		descriptors. The *map* is an array whose size is the number of
1766  * 		available CPUs, and each cell contains a value relative to one
1767  * 		CPU. The value to retrieve is indicated by *flags*, that
1768  * 		contains the index of the CPU to look up, masked with
1769  * 		**BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
1770  * 		**BPF_F_CURRENT_CPU** to indicate that the value for the
1771  * 		current CPU should be retrieved.
1772  *
1773  * 		This helper behaves in a way close to
1774  * 		**bpf_perf_event_read**\ () helper, save that instead of
1775  * 		just returning the value observed, it fills the *buf*
1776  * 		structure. This allows for additional data to be retrieved: in
1777  * 		particular, the enabled and running times (in *buf*\
1778  * 		**->enabled** and *buf*\ **->running**, respectively) are
1779  * 		copied. In general, **bpf_perf_event_read_value**\ () is
1780  * 		recommended over **bpf_perf_event_read**\ (), which has some
1781  * 		ABI issues and provides fewer functionalities.
1782  *
1783  * 		These values are interesting, because hardware PMU (Performance
1784  * 		Monitoring Unit) counters are limited resources. When there are
1785  * 		more PMU based perf events opened than available counters,
1786  * 		kernel will multiplex these events so each event gets certain
1787  * 		percentage (but not all) of the PMU time. In case that
1788  * 		multiplexing happens, the number of samples or counter value
1789  * 		will not reflect the case compared to when no multiplexing
1790  * 		occurs. This makes comparison between different runs difficult.
1791  * 		Typically, the counter value should be normalized before
1792  * 		comparing to other experiments. The usual normalization is done
1793  * 		as follows.
1794  *
1795  * 		::
1796  *
1797  * 			normalized_counter = counter * t_enabled / t_running
1798  *
1799  * 		Where t_enabled is the time enabled for event and t_running is
1800  * 		the time running for event since last normalization. The
1801  * 		enabled and running times are accumulated since the perf event
1802  * 		open. To achieve scaling factor between two invocations of an
1803  * 		eBPF program, users can use CPU id as the key (which is
1804  * 		typical for perf array usage model) to remember the previous
1805  * 		value and do the calculation inside the eBPF program.
1806  * 	Return
1807  * 		0 on success, or a negative error in case of failure.
1808  *
1809  * int bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
1810  * 	Description
1811  * 		For en eBPF program attached to a perf event, retrieve the
1812  * 		value of the event counter associated to *ctx* and store it in
1813  * 		the structure pointed by *buf* and of size *buf_size*. Enabled
1814  * 		and running times are also stored in the structure (see
1815  * 		description of helper **bpf_perf_event_read_value**\ () for
1816  * 		more details).
1817  * 	Return
1818  * 		0 on success, or a negative error in case of failure.
1819  *
1820  * int bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
1821  * 	Description
1822  * 		Emulate a call to **getsockopt()** on the socket associated to
1823  * 		*bpf_socket*, which must be a full socket. The *level* at
1824  * 		which the option resides and the name *optname* of the option
1825  * 		must be specified, see **getsockopt(2)** for more information.
1826  * 		The retrieved value is stored in the structure pointed by
1827  * 		*opval* and of length *optlen*.
1828  *
1829  * 		*bpf_socket* should be one of the following:
1830  *
1831  * 		* **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
1832  * 		* **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
1833  * 		  and **BPF_CGROUP_INET6_CONNECT**.
1834  *
1835  * 		This helper actually implements a subset of **getsockopt()**.
1836  * 		It supports the following *level*\ s:
1837  *
1838  * 		* **IPPROTO_TCP**, which supports *optname*
1839  * 		  **TCP_CONGESTION**.
1840  * 		* **IPPROTO_IP**, which supports *optname* **IP_TOS**.
1841  * 		* **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
1842  * 	Return
1843  * 		0 on success, or a negative error in case of failure.
1844  *
1845  * int bpf_override_return(struct pt_regs *regs, u64 rc)
1846  * 	Description
1847  * 		Used for error injection, this helper uses kprobes to override
1848  * 		the return value of the probed function, and to set it to *rc*.
1849  * 		The first argument is the context *regs* on which the kprobe
1850  * 		works.
1851  *
1852  * 		This helper works by setting the PC (program counter)
1853  * 		to an override function which is run in place of the original
1854  * 		probed function. This means the probed function is not run at
1855  * 		all. The replacement function just returns with the required
1856  * 		value.
1857  *
1858  * 		This helper has security implications, and thus is subject to
1859  * 		restrictions. It is only available if the kernel was compiled
1860  * 		with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration
1861  * 		option, and in this case it only works on functions tagged with
1862  * 		**ALLOW_ERROR_INJECTION** in the kernel code.
1863  *
1864  * 		Also, the helper is only available for the architectures having
1865  * 		the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing,
1866  * 		x86 architecture is the only one to support this feature.
1867  * 	Return
1868  * 		0
1869  *
1870  * int bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval)
1871  * 	Description
1872  * 		Attempt to set the value of the **bpf_sock_ops_cb_flags** field
1873  * 		for the full TCP socket associated to *bpf_sock_ops* to
1874  * 		*argval*.
1875  *
1876  * 		The primary use of this field is to determine if there should
1877  * 		be calls to eBPF programs of type
1878  * 		**BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP
1879  * 		code. A program of the same type can change its value, per
1880  * 		connection and as necessary, when the connection is
1881  * 		established. This field is directly accessible for reading, but
1882  * 		this helper must be used for updates in order to return an
1883  * 		error if an eBPF program tries to set a callback that is not
1884  * 		supported in the current kernel.
1885  *
1886  * 		*argval* is a flag array which can combine these flags:
1887  *
1888  * 		* **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out)
1889  * 		* **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission)
1890  * 		* **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change)
1891  * 		* **BPF_SOCK_OPS_RTT_CB_FLAG** (every RTT)
1892  *
1893  * 		Therefore, this function can be used to clear a callback flag by
1894  * 		setting the appropriate bit to zero. e.g. to disable the RTO
1895  * 		callback:
1896  *
1897  * 		**bpf_sock_ops_cb_flags_set(bpf_sock,**
1898  * 			**bpf_sock->bpf_sock_ops_cb_flags & ~BPF_SOCK_OPS_RTO_CB_FLAG)**
1899  *
1900  * 		Here are some examples of where one could call such eBPF
1901  * 		program:
1902  *
1903  * 		* When RTO fires.
1904  * 		* When a packet is retransmitted.
1905  * 		* When the connection terminates.
1906  * 		* When a packet is sent.
1907  * 		* When a packet is received.
1908  * 	Return
1909  * 		Code **-EINVAL** if the socket is not a full TCP socket;
1910  * 		otherwise, a positive number containing the bits that could not
1911  * 		be set is returned (which comes down to 0 if all bits were set
1912  * 		as required).
1913  *
1914  * int bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags)
1915  * 	Description
1916  * 		This helper is used in programs implementing policies at the
1917  * 		socket level. If the message *msg* is allowed to pass (i.e. if
1918  * 		the verdict eBPF program returns **SK_PASS**), redirect it to
1919  * 		the socket referenced by *map* (of type
1920  * 		**BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
1921  * 		egress interfaces can be used for redirection. The
1922  * 		**BPF_F_INGRESS** value in *flags* is used to make the
1923  * 		distinction (ingress path is selected if the flag is present,
1924  * 		egress path otherwise). This is the only flag supported for now.
1925  * 	Return
1926  * 		**SK_PASS** on success, or **SK_DROP** on error.
1927  *
1928  * int bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes)
1929  * 	Description
1930  * 		For socket policies, apply the verdict of the eBPF program to
1931  * 		the next *bytes* (number of bytes) of message *msg*.
1932  *
1933  * 		For example, this helper can be used in the following cases:
1934  *
1935  * 		* A single **sendmsg**\ () or **sendfile**\ () system call
1936  * 		  contains multiple logical messages that the eBPF program is
1937  * 		  supposed to read and for which it should apply a verdict.
1938  * 		* An eBPF program only cares to read the first *bytes* of a
1939  * 		  *msg*. If the message has a large payload, then setting up
1940  * 		  and calling the eBPF program repeatedly for all bytes, even
1941  * 		  though the verdict is already known, would create unnecessary
1942  * 		  overhead.
1943  *
1944  * 		When called from within an eBPF program, the helper sets a
1945  * 		counter internal to the BPF infrastructure, that is used to
1946  * 		apply the last verdict to the next *bytes*. If *bytes* is
1947  * 		smaller than the current data being processed from a
1948  * 		**sendmsg**\ () or **sendfile**\ () system call, the first
1949  * 		*bytes* will be sent and the eBPF program will be re-run with
1950  * 		the pointer for start of data pointing to byte number *bytes*
1951  * 		**+ 1**. If *bytes* is larger than the current data being
1952  * 		processed, then the eBPF verdict will be applied to multiple
1953  * 		**sendmsg**\ () or **sendfile**\ () calls until *bytes* are
1954  * 		consumed.
1955  *
1956  * 		Note that if a socket closes with the internal counter holding
1957  * 		a non-zero value, this is not a problem because data is not
1958  * 		being buffered for *bytes* and is sent as it is received.
1959  * 	Return
1960  * 		0
1961  *
1962  * int bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes)
1963  * 	Description
1964  * 		For socket policies, prevent the execution of the verdict eBPF
1965  * 		program for message *msg* until *bytes* (byte number) have been
1966  * 		accumulated.
1967  *
1968  * 		This can be used when one needs a specific number of bytes
1969  * 		before a verdict can be assigned, even if the data spans
1970  * 		multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme
1971  * 		case would be a user calling **sendmsg**\ () repeatedly with
1972  * 		1-byte long message segments. Obviously, this is bad for
1973  * 		performance, but it is still valid. If the eBPF program needs
1974  * 		*bytes* bytes to validate a header, this helper can be used to
1975  * 		prevent the eBPF program to be called again until *bytes* have
1976  * 		been accumulated.
1977  * 	Return
1978  * 		0
1979  *
1980  * int bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags)
1981  * 	Description
1982  * 		For socket policies, pull in non-linear data from user space
1983  * 		for *msg* and set pointers *msg*\ **->data** and *msg*\
1984  * 		**->data_end** to *start* and *end* bytes offsets into *msg*,
1985  * 		respectively.
1986  *
1987  * 		If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
1988  * 		*msg* it can only parse data that the (**data**, **data_end**)
1989  * 		pointers have already consumed. For **sendmsg**\ () hooks this
1990  * 		is likely the first scatterlist element. But for calls relying
1991  * 		on the **sendpage** handler (e.g. **sendfile**\ ()) this will
1992  * 		be the range (**0**, **0**) because the data is shared with
1993  * 		user space and by default the objective is to avoid allowing
1994  * 		user space to modify data while (or after) eBPF verdict is
1995  * 		being decided. This helper can be used to pull in data and to
1996  * 		set the start and end pointer to given values. Data will be
1997  * 		copied if necessary (i.e. if data was not linear and if start
1998  * 		and end pointers do not point to the same chunk).
1999  *
2000  * 		A call to this helper is susceptible to change the underlying
2001  * 		packet buffer. Therefore, at load time, all checks on pointers
2002  * 		previously done by the verifier are invalidated and must be
2003  * 		performed again, if the helper is used in combination with
2004  * 		direct packet access.
2005  *
2006  * 		All values for *flags* are reserved for future usage, and must
2007  * 		be left at zero.
2008  * 	Return
2009  * 		0 on success, or a negative error in case of failure.
2010  *
2011  * int bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len)
2012  * 	Description
2013  * 		Bind the socket associated to *ctx* to the address pointed by
2014  * 		*addr*, of length *addr_len*. This allows for making outgoing
2015  * 		connection from the desired IP address, which can be useful for
2016  * 		example when all processes inside a cgroup should use one
2017  * 		single IP address on a host that has multiple IP configured.
2018  *
2019  * 		This helper works for IPv4 and IPv6, TCP and UDP sockets. The
2020  * 		domain (*addr*\ **->sa_family**) must be **AF_INET** (or
2021  * 		**AF_INET6**). It's advised to pass zero port (**sin_port**
2022  * 		or **sin6_port**) which triggers IP_BIND_ADDRESS_NO_PORT-like
2023  * 		behavior and lets the kernel efficiently pick up an unused
2024  * 		port as long as 4-tuple is unique. Passing non-zero port might
2025  * 		lead to degraded performance.
2026  * 	Return
2027  * 		0 on success, or a negative error in case of failure.
2028  *
2029  * int bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta)
2030  * 	Description
2031  * 		Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is
2032  * 		possible to both shrink and grow the packet tail.
2033  * 		Shrink done via *delta* being a negative integer.
2034  *
2035  * 		A call to this helper is susceptible to change the underlying
2036  * 		packet buffer. Therefore, at load time, all checks on pointers
2037  * 		previously done by the verifier are invalidated and must be
2038  * 		performed again, if the helper is used in combination with
2039  * 		direct packet access.
2040  * 	Return
2041  * 		0 on success, or a negative error in case of failure.
2042  *
2043  * int bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags)
2044  * 	Description
2045  * 		Retrieve the XFRM state (IP transform framework, see also
2046  * 		**ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*.
2047  *
2048  * 		The retrieved value is stored in the **struct bpf_xfrm_state**
2049  * 		pointed by *xfrm_state* and of length *size*.
2050  *
2051  * 		All values for *flags* are reserved for future usage, and must
2052  * 		be left at zero.
2053  *
2054  * 		This helper is available only if the kernel was compiled with
2055  * 		**CONFIG_XFRM** configuration option.
2056  * 	Return
2057  * 		0 on success, or a negative error in case of failure.
2058  *
2059  * int bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags)
2060  * 	Description
2061  * 		Return a user or a kernel stack in bpf program provided buffer.
2062  * 		To achieve this, the helper needs *ctx*, which is a pointer
2063  * 		to the context on which the tracing program is executed.
2064  * 		To store the stacktrace, the bpf program provides *buf* with
2065  * 		a nonnegative *size*.
2066  *
2067  * 		The last argument, *flags*, holds the number of stack frames to
2068  * 		skip (from 0 to 255), masked with
2069  * 		**BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
2070  * 		the following flags:
2071  *
2072  * 		**BPF_F_USER_STACK**
2073  * 			Collect a user space stack instead of a kernel stack.
2074  * 		**BPF_F_USER_BUILD_ID**
2075  * 			Collect buildid+offset instead of ips for user stack,
2076  * 			only valid if **BPF_F_USER_STACK** is also specified.
2077  *
2078  * 		**bpf_get_stack**\ () can collect up to
2079  * 		**PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
2080  * 		to sufficient large buffer size. Note that
2081  * 		this limit can be controlled with the **sysctl** program, and
2082  * 		that it should be manually increased in order to profile long
2083  * 		user stacks (such as stacks for Java programs). To do so, use:
2084  *
2085  * 		::
2086  *
2087  * 			# sysctl kernel.perf_event_max_stack=<new value>
2088  * 	Return
2089  * 		A non-negative value equal to or less than *size* on success,
2090  * 		or a negative error in case of failure.
2091  *
2092  * int bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
2093  * 	Description
2094  * 		This helper is similar to **bpf_skb_load_bytes**\ () in that
2095  * 		it provides an easy way to load *len* bytes from *offset*
2096  * 		from the packet associated to *skb*, into the buffer pointed
2097  * 		by *to*. The difference to **bpf_skb_load_bytes**\ () is that
2098  * 		a fifth argument *start_header* exists in order to select a
2099  * 		base offset to start from. *start_header* can be one of:
2100  *
2101  * 		**BPF_HDR_START_MAC**
2102  * 			Base offset to load data from is *skb*'s mac header.
2103  * 		**BPF_HDR_START_NET**
2104  * 			Base offset to load data from is *skb*'s network header.
2105  *
2106  * 		In general, "direct packet access" is the preferred method to
2107  * 		access packet data, however, this helper is in particular useful
2108  * 		in socket filters where *skb*\ **->data** does not always point
2109  * 		to the start of the mac header and where "direct packet access"
2110  * 		is not available.
2111  * 	Return
2112  * 		0 on success, or a negative error in case of failure.
2113  *
2114  * int bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags)
2115  *	Description
2116  *		Do FIB lookup in kernel tables using parameters in *params*.
2117  *		If lookup is successful and result shows packet is to be
2118  *		forwarded, the neighbor tables are searched for the nexthop.
2119  *		If successful (ie., FIB lookup shows forwarding and nexthop
2120  *		is resolved), the nexthop address is returned in ipv4_dst
2121  *		or ipv6_dst based on family, smac is set to mac address of
2122  *		egress device, dmac is set to nexthop mac address, rt_metric
2123  *		is set to metric from route (IPv4/IPv6 only), and ifindex
2124  *		is set to the device index of the nexthop from the FIB lookup.
2125  *
2126  *		*plen* argument is the size of the passed in struct.
2127  *		*flags* argument can be a combination of one or more of the
2128  *		following values:
2129  *
2130  *		**BPF_FIB_LOOKUP_DIRECT**
2131  *			Do a direct table lookup vs full lookup using FIB
2132  *			rules.
2133  *		**BPF_FIB_LOOKUP_OUTPUT**
2134  *			Perform lookup from an egress perspective (default is
2135  *			ingress).
2136  *
2137  *		*ctx* is either **struct xdp_md** for XDP programs or
2138  *		**struct sk_buff** tc cls_act programs.
2139  *	Return
2140  *		* < 0 if any input argument is invalid
2141  *		*   0 on success (packet is forwarded, nexthop neighbor exists)
2142  *		* > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
2143  *		  packet is not forwarded or needs assist from full stack
2144  *
2145  * int bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
2146  *	Description
2147  *		Add an entry to, or update a sockhash *map* referencing sockets.
2148  *		The *skops* is used as a new value for the entry associated to
2149  *		*key*. *flags* is one of:
2150  *
2151  *		**BPF_NOEXIST**
2152  *			The entry for *key* must not exist in the map.
2153  *		**BPF_EXIST**
2154  *			The entry for *key* must already exist in the map.
2155  *		**BPF_ANY**
2156  *			No condition on the existence of the entry for *key*.
2157  *
2158  *		If the *map* has eBPF programs (parser and verdict), those will
2159  *		be inherited by the socket being added. If the socket is
2160  *		already attached to eBPF programs, this results in an error.
2161  *	Return
2162  *		0 on success, or a negative error in case of failure.
2163  *
2164  * int bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags)
2165  *	Description
2166  *		This helper is used in programs implementing policies at the
2167  *		socket level. If the message *msg* is allowed to pass (i.e. if
2168  *		the verdict eBPF program returns **SK_PASS**), redirect it to
2169  *		the socket referenced by *map* (of type
2170  *		**BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
2171  *		egress interfaces can be used for redirection. The
2172  *		**BPF_F_INGRESS** value in *flags* is used to make the
2173  *		distinction (ingress path is selected if the flag is present,
2174  *		egress path otherwise). This is the only flag supported for now.
2175  *	Return
2176  *		**SK_PASS** on success, or **SK_DROP** on error.
2177  *
2178  * int bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags)
2179  *	Description
2180  *		This helper is used in programs implementing policies at the
2181  *		skb socket level. If the sk_buff *skb* is allowed to pass (i.e.
2182  *		if the verdeict eBPF program returns **SK_PASS**), redirect it
2183  *		to the socket referenced by *map* (of type
2184  *		**BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
2185  *		egress interfaces can be used for redirection. The
2186  *		**BPF_F_INGRESS** value in *flags* is used to make the
2187  *		distinction (ingress path is selected if the flag is present,
2188  *		egress otherwise). This is the only flag supported for now.
2189  *	Return
2190  *		**SK_PASS** on success, or **SK_DROP** on error.
2191  *
2192  * int bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
2193  *	Description
2194  *		Encapsulate the packet associated to *skb* within a Layer 3
2195  *		protocol header. This header is provided in the buffer at
2196  *		address *hdr*, with *len* its size in bytes. *type* indicates
2197  *		the protocol of the header and can be one of:
2198  *
2199  *		**BPF_LWT_ENCAP_SEG6**
2200  *			IPv6 encapsulation with Segment Routing Header
2201  *			(**struct ipv6_sr_hdr**). *hdr* only contains the SRH,
2202  *			the IPv6 header is computed by the kernel.
2203  *		**BPF_LWT_ENCAP_SEG6_INLINE**
2204  *			Only works if *skb* contains an IPv6 packet. Insert a
2205  *			Segment Routing Header (**struct ipv6_sr_hdr**) inside
2206  *			the IPv6 header.
2207  *		**BPF_LWT_ENCAP_IP**
2208  *			IP encapsulation (GRE/GUE/IPIP/etc). The outer header
2209  *			must be IPv4 or IPv6, followed by zero or more
2210  *			additional headers, up to **LWT_BPF_MAX_HEADROOM**
2211  *			total bytes in all prepended headers. Please note that
2212  *			if **skb_is_gso**\ (*skb*) is true, no more than two
2213  *			headers can be prepended, and the inner header, if
2214  *			present, should be either GRE or UDP/GUE.
2215  *
2216  *		**BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs
2217  *		of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can
2218  *		be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and
2219  *		**BPF_PROG_TYPE_LWT_XMIT**.
2220  *
2221  * 		A call to this helper is susceptible to change the underlying
2222  * 		packet buffer. Therefore, at load time, all checks on pointers
2223  * 		previously done by the verifier are invalidated and must be
2224  * 		performed again, if the helper is used in combination with
2225  * 		direct packet access.
2226  *	Return
2227  * 		0 on success, or a negative error in case of failure.
2228  *
2229  * int bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len)
2230  *	Description
2231  *		Store *len* bytes from address *from* into the packet
2232  *		associated to *skb*, at *offset*. Only the flags, tag and TLVs
2233  *		inside the outermost IPv6 Segment Routing Header can be
2234  *		modified through this helper.
2235  *
2236  * 		A call to this helper is susceptible to change the underlying
2237  * 		packet buffer. Therefore, at load time, all checks on pointers
2238  * 		previously done by the verifier are invalidated and must be
2239  * 		performed again, if the helper is used in combination with
2240  * 		direct packet access.
2241  *	Return
2242  * 		0 on success, or a negative error in case of failure.
2243  *
2244  * int bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta)
2245  *	Description
2246  *		Adjust the size allocated to TLVs in the outermost IPv6
2247  *		Segment Routing Header contained in the packet associated to
2248  *		*skb*, at position *offset* by *delta* bytes. Only offsets
2249  *		after the segments are accepted. *delta* can be as well
2250  *		positive (growing) as negative (shrinking).
2251  *
2252  * 		A call to this helper is susceptible to change the underlying
2253  * 		packet buffer. Therefore, at load time, all checks on pointers
2254  * 		previously done by the verifier are invalidated and must be
2255  * 		performed again, if the helper is used in combination with
2256  * 		direct packet access.
2257  *	Return
2258  * 		0 on success, or a negative error in case of failure.
2259  *
2260  * int bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len)
2261  *	Description
2262  *		Apply an IPv6 Segment Routing action of type *action* to the
2263  *		packet associated to *skb*. Each action takes a parameter
2264  *		contained at address *param*, and of length *param_len* bytes.
2265  *		*action* can be one of:
2266  *
2267  *		**SEG6_LOCAL_ACTION_END_X**
2268  *			End.X action: Endpoint with Layer-3 cross-connect.
2269  *			Type of *param*: **struct in6_addr**.
2270  *		**SEG6_LOCAL_ACTION_END_T**
2271  *			End.T action: Endpoint with specific IPv6 table lookup.
2272  *			Type of *param*: **int**.
2273  *		**SEG6_LOCAL_ACTION_END_B6**
2274  *			End.B6 action: Endpoint bound to an SRv6 policy.
2275  *			Type of *param*: **struct ipv6_sr_hdr**.
2276  *		**SEG6_LOCAL_ACTION_END_B6_ENCAP**
2277  *			End.B6.Encap action: Endpoint bound to an SRv6
2278  *			encapsulation policy.
2279  *			Type of *param*: **struct ipv6_sr_hdr**.
2280  *
2281  * 		A call to this helper is susceptible to change the underlying
2282  * 		packet buffer. Therefore, at load time, all checks on pointers
2283  * 		previously done by the verifier are invalidated and must be
2284  * 		performed again, if the helper is used in combination with
2285  * 		direct packet access.
2286  *	Return
2287  * 		0 on success, or a negative error in case of failure.
2288  *
2289  * int bpf_rc_repeat(void *ctx)
2290  *	Description
2291  *		This helper is used in programs implementing IR decoding, to
2292  *		report a successfully decoded repeat key message. This delays
2293  *		the generation of a key up event for previously generated
2294  *		key down event.
2295  *
2296  *		Some IR protocols like NEC have a special IR message for
2297  *		repeating last button, for when a button is held down.
2298  *
2299  *		The *ctx* should point to the lirc sample as passed into
2300  *		the program.
2301  *
2302  *		This helper is only available is the kernel was compiled with
2303  *		the **CONFIG_BPF_LIRC_MODE2** configuration option set to
2304  *		"**y**".
2305  *	Return
2306  *		0
2307  *
2308  * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
2309  *	Description
2310  *		This helper is used in programs implementing IR decoding, to
2311  *		report a successfully decoded key press with *scancode*,
2312  *		*toggle* value in the given *protocol*. The scancode will be
2313  *		translated to a keycode using the rc keymap, and reported as
2314  *		an input key down event. After a period a key up event is
2315  *		generated. This period can be extended by calling either
2316  *		**bpf_rc_keydown**\ () again with the same values, or calling
2317  *		**bpf_rc_repeat**\ ().
2318  *
2319  *		Some protocols include a toggle bit, in case the button was
2320  *		released and pressed again between consecutive scancodes.
2321  *
2322  *		The *ctx* should point to the lirc sample as passed into
2323  *		the program.
2324  *
2325  *		The *protocol* is the decoded protocol number (see
2326  *		**enum rc_proto** for some predefined values).
2327  *
2328  *		This helper is only available is the kernel was compiled with
2329  *		the **CONFIG_BPF_LIRC_MODE2** configuration option set to
2330  *		"**y**".
2331  *	Return
2332  *		0
2333  *
2334  * u64 bpf_skb_cgroup_id(struct sk_buff *skb)
2335  * 	Description
2336  * 		Return the cgroup v2 id of the socket associated with the *skb*.
2337  * 		This is roughly similar to the **bpf_get_cgroup_classid**\ ()
2338  * 		helper for cgroup v1 by providing a tag resp. identifier that
2339  * 		can be matched on or used for map lookups e.g. to implement
2340  * 		policy. The cgroup v2 id of a given path in the hierarchy is
2341  * 		exposed in user space through the f_handle API in order to get
2342  * 		to the same 64-bit id.
2343  *
2344  * 		This helper can be used on TC egress path, but not on ingress,
2345  * 		and is available only if the kernel was compiled with the
2346  * 		**CONFIG_SOCK_CGROUP_DATA** configuration option.
2347  * 	Return
2348  * 		The id is returned or 0 in case the id could not be retrieved.
2349  *
2350  * u64 bpf_get_current_cgroup_id(void)
2351  * 	Return
2352  * 		A 64-bit integer containing the current cgroup id based
2353  * 		on the cgroup within which the current task is running.
2354  *
2355  * void *bpf_get_local_storage(void *map, u64 flags)
2356  *	Description
2357  *		Get the pointer to the local storage area.
2358  *		The type and the size of the local storage is defined
2359  *		by the *map* argument.
2360  *		The *flags* meaning is specific for each map type,
2361  *		and has to be 0 for cgroup local storage.
2362  *
2363  *		Depending on the BPF program type, a local storage area
2364  *		can be shared between multiple instances of the BPF program,
2365  *		running simultaneously.
2366  *
2367  *		A user should care about the synchronization by himself.
2368  *		For example, by using the **BPF_STX_XADD** instruction to alter
2369  *		the shared data.
2370  *	Return
2371  *		A pointer to the local storage area.
2372  *
2373  * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
2374  *	Description
2375  *		Select a **SO_REUSEPORT** socket from a
2376  *		**BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*.
2377  *		It checks the selected socket is matching the incoming
2378  *		request in the socket buffer.
2379  *	Return
2380  *		0 on success, or a negative error in case of failure.
2381  *
2382  * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
2383  *	Description
2384  *		Return id of cgroup v2 that is ancestor of cgroup associated
2385  *		with the *skb* at the *ancestor_level*.  The root cgroup is at
2386  *		*ancestor_level* zero and each step down the hierarchy
2387  *		increments the level. If *ancestor_level* == level of cgroup
2388  *		associated with *skb*, then return value will be same as that
2389  *		of **bpf_skb_cgroup_id**\ ().
2390  *
2391  *		The helper is useful to implement policies based on cgroups
2392  *		that are upper in hierarchy than immediate cgroup associated
2393  *		with *skb*.
2394  *
2395  *		The format of returned id and helper limitations are same as in
2396  *		**bpf_skb_cgroup_id**\ ().
2397  *	Return
2398  *		The id is returned or 0 in case the id could not be retrieved.
2399  *
2400  * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
2401  *	Description
2402  *		Look for TCP socket matching *tuple*, optionally in a child
2403  *		network namespace *netns*. The return value must be checked,
2404  *		and if non-**NULL**, released via **bpf_sk_release**\ ().
2405  *
2406  *		The *ctx* should point to the context of the program, such as
2407  *		the skb or socket (depending on the hook in use). This is used
2408  *		to determine the base network namespace for the lookup.
2409  *
2410  *		*tuple_size* must be one of:
2411  *
2412  *		**sizeof**\ (*tuple*\ **->ipv4**)
2413  *			Look for an IPv4 socket.
2414  *		**sizeof**\ (*tuple*\ **->ipv6**)
2415  *			Look for an IPv6 socket.
2416  *
2417  *		If the *netns* is a negative signed 32-bit integer, then the
2418  *		socket lookup table in the netns associated with the *ctx* will
2419  *		will be used. For the TC hooks, this is the netns of the device
2420  *		in the skb. For socket hooks, this is the netns of the socket.
2421  *		If *netns* is any other signed 32-bit value greater than or
2422  *		equal to zero then it specifies the ID of the netns relative to
2423  *		the netns associated with the *ctx*. *netns* values beyond the
2424  *		range of 32-bit integers are reserved for future use.
2425  *
2426  *		All values for *flags* are reserved for future usage, and must
2427  *		be left at zero.
2428  *
2429  *		This helper is available only if the kernel was compiled with
2430  *		**CONFIG_NET** configuration option.
2431  *	Return
2432  *		Pointer to **struct bpf_sock**, or **NULL** in case of failure.
2433  *		For sockets with reuseport option, the **struct bpf_sock**
2434  *		result is from *reuse*\ **->socks**\ [] using the hash of the
2435  *		tuple.
2436  *
2437  * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
2438  *	Description
2439  *		Look for UDP socket matching *tuple*, optionally in a child
2440  *		network namespace *netns*. The return value must be checked,
2441  *		and if non-**NULL**, released via **bpf_sk_release**\ ().
2442  *
2443  *		The *ctx* should point to the context of the program, such as
2444  *		the skb or socket (depending on the hook in use). This is used
2445  *		to determine the base network namespace for the lookup.
2446  *
2447  *		*tuple_size* must be one of:
2448  *
2449  *		**sizeof**\ (*tuple*\ **->ipv4**)
2450  *			Look for an IPv4 socket.
2451  *		**sizeof**\ (*tuple*\ **->ipv6**)
2452  *			Look for an IPv6 socket.
2453  *
2454  *		If the *netns* is a negative signed 32-bit integer, then the
2455  *		socket lookup table in the netns associated with the *ctx* will
2456  *		will be used. For the TC hooks, this is the netns of the device
2457  *		in the skb. For socket hooks, this is the netns of the socket.
2458  *		If *netns* is any other signed 32-bit value greater than or
2459  *		equal to zero then it specifies the ID of the netns relative to
2460  *		the netns associated with the *ctx*. *netns* values beyond the
2461  *		range of 32-bit integers are reserved for future use.
2462  *
2463  *		All values for *flags* are reserved for future usage, and must
2464  *		be left at zero.
2465  *
2466  *		This helper is available only if the kernel was compiled with
2467  *		**CONFIG_NET** configuration option.
2468  *	Return
2469  *		Pointer to **struct bpf_sock**, or **NULL** in case of failure.
2470  *		For sockets with reuseport option, the **struct bpf_sock**
2471  *		result is from *reuse*\ **->socks**\ [] using the hash of the
2472  *		tuple.
2473  *
2474  * int bpf_sk_release(struct bpf_sock *sock)
2475  *	Description
2476  *		Release the reference held by *sock*. *sock* must be a
2477  *		non-**NULL** pointer that was returned from
2478  *		**bpf_sk_lookup_xxx**\ ().
2479  *	Return
2480  *		0 on success, or a negative error in case of failure.
2481  *
2482  * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
2483  * 	Description
2484  * 		Push an element *value* in *map*. *flags* is one of:
2485  *
2486  * 		**BPF_EXIST**
2487  * 			If the queue/stack is full, the oldest element is
2488  * 			removed to make room for this.
2489  * 	Return
2490  * 		0 on success, or a negative error in case of failure.
2491  *
2492  * int bpf_map_pop_elem(struct bpf_map *map, void *value)
2493  * 	Description
2494  * 		Pop an element from *map*.
2495  * 	Return
2496  * 		0 on success, or a negative error in case of failure.
2497  *
2498  * int bpf_map_peek_elem(struct bpf_map *map, void *value)
2499  * 	Description
2500  * 		Get an element from *map* without removing it.
2501  * 	Return
2502  * 		0 on success, or a negative error in case of failure.
2503  *
2504  * int bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
2505  *	Description
2506  *		For socket policies, insert *len* bytes into *msg* at offset
2507  *		*start*.
2508  *
2509  *		If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
2510  *		*msg* it may want to insert metadata or options into the *msg*.
2511  *		This can later be read and used by any of the lower layer BPF
2512  *		hooks.
2513  *
2514  *		This helper may fail if under memory pressure (a malloc
2515  *		fails) in these cases BPF programs will get an appropriate
2516  *		error and BPF programs will need to handle them.
2517  *	Return
2518  *		0 on success, or a negative error in case of failure.
2519  *
2520  * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
2521  *	Description
2522  *		Will remove *len* bytes from a *msg* starting at byte *start*.
2523  *		This may result in **ENOMEM** errors under certain situations if
2524  *		an allocation and copy are required due to a full ring buffer.
2525  *		However, the helper will try to avoid doing the allocation
2526  *		if possible. Other errors can occur if input parameters are
2527  *		invalid either due to *start* byte not being valid part of *msg*
2528  *		payload and/or *pop* value being to large.
2529  *	Return
2530  *		0 on success, or a negative error in case of failure.
2531  *
2532  * int bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y)
2533  *	Description
2534  *		This helper is used in programs implementing IR decoding, to
2535  *		report a successfully decoded pointer movement.
2536  *
2537  *		The *ctx* should point to the lirc sample as passed into
2538  *		the program.
2539  *
2540  *		This helper is only available is the kernel was compiled with
2541  *		the **CONFIG_BPF_LIRC_MODE2** configuration option set to
2542  *		"**y**".
2543  *	Return
2544  *		0
2545  *
2546  * int bpf_spin_lock(struct bpf_spin_lock *lock)
2547  *	Description
2548  *		Acquire a spinlock represented by the pointer *lock*, which is
2549  *		stored as part of a value of a map. Taking the lock allows to
2550  *		safely update the rest of the fields in that value. The
2551  *		spinlock can (and must) later be released with a call to
2552  *		**bpf_spin_unlock**\ (\ *lock*\ ).
2553  *
2554  *		Spinlocks in BPF programs come with a number of restrictions
2555  *		and constraints:
2556  *
2557  *		* **bpf_spin_lock** objects are only allowed inside maps of
2558  *		  types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
2559  *		  list could be extended in the future).
2560  *		* BTF description of the map is mandatory.
2561  *		* The BPF program can take ONE lock at a time, since taking two
2562  *		  or more could cause dead locks.
2563  *		* Only one **struct bpf_spin_lock** is allowed per map element.
2564  *		* When the lock is taken, calls (either BPF to BPF or helpers)
2565  *		  are not allowed.
2566  *		* The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
2567  *		  allowed inside a spinlock-ed region.
2568  *		* The BPF program MUST call **bpf_spin_unlock**\ () to release
2569  *		  the lock, on all execution paths, before it returns.
2570  *		* The BPF program can access **struct bpf_spin_lock** only via
2571  *		  the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
2572  *		  helpers. Loading or storing data into the **struct
2573  *		  bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
2574  *		* To use the **bpf_spin_lock**\ () helper, the BTF description
2575  *		  of the map value must be a struct and have **struct
2576  *		  bpf_spin_lock** *anyname*\ **;** field at the top level.
2577  *		  Nested lock inside another struct is not allowed.
2578  *		* The **struct bpf_spin_lock** *lock* field in a map value must
2579  *		  be aligned on a multiple of 4 bytes in that value.
2580  *		* Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
2581  *		  the **bpf_spin_lock** field to user space.
2582  *		* Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
2583  *		  a BPF program, do not update the **bpf_spin_lock** field.
2584  *		* **bpf_spin_lock** cannot be on the stack or inside a
2585  *		  networking packet (it can only be inside of a map values).
2586  *		* **bpf_spin_lock** is available to root only.
2587  *		* Tracing programs and socket filter programs cannot use
2588  *		  **bpf_spin_lock**\ () due to insufficient preemption checks
2589  *		  (but this may change in the future).
2590  *		* **bpf_spin_lock** is not allowed in inner maps of map-in-map.
2591  *	Return
2592  *		0
2593  *
2594  * int bpf_spin_unlock(struct bpf_spin_lock *lock)
2595  *	Description
2596  *		Release the *lock* previously locked by a call to
2597  *		**bpf_spin_lock**\ (\ *lock*\ ).
2598  *	Return
2599  *		0
2600  *
2601  * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
2602  *	Description
2603  *		This helper gets a **struct bpf_sock** pointer such
2604  *		that all the fields in this **bpf_sock** can be accessed.
2605  *	Return
2606  *		A **struct bpf_sock** pointer on success, or **NULL** in
2607  *		case of failure.
2608  *
2609  * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
2610  *	Description
2611  *		This helper gets a **struct bpf_tcp_sock** pointer from a
2612  *		**struct bpf_sock** pointer.
2613  *	Return
2614  *		A **struct bpf_tcp_sock** pointer on success, or **NULL** in
2615  *		case of failure.
2616  *
2617  * int bpf_skb_ecn_set_ce(struct sk_buff *skb)
2618  *	Description
2619  *		Set ECN (Explicit Congestion Notification) field of IP header
2620  *		to **CE** (Congestion Encountered) if current value is **ECT**
2621  *		(ECN Capable Transport). Otherwise, do nothing. Works with IPv6
2622  *		and IPv4.
2623  *	Return
2624  *		1 if the **CE** flag is set (either by the current helper call
2625  *		or because it was already present), 0 if it is not set.
2626  *
2627  * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk)
2628  *	Description
2629  *		Return a **struct bpf_sock** pointer in **TCP_LISTEN** state.
2630  *		**bpf_sk_release**\ () is unnecessary and not allowed.
2631  *	Return
2632  *		A **struct bpf_sock** pointer on success, or **NULL** in
2633  *		case of failure.
2634  *
2635  * struct bpf_sock *bpf_skc_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
2636  *	Description
2637  *		Look for TCP socket matching *tuple*, optionally in a child
2638  *		network namespace *netns*. The return value must be checked,
2639  *		and if non-**NULL**, released via **bpf_sk_release**\ ().
2640  *
2641  *		This function is identical to **bpf_sk_lookup_tcp**\ (), except
2642  *		that it also returns timewait or request sockets. Use
2643  *		**bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the
2644  *		full structure.
2645  *
2646  *		This helper is available only if the kernel was compiled with
2647  *		**CONFIG_NET** configuration option.
2648  *	Return
2649  *		Pointer to **struct bpf_sock**, or **NULL** in case of failure.
2650  *		For sockets with reuseport option, the **struct bpf_sock**
2651  *		result is from *reuse*\ **->socks**\ [] using the hash of the
2652  *		tuple.
2653  *
2654  * int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
2655  * 	Description
2656  * 		Check whether *iph* and *th* contain a valid SYN cookie ACK for
2657  * 		the listening socket in *sk*.
2658  *
2659  * 		*iph* points to the start of the IPv4 or IPv6 header, while
2660  * 		*iph_len* contains **sizeof**\ (**struct iphdr**) or
2661  * 		**sizeof**\ (**struct ip6hdr**).
2662  *
2663  * 		*th* points to the start of the TCP header, while *th_len*
2664  * 		contains **sizeof**\ (**struct tcphdr**).
2665  * 	Return
2666  * 		0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
2667  * 		error otherwise.
2668  *
2669  * int bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags)
2670  *	Description
2671  *		Get name of sysctl in /proc/sys/ and copy it into provided by
2672  *		program buffer *buf* of size *buf_len*.
2673  *
2674  *		The buffer is always NUL terminated, unless it's zero-sized.
2675  *
2676  *		If *flags* is zero, full name (e.g. "net/ipv4/tcp_mem") is
2677  *		copied. Use **BPF_F_SYSCTL_BASE_NAME** flag to copy base name
2678  *		only (e.g. "tcp_mem").
2679  *	Return
2680  *		Number of character copied (not including the trailing NUL).
2681  *
2682  *		**-E2BIG** if the buffer wasn't big enough (*buf* will contain
2683  *		truncated name in this case).
2684  *
2685  * int bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
2686  *	Description
2687  *		Get current value of sysctl as it is presented in /proc/sys
2688  *		(incl. newline, etc), and copy it as a string into provided
2689  *		by program buffer *buf* of size *buf_len*.
2690  *
2691  *		The whole value is copied, no matter what file position user
2692  *		space issued e.g. sys_read at.
2693  *
2694  *		The buffer is always NUL terminated, unless it's zero-sized.
2695  *	Return
2696  *		Number of character copied (not including the trailing NUL).
2697  *
2698  *		**-E2BIG** if the buffer wasn't big enough (*buf* will contain
2699  *		truncated name in this case).
2700  *
2701  *		**-EINVAL** if current value was unavailable, e.g. because
2702  *		sysctl is uninitialized and read returns -EIO for it.
2703  *
2704  * int bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
2705  *	Description
2706  *		Get new value being written by user space to sysctl (before
2707  *		the actual write happens) and copy it as a string into
2708  *		provided by program buffer *buf* of size *buf_len*.
2709  *
2710  *		User space may write new value at file position > 0.
2711  *
2712  *		The buffer is always NUL terminated, unless it's zero-sized.
2713  *	Return
2714  *		Number of character copied (not including the trailing NUL).
2715  *
2716  *		**-E2BIG** if the buffer wasn't big enough (*buf* will contain
2717  *		truncated name in this case).
2718  *
2719  *		**-EINVAL** if sysctl is being read.
2720  *
2721  * int bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len)
2722  *	Description
2723  *		Override new value being written by user space to sysctl with
2724  *		value provided by program in buffer *buf* of size *buf_len*.
2725  *
2726  *		*buf* should contain a string in same form as provided by user
2727  *		space on sysctl write.
2728  *
2729  *		User space may write new value at file position > 0. To override
2730  *		the whole sysctl value file position should be set to zero.
2731  *	Return
2732  *		0 on success.
2733  *
2734  *		**-E2BIG** if the *buf_len* is too big.
2735  *
2736  *		**-EINVAL** if sysctl is being read.
2737  *
2738  * int bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res)
2739  *	Description
2740  *		Convert the initial part of the string from buffer *buf* of
2741  *		size *buf_len* to a long integer according to the given base
2742  *		and save the result in *res*.
2743  *
2744  *		The string may begin with an arbitrary amount of white space
2745  *		(as determined by **isspace**\ (3)) followed by a single
2746  *		optional '**-**' sign.
2747  *
2748  *		Five least significant bits of *flags* encode base, other bits
2749  *		are currently unused.
2750  *
2751  *		Base must be either 8, 10, 16 or 0 to detect it automatically
2752  *		similar to user space **strtol**\ (3).
2753  *	Return
2754  *		Number of characters consumed on success. Must be positive but
2755  *		no more than *buf_len*.
2756  *
2757  *		**-EINVAL** if no valid digits were found or unsupported base
2758  *		was provided.
2759  *
2760  *		**-ERANGE** if resulting value was out of range.
2761  *
2762  * int bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res)
2763  *	Description
2764  *		Convert the initial part of the string from buffer *buf* of
2765  *		size *buf_len* to an unsigned long integer according to the
2766  *		given base and save the result in *res*.
2767  *
2768  *		The string may begin with an arbitrary amount of white space
2769  *		(as determined by **isspace**\ (3)).
2770  *
2771  *		Five least significant bits of *flags* encode base, other bits
2772  *		are currently unused.
2773  *
2774  *		Base must be either 8, 10, 16 or 0 to detect it automatically
2775  *		similar to user space **strtoul**\ (3).
2776  *	Return
2777  *		Number of characters consumed on success. Must be positive but
2778  *		no more than *buf_len*.
2779  *
2780  *		**-EINVAL** if no valid digits were found or unsupported base
2781  *		was provided.
2782  *
2783  *		**-ERANGE** if resulting value was out of range.
2784  *
2785  * void *bpf_sk_storage_get(struct bpf_map *map, struct bpf_sock *sk, void *value, u64 flags)
2786  *	Description
2787  *		Get a bpf-local-storage from a *sk*.
2788  *
2789  *		Logically, it could be thought of getting the value from
2790  *		a *map* with *sk* as the **key**.  From this
2791  *		perspective,  the usage is not much different from
2792  *		**bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except this
2793  *		helper enforces the key must be a full socket and the map must
2794  *		be a **BPF_MAP_TYPE_SK_STORAGE** also.
2795  *
2796  *		Underneath, the value is stored locally at *sk* instead of
2797  *		the *map*.  The *map* is used as the bpf-local-storage
2798  *		"type". The bpf-local-storage "type" (i.e. the *map*) is
2799  *		searched against all bpf-local-storages residing at *sk*.
2800  *
2801  *		An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be
2802  *		used such that a new bpf-local-storage will be
2803  *		created if one does not exist.  *value* can be used
2804  *		together with **BPF_SK_STORAGE_GET_F_CREATE** to specify
2805  *		the initial value of a bpf-local-storage.  If *value* is
2806  *		**NULL**, the new bpf-local-storage will be zero initialized.
2807  *	Return
2808  *		A bpf-local-storage pointer is returned on success.
2809  *
2810  *		**NULL** if not found or there was an error in adding
2811  *		a new bpf-local-storage.
2812  *
2813  * int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk)
2814  *	Description
2815  *		Delete a bpf-local-storage from a *sk*.
2816  *	Return
2817  *		0 on success.
2818  *
2819  *		**-ENOENT** if the bpf-local-storage cannot be found.
2820  *
2821  * int bpf_send_signal(u32 sig)
2822  *	Description
2823  *		Send signal *sig* to the process of the current task.
2824  *		The signal may be delivered to any of this process's threads.
2825  *	Return
2826  *		0 on success or successfully queued.
2827  *
2828  *		**-EBUSY** if work queue under nmi is full.
2829  *
2830  *		**-EINVAL** if *sig* is invalid.
2831  *
2832  *		**-EPERM** if no permission to send the *sig*.
2833  *
2834  *		**-EAGAIN** if bpf program can try again.
2835  *
2836  * s64 bpf_tcp_gen_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
2837  *	Description
2838  *		Try to issue a SYN cookie for the packet with corresponding
2839  *		IP/TCP headers, *iph* and *th*, on the listening socket in *sk*.
2840  *
2841  *		*iph* points to the start of the IPv4 or IPv6 header, while
2842  *		*iph_len* contains **sizeof**\ (**struct iphdr**) or
2843  *		**sizeof**\ (**struct ip6hdr**).
2844  *
2845  *		*th* points to the start of the TCP header, while *th_len*
2846  *		contains the length of the TCP header.
2847  *	Return
2848  *		On success, lower 32 bits hold the generated SYN cookie in
2849  *		followed by 16 bits which hold the MSS value for that cookie,
2850  *		and the top 16 bits are unused.
2851  *
2852  *		On failure, the returned value is one of the following:
2853  *
2854  *		**-EINVAL** SYN cookie cannot be issued due to error
2855  *
2856  *		**-ENOENT** SYN cookie should not be issued (no SYN flood)
2857  *
2858  *		**-EOPNOTSUPP** kernel configuration does not enable SYN cookies
2859  *
2860  *		**-EPROTONOSUPPORT** IP packet version is not 4 or 6
2861  *
2862  * int bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
2863  * 	Description
2864  * 		Write raw *data* blob into a special BPF perf event held by
2865  * 		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
2866  * 		event must have the following attributes: **PERF_SAMPLE_RAW**
2867  * 		as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
2868  * 		**PERF_COUNT_SW_BPF_OUTPUT** as **config**.
2869  *
2870  * 		The *flags* are used to indicate the index in *map* for which
2871  * 		the value must be put, masked with **BPF_F_INDEX_MASK**.
2872  * 		Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
2873  * 		to indicate that the index of the current CPU core should be
2874  * 		used.
2875  *
2876  * 		The value to write, of *size*, is passed through eBPF stack and
2877  * 		pointed by *data*.
2878  *
2879  * 		*ctx* is a pointer to in-kernel struct sk_buff.
2880  *
2881  * 		This helper is similar to **bpf_perf_event_output**\ () but
2882  * 		restricted to raw_tracepoint bpf programs.
2883  * 	Return
2884  * 		0 on success, or a negative error in case of failure.
2885  *
2886  * int bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr)
2887  * 	Description
2888  * 		Safely attempt to read *size* bytes from user space address
2889  * 		*unsafe_ptr* and store the data in *dst*.
2890  * 	Return
2891  * 		0 on success, or a negative error in case of failure.
2892  *
2893  * int bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
2894  * 	Description
2895  * 		Safely attempt to read *size* bytes from kernel space address
2896  * 		*unsafe_ptr* and store the data in *dst*.
2897  * 	Return
2898  * 		0 on success, or a negative error in case of failure.
2899  *
2900  * int bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr)
2901  * 	Description
2902  * 		Copy a NUL terminated string from an unsafe user address
2903  * 		*unsafe_ptr* to *dst*. The *size* should include the
2904  * 		terminating NUL byte. In case the string length is smaller than
2905  * 		*size*, the target is not padded with further NUL bytes. If the
2906  * 		string length is larger than *size*, just *size*-1 bytes are
2907  * 		copied and the last byte is set to NUL.
2908  *
2909  * 		On success, the length of the copied string is returned. This
2910  * 		makes this helper useful in tracing programs for reading
2911  * 		strings, and more importantly to get its length at runtime. See
2912  * 		the following snippet:
2913  *
2914  * 		::
2915  *
2916  * 			SEC("kprobe/sys_open")
2917  * 			void bpf_sys_open(struct pt_regs *ctx)
2918  * 			{
2919  * 			        char buf[PATHLEN]; // PATHLEN is defined to 256
2920  * 			        int res = bpf_probe_read_user_str(buf, sizeof(buf),
2921  * 				                                  ctx->di);
2922  *
2923  * 				// Consume buf, for example push it to
2924  * 				// userspace via bpf_perf_event_output(); we
2925  * 				// can use res (the string length) as event
2926  * 				// size, after checking its boundaries.
2927  * 			}
2928  *
2929  * 		In comparison, using **bpf_probe_read_user**\ () helper here
2930  * 		instead to read the string would require to estimate the length
2931  * 		at compile time, and would often result in copying more memory
2932  * 		than necessary.
2933  *
2934  * 		Another useful use case is when parsing individual process
2935  * 		arguments or individual environment variables navigating
2936  * 		*current*\ **->mm->arg_start** and *current*\
2937  * 		**->mm->env_start**: using this helper and the return value,
2938  * 		one can quickly iterate at the right offset of the memory area.
2939  * 	Return
2940  * 		On success, the strictly positive length of the string,
2941  * 		including the trailing NUL character. On error, a negative
2942  * 		value.
2943  *
2944  * int bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr)
2945  * 	Description
2946  * 		Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr*
2947  * 		to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply.
2948  * 	Return
2949  * 		On success, the strictly positive length of the string, including
2950  * 		the trailing NUL character. On error, a negative value.
2951  *
2952  * int bpf_tcp_send_ack(void *tp, u32 rcv_nxt)
2953  *	Description
2954  *		Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**.
2955  *		*rcv_nxt* is the ack_seq to be sent out.
2956  *	Return
2957  *		0 on success, or a negative error in case of failure.
2958  *
2959  * int bpf_send_signal_thread(u32 sig)
2960  *	Description
2961  *		Send signal *sig* to the thread corresponding to the current task.
2962  *	Return
2963  *		0 on success or successfully queued.
2964  *
2965  *		**-EBUSY** if work queue under nmi is full.
2966  *
2967  *		**-EINVAL** if *sig* is invalid.
2968  *
2969  *		**-EPERM** if no permission to send the *sig*.
2970  *
2971  *		**-EAGAIN** if bpf program can try again.
2972  *
2973  * u64 bpf_jiffies64(void)
2974  *	Description
2975  *		Obtain the 64bit jiffies
2976  *	Return
2977  *		The 64 bit jiffies
2978  *
2979  * int bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
2980  *	Description
2981  *		For an eBPF program attached to a perf event, retrieve the
2982  *		branch records (**struct perf_branch_entry**) associated to *ctx*
2983  *		and store it in the buffer pointed by *buf* up to size
2984  *		*size* bytes.
2985  *	Return
2986  *		On success, number of bytes written to *buf*. On error, a
2987  *		negative value.
2988  *
2989  *		The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to
2990  *		instead return the number of bytes required to store all the
2991  *		branch entries. If this flag is set, *buf* may be NULL.
2992  *
2993  *		**-EINVAL** if arguments invalid or **size** not a multiple
2994  *		of **sizeof**\ (**struct perf_branch_entry**\ ).
2995  *
2996  *		**-ENOENT** if architecture does not support branch records.
2997  *
2998  * int bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size)
2999  *	Description
3000  *		Returns 0 on success, values for *pid* and *tgid* as seen from the current
3001  *		*namespace* will be returned in *nsdata*.
3002  *	Return
3003  *		0 on success, or one of the following in case of failure:
3004  *
3005  *		**-EINVAL** if dev and inum supplied don't match dev_t and inode number
3006  *              with nsfs of current task, or if dev conversion to dev_t lost high bits.
3007  *
3008  *		**-ENOENT** if pidns does not exists for the current task.
3009  *
3010  * int bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
3011  *	Description
3012  *		Write raw *data* blob into a special BPF perf event held by
3013  *		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
3014  *		event must have the following attributes: **PERF_SAMPLE_RAW**
3015  *		as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
3016  *		**PERF_COUNT_SW_BPF_OUTPUT** as **config**.
3017  *
3018  *		The *flags* are used to indicate the index in *map* for which
3019  *		the value must be put, masked with **BPF_F_INDEX_MASK**.
3020  *		Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
3021  *		to indicate that the index of the current CPU core should be
3022  *		used.
3023  *
3024  *		The value to write, of *size*, is passed through eBPF stack and
3025  *		pointed by *data*.
3026  *
3027  *		*ctx* is a pointer to in-kernel struct xdp_buff.
3028  *
3029  *		This helper is similar to **bpf_perf_eventoutput**\ () but
3030  *		restricted to raw_tracepoint bpf programs.
3031  *	Return
3032  *		0 on success, or a negative error in case of failure.
3033  *
3034  * u64 bpf_get_netns_cookie(void *ctx)
3035  * 	Description
3036  * 		Retrieve the cookie (generated by the kernel) of the network
3037  * 		namespace the input *ctx* is associated with. The network
3038  * 		namespace cookie remains stable for its lifetime and provides
3039  * 		a global identifier that can be assumed unique. If *ctx* is
3040  * 		NULL, then the helper returns the cookie for the initial
3041  * 		network namespace. The cookie itself is very similar to that
3042  * 		of **bpf_get_socket_cookie**\ () helper, but for network
3043  * 		namespaces instead of sockets.
3044  * 	Return
3045  * 		A 8-byte long opaque number.
3046  *
3047  * u64 bpf_get_current_ancestor_cgroup_id(int ancestor_level)
3048  * 	Description
3049  * 		Return id of cgroup v2 that is ancestor of the cgroup associated
3050  * 		with the current task at the *ancestor_level*. The root cgroup
3051  * 		is at *ancestor_level* zero and each step down the hierarchy
3052  * 		increments the level. If *ancestor_level* == level of cgroup
3053  * 		associated with the current task, then return value will be the
3054  * 		same as that of **bpf_get_current_cgroup_id**\ ().
3055  *
3056  * 		The helper is useful to implement policies based on cgroups
3057  * 		that are upper in hierarchy than immediate cgroup associated
3058  * 		with the current task.
3059  *
3060  * 		The format of returned id and helper limitations are same as in
3061  * 		**bpf_get_current_cgroup_id**\ ().
3062  * 	Return
3063  * 		The id is returned or 0 in case the id could not be retrieved.
3064  *
3065  * int bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags)
3066  *	Description
3067  *		Assign the *sk* to the *skb*. When combined with appropriate
3068  *		routing configuration to receive the packet towards the socket,
3069  *		will cause *skb* to be delivered to the specified socket.
3070  *		Subsequent redirection of *skb* via  **bpf_redirect**\ (),
3071  *		**bpf_clone_redirect**\ () or other methods outside of BPF may
3072  *		interfere with successful delivery to the socket.
3073  *
3074  *		This operation is only valid from TC ingress path.
3075  *
3076  *		The *flags* argument must be zero.
3077  *	Return
3078  *		0 on success, or a negative error in case of failure:
3079  *
3080  *		**-EINVAL** if specified *flags* are not supported.
3081  *
3082  *		**-ENOENT** if the socket is unavailable for assignment.
3083  *
3084  *		**-ENETUNREACH** if the socket is unreachable (wrong netns).
3085  *
3086  *		**-EOPNOTSUPP** if the operation is not supported, for example
3087  *		a call from outside of TC ingress.
3088  *
3089  *		**-ESOCKTNOSUPPORT** if the socket type is not supported
3090  *		(reuseport).
3091  *
3092  * u64 bpf_ktime_get_boot_ns(void)
3093  * 	Description
3094  * 		Return the time elapsed since system boot, in nanoseconds.
3095  * 		Does include the time the system was suspended.
3096  * 		See: **clock_gettime**\ (**CLOCK_BOOTTIME**)
3097  * 	Return
3098  * 		Current *ktime*.
3099  *
3100  * int bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len)
3101  * 	Description
3102  * 		**bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print
3103  * 		out the format string.
3104  * 		The *m* represents the seq_file. The *fmt* and *fmt_size* are for
3105  * 		the format string itself. The *data* and *data_len* are format string
3106  * 		arguments. The *data* are a **u64** array and corresponding format string
3107  * 		values are stored in the array. For strings and pointers where pointees
3108  * 		are accessed, only the pointer values are stored in the *data* array.
3109  * 		The *data_len* is the size of *data* in bytes.
3110  *
3111  *		Formats **%s**, **%p{i,I}{4,6}** requires to read kernel memory.
3112  *		Reading kernel memory may fail due to either invalid address or
3113  *		valid address but requiring a major memory fault. If reading kernel memory
3114  *		fails, the string for **%s** will be an empty string, and the ip
3115  *		address for **%p{i,I}{4,6}** will be 0. Not returning error to
3116  *		bpf program is consistent with what **bpf_trace_printk**\ () does for now.
3117  * 	Return
3118  * 		0 on success, or a negative error in case of failure:
3119  *
3120  *		**-EBUSY** if per-CPU memory copy buffer is busy, can try again
3121  *		by returning 1 from bpf program.
3122  *
3123  *		**-EINVAL** if arguments are invalid, or if *fmt* is invalid/unsupported.
3124  *
3125  *		**-E2BIG** if *fmt* contains too many format specifiers.
3126  *
3127  *		**-EOVERFLOW** if an overflow happened: The same object will be tried again.
3128  *
3129  * int bpf_seq_write(struct seq_file *m, const void *data, u32 len)
3130  * 	Description
3131  * 		**bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data.
3132  * 		The *m* represents the seq_file. The *data* and *len* represent the
3133  * 		data to write in bytes.
3134  * 	Return
3135  * 		0 on success, or a negative error in case of failure:
3136  *
3137  *		**-EOVERFLOW** if an overflow happened: The same object will be tried again.
3138  *
3139  * u64 bpf_sk_cgroup_id(struct bpf_sock *sk)
3140  *	Description
3141  *		Return the cgroup v2 id of the socket *sk*.
3142  *
3143  *		*sk* must be a non-**NULL** pointer to a full socket, e.g. one
3144  *		returned from **bpf_sk_lookup_xxx**\ (),
3145  *		**bpf_sk_fullsock**\ (), etc. The format of returned id is
3146  *		same as in **bpf_skb_cgroup_id**\ ().
3147  *
3148  *		This helper is available only if the kernel was compiled with
3149  *		the **CONFIG_SOCK_CGROUP_DATA** configuration option.
3150  *	Return
3151  *		The id is returned or 0 in case the id could not be retrieved.
3152  *
3153  * u64 bpf_sk_ancestor_cgroup_id(struct bpf_sock *sk, int ancestor_level)
3154  *	Description
3155  *		Return id of cgroup v2 that is ancestor of cgroup associated
3156  *		with the *sk* at the *ancestor_level*.  The root cgroup is at
3157  *		*ancestor_level* zero and each step down the hierarchy
3158  *		increments the level. If *ancestor_level* == level of cgroup
3159  *		associated with *sk*, then return value will be same as that
3160  *		of **bpf_sk_cgroup_id**\ ().
3161  *
3162  *		The helper is useful to implement policies based on cgroups
3163  *		that are upper in hierarchy than immediate cgroup associated
3164  *		with *sk*.
3165  *
3166  *		The format of returned id and helper limitations are same as in
3167  *		**bpf_sk_cgroup_id**\ ().
3168  *	Return
3169  *		The id is returned or 0 in case the id could not be retrieved.
3170  *
3171  * int bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
3172  * 	Description
3173  * 		Copy *size* bytes from *data* into a ring buffer *ringbuf*.
3174  * 		If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
3175  * 		of new data availability is sent.
3176  * 		If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
3177  * 		of new data availability is sent unconditionally.
3178  * 	Return
3179  * 		0 on success, or a negative error in case of failure.
3180  *
3181  * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags)
3182  * 	Description
3183  * 		Reserve *size* bytes of payload in a ring buffer *ringbuf*.
3184  * 	Return
3185  * 		Valid pointer with *size* bytes of memory available; NULL,
3186  * 		otherwise.
3187  *
3188  * void bpf_ringbuf_submit(void *data, u64 flags)
3189  * 	Description
3190  * 		Submit reserved ring buffer sample, pointed to by *data*.
3191  * 		If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
3192  * 		of new data availability is sent.
3193  * 		If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
3194  * 		of new data availability is sent unconditionally.
3195  * 	Return
3196  * 		Nothing. Always succeeds.
3197  *
3198  * void bpf_ringbuf_discard(void *data, u64 flags)
3199  * 	Description
3200  * 		Discard reserved ring buffer sample, pointed to by *data*.
3201  * 		If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
3202  * 		of new data availability is sent.
3203  * 		If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
3204  * 		of new data availability is sent unconditionally.
3205  * 	Return
3206  * 		Nothing. Always succeeds.
3207  *
3208  * u64 bpf_ringbuf_query(void *ringbuf, u64 flags)
3209  *	Description
3210  *		Query various characteristics of provided ring buffer. What
3211  *		exactly is queries is determined by *flags*:
3212  *
3213  *		* **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed.
3214  *		* **BPF_RB_RING_SIZE**: The size of ring buffer.
3215  *		* **BPF_RB_CONS_POS**: Consumer position (can wrap around).
3216  *		* **BPF_RB_PROD_POS**: Producer(s) position (can wrap around).
3217  *
3218  *		Data returned is just a momentary snapshot of actual values
3219  *		and could be inaccurate, so this facility should be used to
3220  *		power heuristics and for reporting, not to make 100% correct
3221  *		calculation.
3222  *	Return
3223  *		Requested value, or 0, if *flags* are not recognized.
3224  *
3225  * int bpf_csum_level(struct sk_buff *skb, u64 level)
3226  * 	Description
3227  * 		Change the skbs checksum level by one layer up or down, or
3228  * 		reset it entirely to none in order to have the stack perform
3229  * 		checksum validation. The level is applicable to the following
3230  * 		protocols: TCP, UDP, GRE, SCTP, FCOE. For example, a decap of
3231  * 		| ETH | IP | UDP | GUE | IP | TCP | into | ETH | IP | TCP |
3232  * 		through **bpf_skb_adjust_room**\ () helper with passing in
3233  * 		**BPF_F_ADJ_ROOM_NO_CSUM_RESET** flag would require one	call
3234  * 		to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_DEC** since
3235  * 		the UDP header is removed. Similarly, an encap of the latter
3236  * 		into the former could be accompanied by a helper call to
3237  * 		**bpf_csum_level**\ () with **BPF_CSUM_LEVEL_INC** if the
3238  * 		skb is still intended to be processed in higher layers of the
3239  * 		stack instead of just egressing at tc.
3240  *
3241  * 		There are three supported level settings at this time:
3242  *
3243  * 		* **BPF_CSUM_LEVEL_INC**: Increases skb->csum_level for skbs
3244  * 		  with CHECKSUM_UNNECESSARY.
3245  * 		* **BPF_CSUM_LEVEL_DEC**: Decreases skb->csum_level for skbs
3246  * 		  with CHECKSUM_UNNECESSARY.
3247  * 		* **BPF_CSUM_LEVEL_RESET**: Resets skb->csum_level to 0 and
3248  * 		  sets CHECKSUM_NONE to force checksum validation by the stack.
3249  * 		* **BPF_CSUM_LEVEL_QUERY**: No-op, returns the current
3250  * 		  skb->csum_level.
3251  * 	Return
3252  * 		0 on success, or a negative error in case of failure. In the
3253  * 		case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level
3254  * 		is returned or the error code -EACCES in case the skb is not
3255  * 		subject to CHECKSUM_UNNECESSARY.
3256  */
3257 #define __BPF_FUNC_MAPPER(FN)		\
3258 	FN(unspec),			\
3259 	FN(map_lookup_elem),		\
3260 	FN(map_update_elem),		\
3261 	FN(map_delete_elem),		\
3262 	FN(probe_read),			\
3263 	FN(ktime_get_ns),		\
3264 	FN(trace_printk),		\
3265 	FN(get_prandom_u32),		\
3266 	FN(get_smp_processor_id),	\
3267 	FN(skb_store_bytes),		\
3268 	FN(l3_csum_replace),		\
3269 	FN(l4_csum_replace),		\
3270 	FN(tail_call),			\
3271 	FN(clone_redirect),		\
3272 	FN(get_current_pid_tgid),	\
3273 	FN(get_current_uid_gid),	\
3274 	FN(get_current_comm),		\
3275 	FN(get_cgroup_classid),		\
3276 	FN(skb_vlan_push),		\
3277 	FN(skb_vlan_pop),		\
3278 	FN(skb_get_tunnel_key),		\
3279 	FN(skb_set_tunnel_key),		\
3280 	FN(perf_event_read),		\
3281 	FN(redirect),			\
3282 	FN(get_route_realm),		\
3283 	FN(perf_event_output),		\
3284 	FN(skb_load_bytes),		\
3285 	FN(get_stackid),		\
3286 	FN(csum_diff),			\
3287 	FN(skb_get_tunnel_opt),		\
3288 	FN(skb_set_tunnel_opt),		\
3289 	FN(skb_change_proto),		\
3290 	FN(skb_change_type),		\
3291 	FN(skb_under_cgroup),		\
3292 	FN(get_hash_recalc),		\
3293 	FN(get_current_task),		\
3294 	FN(probe_write_user),		\
3295 	FN(current_task_under_cgroup),	\
3296 	FN(skb_change_tail),		\
3297 	FN(skb_pull_data),		\
3298 	FN(csum_update),		\
3299 	FN(set_hash_invalid),		\
3300 	FN(get_numa_node_id),		\
3301 	FN(skb_change_head),		\
3302 	FN(xdp_adjust_head),		\
3303 	FN(probe_read_str),		\
3304 	FN(get_socket_cookie),		\
3305 	FN(get_socket_uid),		\
3306 	FN(set_hash),			\
3307 	FN(setsockopt),			\
3308 	FN(skb_adjust_room),		\
3309 	FN(redirect_map),		\
3310 	FN(sk_redirect_map),		\
3311 	FN(sock_map_update),		\
3312 	FN(xdp_adjust_meta),		\
3313 	FN(perf_event_read_value),	\
3314 	FN(perf_prog_read_value),	\
3315 	FN(getsockopt),			\
3316 	FN(override_return),		\
3317 	FN(sock_ops_cb_flags_set),	\
3318 	FN(msg_redirect_map),		\
3319 	FN(msg_apply_bytes),		\
3320 	FN(msg_cork_bytes),		\
3321 	FN(msg_pull_data),		\
3322 	FN(bind),			\
3323 	FN(xdp_adjust_tail),		\
3324 	FN(skb_get_xfrm_state),		\
3325 	FN(get_stack),			\
3326 	FN(skb_load_bytes_relative),	\
3327 	FN(fib_lookup),			\
3328 	FN(sock_hash_update),		\
3329 	FN(msg_redirect_hash),		\
3330 	FN(sk_redirect_hash),		\
3331 	FN(lwt_push_encap),		\
3332 	FN(lwt_seg6_store_bytes),	\
3333 	FN(lwt_seg6_adjust_srh),	\
3334 	FN(lwt_seg6_action),		\
3335 	FN(rc_repeat),			\
3336 	FN(rc_keydown),			\
3337 	FN(skb_cgroup_id),		\
3338 	FN(get_current_cgroup_id),	\
3339 	FN(get_local_storage),		\
3340 	FN(sk_select_reuseport),	\
3341 	FN(skb_ancestor_cgroup_id),	\
3342 	FN(sk_lookup_tcp),		\
3343 	FN(sk_lookup_udp),		\
3344 	FN(sk_release),			\
3345 	FN(map_push_elem),		\
3346 	FN(map_pop_elem),		\
3347 	FN(map_peek_elem),		\
3348 	FN(msg_push_data),		\
3349 	FN(msg_pop_data),		\
3350 	FN(rc_pointer_rel),		\
3351 	FN(spin_lock),			\
3352 	FN(spin_unlock),		\
3353 	FN(sk_fullsock),		\
3354 	FN(tcp_sock),			\
3355 	FN(skb_ecn_set_ce),		\
3356 	FN(get_listener_sock),		\
3357 	FN(skc_lookup_tcp),		\
3358 	FN(tcp_check_syncookie),	\
3359 	FN(sysctl_get_name),		\
3360 	FN(sysctl_get_current_value),	\
3361 	FN(sysctl_get_new_value),	\
3362 	FN(sysctl_set_new_value),	\
3363 	FN(strtol),			\
3364 	FN(strtoul),			\
3365 	FN(sk_storage_get),		\
3366 	FN(sk_storage_delete),		\
3367 	FN(send_signal),		\
3368 	FN(tcp_gen_syncookie),		\
3369 	FN(skb_output),			\
3370 	FN(probe_read_user),		\
3371 	FN(probe_read_kernel),		\
3372 	FN(probe_read_user_str),	\
3373 	FN(probe_read_kernel_str),	\
3374 	FN(tcp_send_ack),		\
3375 	FN(send_signal_thread),		\
3376 	FN(jiffies64),			\
3377 	FN(read_branch_records),	\
3378 	FN(get_ns_current_pid_tgid),	\
3379 	FN(xdp_output),			\
3380 	FN(get_netns_cookie),		\
3381 	FN(get_current_ancestor_cgroup_id),	\
3382 	FN(sk_assign),			\
3383 	FN(ktime_get_boot_ns),		\
3384 	FN(seq_printf),			\
3385 	FN(seq_write),			\
3386 	FN(sk_cgroup_id),		\
3387 	FN(sk_ancestor_cgroup_id),	\
3388 	FN(ringbuf_output),		\
3389 	FN(ringbuf_reserve),		\
3390 	FN(ringbuf_submit),		\
3391 	FN(ringbuf_discard),		\
3392 	FN(ringbuf_query),		\
3393 	FN(csum_level),
3394 
3395 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
3396  * function eBPF program intends to call
3397  */
3398 #define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
3399 enum bpf_func_id {
3400 	__BPF_FUNC_MAPPER(__BPF_ENUM_FN)
3401 	__BPF_FUNC_MAX_ID,
3402 };
3403 #undef __BPF_ENUM_FN
3404 
3405 /* All flags used by eBPF helper functions, placed here. */
3406 
3407 /* BPF_FUNC_skb_store_bytes flags. */
3408 enum {
3409 	BPF_F_RECOMPUTE_CSUM		= (1ULL << 0),
3410 	BPF_F_INVALIDATE_HASH		= (1ULL << 1),
3411 };
3412 
3413 /* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
3414  * First 4 bits are for passing the header field size.
3415  */
3416 enum {
3417 	BPF_F_HDR_FIELD_MASK		= 0xfULL,
3418 };
3419 
3420 /* BPF_FUNC_l4_csum_replace flags. */
3421 enum {
3422 	BPF_F_PSEUDO_HDR		= (1ULL << 4),
3423 	BPF_F_MARK_MANGLED_0		= (1ULL << 5),
3424 	BPF_F_MARK_ENFORCE		= (1ULL << 6),
3425 };
3426 
3427 /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
3428 enum {
3429 	BPF_F_INGRESS			= (1ULL << 0),
3430 };
3431 
3432 /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
3433 enum {
3434 	BPF_F_TUNINFO_IPV6		= (1ULL << 0),
3435 };
3436 
3437 /* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */
3438 enum {
3439 	BPF_F_SKIP_FIELD_MASK		= 0xffULL,
3440 	BPF_F_USER_STACK		= (1ULL << 8),
3441 /* flags used by BPF_FUNC_get_stackid only. */
3442 	BPF_F_FAST_STACK_CMP		= (1ULL << 9),
3443 	BPF_F_REUSE_STACKID		= (1ULL << 10),
3444 /* flags used by BPF_FUNC_get_stack only. */
3445 	BPF_F_USER_BUILD_ID		= (1ULL << 11),
3446 };
3447 
3448 /* BPF_FUNC_skb_set_tunnel_key flags. */
3449 enum {
3450 	BPF_F_ZERO_CSUM_TX		= (1ULL << 1),
3451 	BPF_F_DONT_FRAGMENT		= (1ULL << 2),
3452 	BPF_F_SEQ_NUMBER		= (1ULL << 3),
3453 };
3454 
3455 /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
3456  * BPF_FUNC_perf_event_read_value flags.
3457  */
3458 enum {
3459 	BPF_F_INDEX_MASK		= 0xffffffffULL,
3460 	BPF_F_CURRENT_CPU		= BPF_F_INDEX_MASK,
3461 /* BPF_FUNC_perf_event_output for sk_buff input context. */
3462 	BPF_F_CTXLEN_MASK		= (0xfffffULL << 32),
3463 };
3464 
3465 /* Current network namespace */
3466 enum {
3467 	BPF_F_CURRENT_NETNS		= (-1L),
3468 };
3469 
3470 /* BPF_FUNC_csum_level level values. */
3471 enum {
3472 	BPF_CSUM_LEVEL_QUERY,
3473 	BPF_CSUM_LEVEL_INC,
3474 	BPF_CSUM_LEVEL_DEC,
3475 	BPF_CSUM_LEVEL_RESET,
3476 };
3477 
3478 /* BPF_FUNC_skb_adjust_room flags. */
3479 enum {
3480 	BPF_F_ADJ_ROOM_FIXED_GSO	= (1ULL << 0),
3481 	BPF_F_ADJ_ROOM_ENCAP_L3_IPV4	= (1ULL << 1),
3482 	BPF_F_ADJ_ROOM_ENCAP_L3_IPV6	= (1ULL << 2),
3483 	BPF_F_ADJ_ROOM_ENCAP_L4_GRE	= (1ULL << 3),
3484 	BPF_F_ADJ_ROOM_ENCAP_L4_UDP	= (1ULL << 4),
3485 	BPF_F_ADJ_ROOM_NO_CSUM_RESET	= (1ULL << 5),
3486 };
3487 
3488 enum {
3489 	BPF_ADJ_ROOM_ENCAP_L2_MASK	= 0xff,
3490 	BPF_ADJ_ROOM_ENCAP_L2_SHIFT	= 56,
3491 };
3492 
3493 #define BPF_F_ADJ_ROOM_ENCAP_L2(len)	(((__u64)len & \
3494 					  BPF_ADJ_ROOM_ENCAP_L2_MASK) \
3495 					 << BPF_ADJ_ROOM_ENCAP_L2_SHIFT)
3496 
3497 /* BPF_FUNC_sysctl_get_name flags. */
3498 enum {
3499 	BPF_F_SYSCTL_BASE_NAME		= (1ULL << 0),
3500 };
3501 
3502 /* BPF_FUNC_sk_storage_get flags */
3503 enum {
3504 	BPF_SK_STORAGE_GET_F_CREATE	= (1ULL << 0),
3505 };
3506 
3507 /* BPF_FUNC_read_branch_records flags. */
3508 enum {
3509 	BPF_F_GET_BRANCH_RECORDS_SIZE	= (1ULL << 0),
3510 };
3511 
3512 /* BPF_FUNC_bpf_ringbuf_commit, BPF_FUNC_bpf_ringbuf_discard, and
3513  * BPF_FUNC_bpf_ringbuf_output flags.
3514  */
3515 enum {
3516 	BPF_RB_NO_WAKEUP		= (1ULL << 0),
3517 	BPF_RB_FORCE_WAKEUP		= (1ULL << 1),
3518 };
3519 
3520 /* BPF_FUNC_bpf_ringbuf_query flags */
3521 enum {
3522 	BPF_RB_AVAIL_DATA = 0,
3523 	BPF_RB_RING_SIZE = 1,
3524 	BPF_RB_CONS_POS = 2,
3525 	BPF_RB_PROD_POS = 3,
3526 };
3527 
3528 /* BPF ring buffer constants */
3529 enum {
3530 	BPF_RINGBUF_BUSY_BIT		= (1U << 31),
3531 	BPF_RINGBUF_DISCARD_BIT		= (1U << 30),
3532 	BPF_RINGBUF_HDR_SZ		= 8,
3533 };
3534 
3535 /* Mode for BPF_FUNC_skb_adjust_room helper. */
3536 enum bpf_adj_room_mode {
3537 	BPF_ADJ_ROOM_NET,
3538 	BPF_ADJ_ROOM_MAC,
3539 };
3540 
3541 /* Mode for BPF_FUNC_skb_load_bytes_relative helper. */
3542 enum bpf_hdr_start_off {
3543 	BPF_HDR_START_MAC,
3544 	BPF_HDR_START_NET,
3545 };
3546 
3547 /* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */
3548 enum bpf_lwt_encap_mode {
3549 	BPF_LWT_ENCAP_SEG6,
3550 	BPF_LWT_ENCAP_SEG6_INLINE,
3551 	BPF_LWT_ENCAP_IP,
3552 };
3553 
3554 #define __bpf_md_ptr(type, name)	\
3555 union {					\
3556 	type name;			\
3557 	__u64 :64;			\
3558 } __attribute__((aligned(8)))
3559 
3560 /* user accessible mirror of in-kernel sk_buff.
3561  * new fields can only be added to the end of this structure
3562  */
3563 struct __sk_buff {
3564 	__u32 len;
3565 	__u32 pkt_type;
3566 	__u32 mark;
3567 	__u32 queue_mapping;
3568 	__u32 protocol;
3569 	__u32 vlan_present;
3570 	__u32 vlan_tci;
3571 	__u32 vlan_proto;
3572 	__u32 priority;
3573 	__u32 ingress_ifindex;
3574 	__u32 ifindex;
3575 	__u32 tc_index;
3576 	__u32 cb[5];
3577 	__u32 hash;
3578 	__u32 tc_classid;
3579 	__u32 data;
3580 	__u32 data_end;
3581 	__u32 napi_id;
3582 
3583 	/* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... */
3584 	__u32 family;
3585 	__u32 remote_ip4;	/* Stored in network byte order */
3586 	__u32 local_ip4;	/* Stored in network byte order */
3587 	__u32 remote_ip6[4];	/* Stored in network byte order */
3588 	__u32 local_ip6[4];	/* Stored in network byte order */
3589 	__u32 remote_port;	/* Stored in network byte order */
3590 	__u32 local_port;	/* stored in host byte order */
3591 	/* ... here. */
3592 
3593 	__u32 data_meta;
3594 	__bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
3595 	__u64 tstamp;
3596 	__u32 wire_len;
3597 	__u32 gso_segs;
3598 	__bpf_md_ptr(struct bpf_sock *, sk);
3599 	__u32 gso_size;
3600 };
3601 
3602 struct bpf_tunnel_key {
3603 	__u32 tunnel_id;
3604 	union {
3605 		__u32 remote_ipv4;
3606 		__u32 remote_ipv6[4];
3607 	};
3608 	__u8 tunnel_tos;
3609 	__u8 tunnel_ttl;
3610 	__u16 tunnel_ext;	/* Padding, future use. */
3611 	__u32 tunnel_label;
3612 };
3613 
3614 /* user accessible mirror of in-kernel xfrm_state.
3615  * new fields can only be added to the end of this structure
3616  */
3617 struct bpf_xfrm_state {
3618 	__u32 reqid;
3619 	__u32 spi;	/* Stored in network byte order */
3620 	__u16 family;
3621 	__u16 ext;	/* Padding, future use. */
3622 	union {
3623 		__u32 remote_ipv4;	/* Stored in network byte order */
3624 		__u32 remote_ipv6[4];	/* Stored in network byte order */
3625 	};
3626 };
3627 
3628 /* Generic BPF return codes which all BPF program types may support.
3629  * The values are binary compatible with their TC_ACT_* counter-part to
3630  * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
3631  * programs.
3632  *
3633  * XDP is handled seprately, see XDP_*.
3634  */
3635 enum bpf_ret_code {
3636 	BPF_OK = 0,
3637 	/* 1 reserved */
3638 	BPF_DROP = 2,
3639 	/* 3-6 reserved */
3640 	BPF_REDIRECT = 7,
3641 	/* >127 are reserved for prog type specific return codes.
3642 	 *
3643 	 * BPF_LWT_REROUTE: used by BPF_PROG_TYPE_LWT_IN and
3644 	 *    BPF_PROG_TYPE_LWT_XMIT to indicate that skb had been
3645 	 *    changed and should be routed based on its new L3 header.
3646 	 *    (This is an L3 redirect, as opposed to L2 redirect
3647 	 *    represented by BPF_REDIRECT above).
3648 	 */
3649 	BPF_LWT_REROUTE = 128,
3650 };
3651 
3652 struct bpf_sock {
3653 	__u32 bound_dev_if;
3654 	__u32 family;
3655 	__u32 type;
3656 	__u32 protocol;
3657 	__u32 mark;
3658 	__u32 priority;
3659 	/* IP address also allows 1 and 2 bytes access */
3660 	__u32 src_ip4;
3661 	__u32 src_ip6[4];
3662 	__u32 src_port;		/* host byte order */
3663 	__u32 dst_port;		/* network byte order */
3664 	__u32 dst_ip4;
3665 	__u32 dst_ip6[4];
3666 	__u32 state;
3667 	__s32 rx_queue_mapping;
3668 };
3669 
3670 struct bpf_tcp_sock {
3671 	__u32 snd_cwnd;		/* Sending congestion window		*/
3672 	__u32 srtt_us;		/* smoothed round trip time << 3 in usecs */
3673 	__u32 rtt_min;
3674 	__u32 snd_ssthresh;	/* Slow start size threshold		*/
3675 	__u32 rcv_nxt;		/* What we want to receive next		*/
3676 	__u32 snd_nxt;		/* Next sequence we send		*/
3677 	__u32 snd_una;		/* First byte we want an ack for	*/
3678 	__u32 mss_cache;	/* Cached effective mss, not including SACKS */
3679 	__u32 ecn_flags;	/* ECN status bits.			*/
3680 	__u32 rate_delivered;	/* saved rate sample: packets delivered */
3681 	__u32 rate_interval_us;	/* saved rate sample: time elapsed */
3682 	__u32 packets_out;	/* Packets which are "in flight"	*/
3683 	__u32 retrans_out;	/* Retransmitted packets out		*/
3684 	__u32 total_retrans;	/* Total retransmits for entire connection */
3685 	__u32 segs_in;		/* RFC4898 tcpEStatsPerfSegsIn
3686 				 * total number of segments in.
3687 				 */
3688 	__u32 data_segs_in;	/* RFC4898 tcpEStatsPerfDataSegsIn
3689 				 * total number of data segments in.
3690 				 */
3691 	__u32 segs_out;		/* RFC4898 tcpEStatsPerfSegsOut
3692 				 * The total number of segments sent.
3693 				 */
3694 	__u32 data_segs_out;	/* RFC4898 tcpEStatsPerfDataSegsOut
3695 				 * total number of data segments sent.
3696 				 */
3697 	__u32 lost_out;		/* Lost packets			*/
3698 	__u32 sacked_out;	/* SACK'd packets			*/
3699 	__u64 bytes_received;	/* RFC4898 tcpEStatsAppHCThruOctetsReceived
3700 				 * sum(delta(rcv_nxt)), or how many bytes
3701 				 * were acked.
3702 				 */
3703 	__u64 bytes_acked;	/* RFC4898 tcpEStatsAppHCThruOctetsAcked
3704 				 * sum(delta(snd_una)), or how many bytes
3705 				 * were acked.
3706 				 */
3707 	__u32 dsack_dups;	/* RFC4898 tcpEStatsStackDSACKDups
3708 				 * total number of DSACK blocks received
3709 				 */
3710 	__u32 delivered;	/* Total data packets delivered incl. rexmits */
3711 	__u32 delivered_ce;	/* Like the above but only ECE marked packets */
3712 	__u32 icsk_retransmits;	/* Number of unrecovered [RTO] timeouts */
3713 };
3714 
3715 struct bpf_sock_tuple {
3716 	union {
3717 		struct {
3718 			__be32 saddr;
3719 			__be32 daddr;
3720 			__be16 sport;
3721 			__be16 dport;
3722 		} ipv4;
3723 		struct {
3724 			__be32 saddr[4];
3725 			__be32 daddr[4];
3726 			__be16 sport;
3727 			__be16 dport;
3728 		} ipv6;
3729 	};
3730 };
3731 
3732 struct bpf_xdp_sock {
3733 	__u32 queue_id;
3734 };
3735 
3736 #define XDP_PACKET_HEADROOM 256
3737 
3738 /* User return codes for XDP prog type.
3739  * A valid XDP program must return one of these defined values. All other
3740  * return codes are reserved for future use. Unknown return codes will
3741  * result in packet drops and a warning via bpf_warn_invalid_xdp_action().
3742  */
3743 enum xdp_action {
3744 	XDP_ABORTED = 0,
3745 	XDP_DROP,
3746 	XDP_PASS,
3747 	XDP_TX,
3748 	XDP_REDIRECT,
3749 };
3750 
3751 /* user accessible metadata for XDP packet hook
3752  * new fields must be added to the end of this structure
3753  */
3754 struct xdp_md {
3755 	__u32 data;
3756 	__u32 data_end;
3757 	__u32 data_meta;
3758 	/* Below access go through struct xdp_rxq_info */
3759 	__u32 ingress_ifindex; /* rxq->dev->ifindex */
3760 	__u32 rx_queue_index;  /* rxq->queue_index  */
3761 
3762 	__u32 egress_ifindex;  /* txq->dev->ifindex */
3763 };
3764 
3765 /* DEVMAP map-value layout
3766  *
3767  * The struct data-layout of map-value is a configuration interface.
3768  * New members can only be added to the end of this structure.
3769  */
3770 struct bpf_devmap_val {
3771 	__u32 ifindex;   /* device index */
3772 	union {
3773 		int   fd;  /* prog fd on map write */
3774 		__u32 id;  /* prog id on map read */
3775 	} bpf_prog;
3776 };
3777 
3778 enum sk_action {
3779 	SK_DROP = 0,
3780 	SK_PASS,
3781 };
3782 
3783 /* user accessible metadata for SK_MSG packet hook, new fields must
3784  * be added to the end of this structure
3785  */
3786 struct sk_msg_md {
3787 	__bpf_md_ptr(void *, data);
3788 	__bpf_md_ptr(void *, data_end);
3789 
3790 	__u32 family;
3791 	__u32 remote_ip4;	/* Stored in network byte order */
3792 	__u32 local_ip4;	/* Stored in network byte order */
3793 	__u32 remote_ip6[4];	/* Stored in network byte order */
3794 	__u32 local_ip6[4];	/* Stored in network byte order */
3795 	__u32 remote_port;	/* Stored in network byte order */
3796 	__u32 local_port;	/* stored in host byte order */
3797 	__u32 size;		/* Total size of sk_msg */
3798 
3799 	__bpf_md_ptr(struct bpf_sock *, sk); /* current socket */
3800 };
3801 
3802 struct sk_reuseport_md {
3803 	/*
3804 	 * Start of directly accessible data. It begins from
3805 	 * the tcp/udp header.
3806 	 */
3807 	__bpf_md_ptr(void *, data);
3808 	/* End of directly accessible data */
3809 	__bpf_md_ptr(void *, data_end);
3810 	/*
3811 	 * Total length of packet (starting from the tcp/udp header).
3812 	 * Note that the directly accessible bytes (data_end - data)
3813 	 * could be less than this "len".  Those bytes could be
3814 	 * indirectly read by a helper "bpf_skb_load_bytes()".
3815 	 */
3816 	__u32 len;
3817 	/*
3818 	 * Eth protocol in the mac header (network byte order). e.g.
3819 	 * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD)
3820 	 */
3821 	__u32 eth_protocol;
3822 	__u32 ip_protocol;	/* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */
3823 	__u32 bind_inany;	/* Is sock bound to an INANY address? */
3824 	__u32 hash;		/* A hash of the packet 4 tuples */
3825 };
3826 
3827 #define BPF_TAG_SIZE	8
3828 
3829 struct bpf_prog_info {
3830 	__u32 type;
3831 	__u32 id;
3832 	__u8  tag[BPF_TAG_SIZE];
3833 	__u32 jited_prog_len;
3834 	__u32 xlated_prog_len;
3835 	__aligned_u64 jited_prog_insns;
3836 	__aligned_u64 xlated_prog_insns;
3837 	__u64 load_time;	/* ns since boottime */
3838 	__u32 created_by_uid;
3839 	__u32 nr_map_ids;
3840 	__aligned_u64 map_ids;
3841 	char name[BPF_OBJ_NAME_LEN];
3842 	__u32 ifindex;
3843 	__u32 gpl_compatible:1;
3844 	__u32 :31; /* alignment pad */
3845 	__u64 netns_dev;
3846 	__u64 netns_ino;
3847 	__u32 nr_jited_ksyms;
3848 	__u32 nr_jited_func_lens;
3849 	__aligned_u64 jited_ksyms;
3850 	__aligned_u64 jited_func_lens;
3851 	__u32 btf_id;
3852 	__u32 func_info_rec_size;
3853 	__aligned_u64 func_info;
3854 	__u32 nr_func_info;
3855 	__u32 nr_line_info;
3856 	__aligned_u64 line_info;
3857 	__aligned_u64 jited_line_info;
3858 	__u32 nr_jited_line_info;
3859 	__u32 line_info_rec_size;
3860 	__u32 jited_line_info_rec_size;
3861 	__u32 nr_prog_tags;
3862 	__aligned_u64 prog_tags;
3863 	__u64 run_time_ns;
3864 	__u64 run_cnt;
3865 } __attribute__((aligned(8)));
3866 
3867 struct bpf_map_info {
3868 	__u32 type;
3869 	__u32 id;
3870 	__u32 key_size;
3871 	__u32 value_size;
3872 	__u32 max_entries;
3873 	__u32 map_flags;
3874 	char  name[BPF_OBJ_NAME_LEN];
3875 	__u32 ifindex;
3876 	__u32 btf_vmlinux_value_type_id;
3877 	__u64 netns_dev;
3878 	__u64 netns_ino;
3879 	__u32 btf_id;
3880 	__u32 btf_key_type_id;
3881 	__u32 btf_value_type_id;
3882 } __attribute__((aligned(8)));
3883 
3884 struct bpf_btf_info {
3885 	__aligned_u64 btf;
3886 	__u32 btf_size;
3887 	__u32 id;
3888 } __attribute__((aligned(8)));
3889 
3890 struct bpf_link_info {
3891 	__u32 type;
3892 	__u32 id;
3893 	__u32 prog_id;
3894 	union {
3895 		struct {
3896 			__aligned_u64 tp_name; /* in/out: tp_name buffer ptr */
3897 			__u32 tp_name_len;     /* in/out: tp_name buffer len */
3898 		} raw_tracepoint;
3899 		struct {
3900 			__u32 attach_type;
3901 		} tracing;
3902 		struct {
3903 			__u64 cgroup_id;
3904 			__u32 attach_type;
3905 		} cgroup;
3906 		struct  {
3907 			__u32 netns_ino;
3908 			__u32 attach_type;
3909 		} netns;
3910 	};
3911 } __attribute__((aligned(8)));
3912 
3913 /* User bpf_sock_addr struct to access socket fields and sockaddr struct passed
3914  * by user and intended to be used by socket (e.g. to bind to, depends on
3915  * attach attach type).
3916  */
3917 struct bpf_sock_addr {
3918 	__u32 user_family;	/* Allows 4-byte read, but no write. */
3919 	__u32 user_ip4;		/* Allows 1,2,4-byte read and 4-byte write.
3920 				 * Stored in network byte order.
3921 				 */
3922 	__u32 user_ip6[4];	/* Allows 1,2,4,8-byte read and 4,8-byte write.
3923 				 * Stored in network byte order.
3924 				 */
3925 	__u32 user_port;	/* Allows 1,2,4-byte read and 4-byte write.
3926 				 * Stored in network byte order
3927 				 */
3928 	__u32 family;		/* Allows 4-byte read, but no write */
3929 	__u32 type;		/* Allows 4-byte read, but no write */
3930 	__u32 protocol;		/* Allows 4-byte read, but no write */
3931 	__u32 msg_src_ip4;	/* Allows 1,2,4-byte read and 4-byte write.
3932 				 * Stored in network byte order.
3933 				 */
3934 	__u32 msg_src_ip6[4];	/* Allows 1,2,4,8-byte read and 4,8-byte write.
3935 				 * Stored in network byte order.
3936 				 */
3937 	__bpf_md_ptr(struct bpf_sock *, sk);
3938 };
3939 
3940 /* User bpf_sock_ops struct to access socket values and specify request ops
3941  * and their replies.
3942  * Some of this fields are in network (bigendian) byte order and may need
3943  * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h).
3944  * New fields can only be added at the end of this structure
3945  */
3946 struct bpf_sock_ops {
3947 	__u32 op;
3948 	union {
3949 		__u32 args[4];		/* Optionally passed to bpf program */
3950 		__u32 reply;		/* Returned by bpf program	    */
3951 		__u32 replylong[4];	/* Optionally returned by bpf prog  */
3952 	};
3953 	__u32 family;
3954 	__u32 remote_ip4;	/* Stored in network byte order */
3955 	__u32 local_ip4;	/* Stored in network byte order */
3956 	__u32 remote_ip6[4];	/* Stored in network byte order */
3957 	__u32 local_ip6[4];	/* Stored in network byte order */
3958 	__u32 remote_port;	/* Stored in network byte order */
3959 	__u32 local_port;	/* stored in host byte order */
3960 	__u32 is_fullsock;	/* Some TCP fields are only valid if
3961 				 * there is a full socket. If not, the
3962 				 * fields read as zero.
3963 				 */
3964 	__u32 snd_cwnd;
3965 	__u32 srtt_us;		/* Averaged RTT << 3 in usecs */
3966 	__u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */
3967 	__u32 state;
3968 	__u32 rtt_min;
3969 	__u32 snd_ssthresh;
3970 	__u32 rcv_nxt;
3971 	__u32 snd_nxt;
3972 	__u32 snd_una;
3973 	__u32 mss_cache;
3974 	__u32 ecn_flags;
3975 	__u32 rate_delivered;
3976 	__u32 rate_interval_us;
3977 	__u32 packets_out;
3978 	__u32 retrans_out;
3979 	__u32 total_retrans;
3980 	__u32 segs_in;
3981 	__u32 data_segs_in;
3982 	__u32 segs_out;
3983 	__u32 data_segs_out;
3984 	__u32 lost_out;
3985 	__u32 sacked_out;
3986 	__u32 sk_txhash;
3987 	__u64 bytes_received;
3988 	__u64 bytes_acked;
3989 	__bpf_md_ptr(struct bpf_sock *, sk);
3990 };
3991 
3992 /* Definitions for bpf_sock_ops_cb_flags */
3993 enum {
3994 	BPF_SOCK_OPS_RTO_CB_FLAG	= (1<<0),
3995 	BPF_SOCK_OPS_RETRANS_CB_FLAG	= (1<<1),
3996 	BPF_SOCK_OPS_STATE_CB_FLAG	= (1<<2),
3997 	BPF_SOCK_OPS_RTT_CB_FLAG	= (1<<3),
3998 /* Mask of all currently supported cb flags */
3999 	BPF_SOCK_OPS_ALL_CB_FLAGS       = 0xF,
4000 };
4001 
4002 /* List of known BPF sock_ops operators.
4003  * New entries can only be added at the end
4004  */
4005 enum {
4006 	BPF_SOCK_OPS_VOID,
4007 	BPF_SOCK_OPS_TIMEOUT_INIT,	/* Should return SYN-RTO value to use or
4008 					 * -1 if default value should be used
4009 					 */
4010 	BPF_SOCK_OPS_RWND_INIT,		/* Should return initial advertized
4011 					 * window (in packets) or -1 if default
4012 					 * value should be used
4013 					 */
4014 	BPF_SOCK_OPS_TCP_CONNECT_CB,	/* Calls BPF program right before an
4015 					 * active connection is initialized
4016 					 */
4017 	BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB,	/* Calls BPF program when an
4018 						 * active connection is
4019 						 * established
4020 						 */
4021 	BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB,	/* Calls BPF program when a
4022 						 * passive connection is
4023 						 * established
4024 						 */
4025 	BPF_SOCK_OPS_NEEDS_ECN,		/* If connection's congestion control
4026 					 * needs ECN
4027 					 */
4028 	BPF_SOCK_OPS_BASE_RTT,		/* Get base RTT. The correct value is
4029 					 * based on the path and may be
4030 					 * dependent on the congestion control
4031 					 * algorithm. In general it indicates
4032 					 * a congestion threshold. RTTs above
4033 					 * this indicate congestion
4034 					 */
4035 	BPF_SOCK_OPS_RTO_CB,		/* Called when an RTO has triggered.
4036 					 * Arg1: value of icsk_retransmits
4037 					 * Arg2: value of icsk_rto
4038 					 * Arg3: whether RTO has expired
4039 					 */
4040 	BPF_SOCK_OPS_RETRANS_CB,	/* Called when skb is retransmitted.
4041 					 * Arg1: sequence number of 1st byte
4042 					 * Arg2: # segments
4043 					 * Arg3: return value of
4044 					 *       tcp_transmit_skb (0 => success)
4045 					 */
4046 	BPF_SOCK_OPS_STATE_CB,		/* Called when TCP changes state.
4047 					 * Arg1: old_state
4048 					 * Arg2: new_state
4049 					 */
4050 	BPF_SOCK_OPS_TCP_LISTEN_CB,	/* Called on listen(2), right after
4051 					 * socket transition to LISTEN state.
4052 					 */
4053 	BPF_SOCK_OPS_RTT_CB,		/* Called on every RTT.
4054 					 */
4055 };
4056 
4057 /* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
4058  * changes between the TCP and BPF versions. Ideally this should never happen.
4059  * If it does, we need to add code to convert them before calling
4060  * the BPF sock_ops function.
4061  */
4062 enum {
4063 	BPF_TCP_ESTABLISHED = 1,
4064 	BPF_TCP_SYN_SENT,
4065 	BPF_TCP_SYN_RECV,
4066 	BPF_TCP_FIN_WAIT1,
4067 	BPF_TCP_FIN_WAIT2,
4068 	BPF_TCP_TIME_WAIT,
4069 	BPF_TCP_CLOSE,
4070 	BPF_TCP_CLOSE_WAIT,
4071 	BPF_TCP_LAST_ACK,
4072 	BPF_TCP_LISTEN,
4073 	BPF_TCP_CLOSING,	/* Now a valid state */
4074 	BPF_TCP_NEW_SYN_RECV,
4075 
4076 	BPF_TCP_MAX_STATES	/* Leave at the end! */
4077 };
4078 
4079 enum {
4080 	TCP_BPF_IW		= 1001,	/* Set TCP initial congestion window */
4081 	TCP_BPF_SNDCWND_CLAMP	= 1002,	/* Set sndcwnd_clamp */
4082 };
4083 
4084 struct bpf_perf_event_value {
4085 	__u64 counter;
4086 	__u64 enabled;
4087 	__u64 running;
4088 };
4089 
4090 enum {
4091 	BPF_DEVCG_ACC_MKNOD	= (1ULL << 0),
4092 	BPF_DEVCG_ACC_READ	= (1ULL << 1),
4093 	BPF_DEVCG_ACC_WRITE	= (1ULL << 2),
4094 };
4095 
4096 enum {
4097 	BPF_DEVCG_DEV_BLOCK	= (1ULL << 0),
4098 	BPF_DEVCG_DEV_CHAR	= (1ULL << 1),
4099 };
4100 
4101 struct bpf_cgroup_dev_ctx {
4102 	/* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
4103 	__u32 access_type;
4104 	__u32 major;
4105 	__u32 minor;
4106 };
4107 
4108 struct bpf_raw_tracepoint_args {
4109 	__u64 args[0];
4110 };
4111 
4112 /* DIRECT:  Skip the FIB rules and go to FIB table associated with device
4113  * OUTPUT:  Do lookup from egress perspective; default is ingress
4114  */
4115 enum {
4116 	BPF_FIB_LOOKUP_DIRECT  = (1U << 0),
4117 	BPF_FIB_LOOKUP_OUTPUT  = (1U << 1),
4118 };
4119 
4120 enum {
4121 	BPF_FIB_LKUP_RET_SUCCESS,      /* lookup successful */
4122 	BPF_FIB_LKUP_RET_BLACKHOLE,    /* dest is blackholed; can be dropped */
4123 	BPF_FIB_LKUP_RET_UNREACHABLE,  /* dest is unreachable; can be dropped */
4124 	BPF_FIB_LKUP_RET_PROHIBIT,     /* dest not allowed; can be dropped */
4125 	BPF_FIB_LKUP_RET_NOT_FWDED,    /* packet is not forwarded */
4126 	BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */
4127 	BPF_FIB_LKUP_RET_UNSUPP_LWT,   /* fwd requires encapsulation */
4128 	BPF_FIB_LKUP_RET_NO_NEIGH,     /* no neighbor entry for nh */
4129 	BPF_FIB_LKUP_RET_FRAG_NEEDED,  /* fragmentation required to fwd */
4130 };
4131 
4132 struct bpf_fib_lookup {
4133 	/* input:  network family for lookup (AF_INET, AF_INET6)
4134 	 * output: network family of egress nexthop
4135 	 */
4136 	__u8	family;
4137 
4138 	/* set if lookup is to consider L4 data - e.g., FIB rules */
4139 	__u8	l4_protocol;
4140 	__be16	sport;
4141 	__be16	dport;
4142 
4143 	/* total length of packet from network header - used for MTU check */
4144 	__u16	tot_len;
4145 
4146 	/* input: L3 device index for lookup
4147 	 * output: device index from FIB lookup
4148 	 */
4149 	__u32	ifindex;
4150 
4151 	union {
4152 		/* inputs to lookup */
4153 		__u8	tos;		/* AF_INET  */
4154 		__be32	flowinfo;	/* AF_INET6, flow_label + priority */
4155 
4156 		/* output: metric of fib result (IPv4/IPv6 only) */
4157 		__u32	rt_metric;
4158 	};
4159 
4160 	union {
4161 		__be32		ipv4_src;
4162 		__u32		ipv6_src[4];  /* in6_addr; network order */
4163 	};
4164 
4165 	/* input to bpf_fib_lookup, ipv{4,6}_dst is destination address in
4166 	 * network header. output: bpf_fib_lookup sets to gateway address
4167 	 * if FIB lookup returns gateway route
4168 	 */
4169 	union {
4170 		__be32		ipv4_dst;
4171 		__u32		ipv6_dst[4];  /* in6_addr; network order */
4172 	};
4173 
4174 	/* output */
4175 	__be16	h_vlan_proto;
4176 	__be16	h_vlan_TCI;
4177 	__u8	smac[6];     /* ETH_ALEN */
4178 	__u8	dmac[6];     /* ETH_ALEN */
4179 };
4180 
4181 enum bpf_task_fd_type {
4182 	BPF_FD_TYPE_RAW_TRACEPOINT,	/* tp name */
4183 	BPF_FD_TYPE_TRACEPOINT,		/* tp name */
4184 	BPF_FD_TYPE_KPROBE,		/* (symbol + offset) or addr */
4185 	BPF_FD_TYPE_KRETPROBE,		/* (symbol + offset) or addr */
4186 	BPF_FD_TYPE_UPROBE,		/* filename + offset */
4187 	BPF_FD_TYPE_URETPROBE,		/* filename + offset */
4188 };
4189 
4190 enum {
4191 	BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG		= (1U << 0),
4192 	BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL		= (1U << 1),
4193 	BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP		= (1U << 2),
4194 };
4195 
4196 struct bpf_flow_keys {
4197 	__u16	nhoff;
4198 	__u16	thoff;
4199 	__u16	addr_proto;			/* ETH_P_* of valid addrs */
4200 	__u8	is_frag;
4201 	__u8	is_first_frag;
4202 	__u8	is_encap;
4203 	__u8	ip_proto;
4204 	__be16	n_proto;
4205 	__be16	sport;
4206 	__be16	dport;
4207 	union {
4208 		struct {
4209 			__be32	ipv4_src;
4210 			__be32	ipv4_dst;
4211 		};
4212 		struct {
4213 			__u32	ipv6_src[4];	/* in6_addr; network order */
4214 			__u32	ipv6_dst[4];	/* in6_addr; network order */
4215 		};
4216 	};
4217 	__u32	flags;
4218 	__be32	flow_label;
4219 };
4220 
4221 struct bpf_func_info {
4222 	__u32	insn_off;
4223 	__u32	type_id;
4224 };
4225 
4226 #define BPF_LINE_INFO_LINE_NUM(line_col)	((line_col) >> 10)
4227 #define BPF_LINE_INFO_LINE_COL(line_col)	((line_col) & 0x3ff)
4228 
4229 struct bpf_line_info {
4230 	__u32	insn_off;
4231 	__u32	file_name_off;
4232 	__u32	line_off;
4233 	__u32	line_col;
4234 };
4235 
4236 struct bpf_spin_lock {
4237 	__u32	val;
4238 };
4239 
4240 struct bpf_sysctl {
4241 	__u32	write;		/* Sysctl is being read (= 0) or written (= 1).
4242 				 * Allows 1,2,4-byte read, but no write.
4243 				 */
4244 	__u32	file_pos;	/* Sysctl file position to read from, write to.
4245 				 * Allows 1,2,4-byte read an 4-byte write.
4246 				 */
4247 };
4248 
4249 struct bpf_sockopt {
4250 	__bpf_md_ptr(struct bpf_sock *, sk);
4251 	__bpf_md_ptr(void *, optval);
4252 	__bpf_md_ptr(void *, optval_end);
4253 
4254 	__s32	level;
4255 	__s32	optname;
4256 	__s32	optlen;
4257 	__s32	retval;
4258 };
4259 
4260 struct bpf_pidns_info {
4261 	__u32 pid;
4262 	__u32 tgid;
4263 };
4264 #endif /* _UAPI__LINUX_BPF_H__ */
4265