xref: /linux/kernel/trace/trace_events_filter.c (revision 564eb714f5f09ac733c26860d5f0831f213fbdf1)
1 /*
2  * trace_events_filter - generic event filtering
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17  *
18  * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
19  */
20 
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/mutex.h>
24 #include <linux/perf_event.h>
25 #include <linux/slab.h>
26 
27 #include "trace.h"
28 #include "trace_output.h"
29 
/*
 * Text shown when reading a subsystem filter file that has no filter
 * set.  Also serves as a usage hint: a subsystem filter is applied to
 * every event in the subsystem that has the referenced fields.
 */
#define DEFAULT_SYS_FILTER_MESSAGE					\
	"### global filter ###\n"					\
	"# Use this to set filters for multiple events.\n"		\
	"# Only events with the given fields will be affected.\n"	\
	"# If no events are modified, an error message will be displayed here"
35 
/*
 * Operator identifiers.  OP_OR..OP_BAND are real filter operators;
 * OP_NONE and OP_OPEN_PAREN are parser-internal sentinels.
 */
enum filter_op_ids
{
	OP_OR,
	OP_AND,
	OP_GLOB,
	OP_NE,
	OP_EQ,
	OP_LT,
	OP_LE,
	OP_GT,
	OP_GE,
	OP_BAND,
	OP_NONE,
	OP_OPEN_PAREN,
};

/* Operator descriptor: id, token text, and binding precedence. */
struct filter_op {
	int id;
	char *string;
	int precedence;
};

/* Order must be the same as enum filter_op_ids above */
static struct filter_op filter_ops[] = {
	{ OP_OR,	"||",		1 },
	{ OP_AND,	"&&",		2 },
	{ OP_GLOB,	"~",		4 },
	{ OP_NE,	"!=",		4 },
	{ OP_EQ,	"==",		4 },
	{ OP_LT,	"<",		5 },
	{ OP_LE,	"<=",		5 },
	{ OP_GT,	">",		5 },
	{ OP_GE,	">=",		5 },
	{ OP_BAND,	"&",		6 },
	{ OP_NONE,	"OP_NONE",	0 },
	{ OP_OPEN_PAREN, "(",		0 },
};
73 
/*
 * Parse error codes.  Order must match err_text[] below, which maps
 * each code to its human-readable message.
 */
enum {
	FILT_ERR_NONE,
	FILT_ERR_INVALID_OP,
	FILT_ERR_UNBALANCED_PAREN,
	FILT_ERR_TOO_MANY_OPERANDS,
	FILT_ERR_OPERAND_TOO_LONG,
	FILT_ERR_FIELD_NOT_FOUND,
	FILT_ERR_ILLEGAL_FIELD_OP,
	FILT_ERR_ILLEGAL_INTVAL,
	FILT_ERR_BAD_SUBSYS_FILTER,
	FILT_ERR_TOO_MANY_PREDS,
	FILT_ERR_MISSING_FIELD,
	FILT_ERR_INVALID_FILTER,
	FILT_ERR_IP_FIELD_ONLY,
};

/* Messages for the FILT_ERR_* codes above, indexed by error code. */
static char *err_text[] = {
	"No error",
	"Invalid operator",
	"Unbalanced parens",
	"Too many operands",
	"Operand too long",
	"Field not found",
	"Illegal operation for field type",
	"Illegal integer value",
	"Couldn't find or set field in one of a subsystem's events",
	"Too many terms in predicate expression",
	"Missing field name and/or value",
	"Meaningless filter expression",
	"Only 'ip' field is supported for function trace",
};
105 
/* One entry of the operator stack used during infix->postfix conversion. */
struct opstack_op {
	int op;
	struct list_head list;
};

/*
 * One element of the postfix (RPN) expression: either an operator
 * (operand == NULL) or an operand string (op == OP_NONE).
 */
struct postfix_elt {
	int op;
	char *operand;
	struct list_head list;
};

/*
 * Parser state: the operator table, the shunting-yard operator stack,
 * the resulting postfix list, the last error, and cursors into both
 * the input (infix) string and the operand being accumulated.
 */
struct filter_parse_state {
	struct filter_op *ops;
	struct list_head opstack;
	struct list_head postfix;
	int lasterr;
	int lasterr_pos;

	/* the infix expression being consumed */
	struct {
		char *string;
		unsigned int cnt;	/* characters remaining */
		unsigned int tail;	/* index of next character to read */
	} infix;

	/* operand text accumulated so far */
	struct {
		char string[MAX_FILTER_STR_VAL];
		int pos;
		unsigned int tail;	/* next free slot in string[] */
	} operand;
};

/* Downward-growing, NULL-terminated stack used while building the tree. */
struct pred_stack {
	struct filter_pred	**preds;
	int			index;
};
141 
/*
 * Generate filter_pred_<type>(): compare the integer field at
 * pred->offset against pred->val using the predicate's relational
 * operator (<, <=, >, >=, &).  Unknown ops yield no match.
 */
#define DEFINE_COMPARISON_PRED(type)					\
static int filter_pred_##type(struct filter_pred *pred, void *event)	\
{									\
	type *addr = (type *)(event + pred->offset);			\
	type val = (type)pred->val;					\
	int match = 0;							\
									\
	switch (pred->op) {						\
	case OP_LT:							\
		match = (*addr < val);					\
		break;							\
	case OP_LE:							\
		match = (*addr <= val);					\
		break;							\
	case OP_GT:							\
		match = (*addr > val);					\
		break;							\
	case OP_GE:							\
		match = (*addr >= val);					\
		break;							\
	case OP_BAND:							\
		match = (*addr & val);					\
		break;							\
	default:							\
		break;							\
	}								\
									\
	return match;							\
}
171 
/*
 * Generate filter_pred_<size>(): == / != on a <size>-bit field.
 * pred->not flips the result to implement OP_NE.
 */
#define DEFINE_EQUALITY_PRED(size)					\
static int filter_pred_##size(struct filter_pred *pred, void *event)	\
{									\
	u##size *addr = (u##size *)(event + pred->offset);		\
	u##size val = (u##size)pred->val;				\
	int match;							\
									\
	match = (val == *addr) ^ pred->not;				\
									\
	return match;							\
}

/* Relational comparison predicates for every signed/unsigned width. */
DEFINE_COMPARISON_PRED(s64);
DEFINE_COMPARISON_PRED(u64);
DEFINE_COMPARISON_PRED(s32);
DEFINE_COMPARISON_PRED(u32);
DEFINE_COMPARISON_PRED(s16);
DEFINE_COMPARISON_PRED(u16);
DEFINE_COMPARISON_PRED(s8);
DEFINE_COMPARISON_PRED(u8);

/* Equality predicates for every field width. */
DEFINE_EQUALITY_PRED(64);
DEFINE_EQUALITY_PRED(32);
DEFINE_EQUALITY_PRED(16);
DEFINE_EQUALITY_PRED(8);
197 
198 /* Filter predicate for fixed sized arrays of characters */
199 static int filter_pred_string(struct filter_pred *pred, void *event)
200 {
201 	char *addr = (char *)(event + pred->offset);
202 	int cmp, match;
203 
204 	cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
205 
206 	match = cmp ^ pred->not;
207 
208 	return match;
209 }
210 
211 /* Filter predicate for char * pointers */
212 static int filter_pred_pchar(struct filter_pred *pred, void *event)
213 {
214 	char **addr = (char **)(event + pred->offset);
215 	int cmp, match;
216 	int len = strlen(*addr) + 1;	/* including tailing '\0' */
217 
218 	cmp = pred->regex.match(*addr, &pred->regex, len);
219 
220 	match = cmp ^ pred->not;
221 
222 	return match;
223 }
224 
225 /*
226  * Filter predicate for dynamic sized arrays of characters.
227  * These are implemented through a list of strings at the end
228  * of the entry.
229  * Also each of these strings have a field in the entry which
230  * contains its offset from the beginning of the entry.
231  * We have then first to get this field, dereference it
232  * and add it to the address of the entry, and at last we have
233  * the address of the string.
234  */
235 static int filter_pred_strloc(struct filter_pred *pred, void *event)
236 {
237 	u32 str_item = *(u32 *)(event + pred->offset);
238 	int str_loc = str_item & 0xffff;
239 	int str_len = str_item >> 16;
240 	char *addr = (char *)(event + str_loc);
241 	int cmp, match;
242 
243 	cmp = pred->regex.match(addr, &pred->regex, str_len);
244 
245 	match = cmp ^ pred->not;
246 
247 	return match;
248 }
249 
/* Default predicate: never matches.  Installed for unset predicates. */
static int filter_pred_none(struct filter_pred *pred, void *event)
{
	return 0;
}
254 
255 /*
256  * regex_match_foo - Basic regex callbacks
257  *
258  * @str: the string to be searched
259  * @r:   the regex structure containing the pattern string
260  * @len: the length of the string to be searched (including '\0')
261  *
262  * Note:
263  * - @str might not be NULL-terminated if it's of type DYN_STRING
264  *   or STATIC_STRING
265  */
266 
267 static int regex_match_full(char *str, struct regex *r, int len)
268 {
269 	if (strncmp(str, r->pattern, len) == 0)
270 		return 1;
271 	return 0;
272 }
273 
274 static int regex_match_front(char *str, struct regex *r, int len)
275 {
276 	if (strncmp(str, r->pattern, r->len) == 0)
277 		return 1;
278 	return 0;
279 }
280 
281 static int regex_match_middle(char *str, struct regex *r, int len)
282 {
283 	if (strnstr(str, r->pattern, len))
284 		return 1;
285 	return 0;
286 }
287 
288 static int regex_match_end(char *str, struct regex *r, int len)
289 {
290 	int strlen = len - 1;
291 
292 	if (strlen >= r->len &&
293 	    memcmp(str + strlen - r->len, r->pattern, r->len) == 0)
294 		return 1;
295 	return 0;
296 }
297 
/**
 * filter_parse_regex - parse a basic regex
 * @buff:   the raw regex
 * @len:    length of the regex
 * @search: will point to the beginning of the string to compare
 * @not:    tell whether the match will have to be inverted
 *
 * This passes in a buffer containing a regex and this function will
 * set search to point to the search part of the buffer and
 * return the type of search it is (see enum above).
 * This does modify buff.
 *
 * Returns enum type.
 *  search returns the pointer to use for comparison.
 *  not returns 1 if buff started with a '!'
 *     0 otherwise.
 */
enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
{
	int type = MATCH_FULL;
	int i;

	/* a leading '!' inverts the match and is skipped */
	if (buff[0] == '!') {
		*not = 1;
		buff++;
		len--;
	} else
		*not = 0;

	*search = buff;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				/* leading '*': compare against the end */
				*search = buff + 1;
				type = MATCH_END_ONLY;
			} else {
				/*
				 * Later '*': "foo*" is a front match,
				 * "*foo*" a middle match.  Truncate the
				 * pattern at the '*' and stop scanning.
				 */
				if (type == MATCH_END_ONLY)
					type = MATCH_MIDDLE_ONLY;
				else
					type = MATCH_FRONT_ONLY;
				buff[i] = 0;
				break;
			}
		}
	}

	return type;
}
347 
348 static void filter_build_regex(struct filter_pred *pred)
349 {
350 	struct regex *r = &pred->regex;
351 	char *search;
352 	enum regex_type type = MATCH_FULL;
353 	int not = 0;
354 
355 	if (pred->op == OP_GLOB) {
356 		type = filter_parse_regex(r->pattern, r->len, &search, &not);
357 		r->len = strlen(search);
358 		memmove(r->pattern, search, r->len+1);
359 	}
360 
361 	switch (type) {
362 	case MATCH_FULL:
363 		r->match = regex_match_full;
364 		break;
365 	case MATCH_FRONT_ONLY:
366 		r->match = regex_match_front;
367 		break;
368 	case MATCH_MIDDLE_ONLY:
369 		r->match = regex_match_middle;
370 		break;
371 	case MATCH_END_ONLY:
372 		r->match = regex_match_end;
373 		break;
374 	}
375 
376 	pred->not ^= not;
377 }
378 
/* Direction of travel while walking the predicate tree. */
enum move_type {
	MOVE_DOWN,		/* descending into a child */
	MOVE_UP_FROM_LEFT,	/* returning from the left child */
	MOVE_UP_FROM_RIGHT	/* returning from the right child */
};
384 
385 static struct filter_pred *
386 get_pred_parent(struct filter_pred *pred, struct filter_pred *preds,
387 		int index, enum move_type *move)
388 {
389 	if (pred->parent & FILTER_PRED_IS_RIGHT)
390 		*move = MOVE_UP_FROM_RIGHT;
391 	else
392 		*move = MOVE_UP_FROM_LEFT;
393 	pred = &preds[pred->parent & ~FILTER_PRED_IS_RIGHT];
394 
395 	return pred;
396 }
397 
/* Return values for filter_pred_walkcb_t callbacks. */
enum walk_return {
	WALK_PRED_ABORT,	/* stop the walk, return the cb's *err */
	WALK_PRED_PARENT,	/* skip children, move to the parent */
	WALK_PRED_DEFAULT,	/* continue normal traversal */
};

/* Callback invoked at every node/direction during a predicate-tree walk. */
typedef int (*filter_pred_walkcb_t) (enum move_type move,
				     struct filter_pred *pred,
				     int *err, void *data);
407 
/*
 * Iteratively walk the binary predicate tree rooted at @root, invoking
 * @cb at each step.  The traversal is depth-first without recursion:
 * each pred stores its parent's index, so movement follows
 * left/right/parent links while @move tracks where we came from.
 * Returns 0 on completion, the callback's *err on WALK_PRED_ABORT,
 * or -EINVAL if @preds is NULL.
 */
static int walk_pred_tree(struct filter_pred *preds,
			  struct filter_pred *root,
			  filter_pred_walkcb_t cb, void *data)
{
	struct filter_pred *pred = root;
	enum move_type move = MOVE_DOWN;
	int done = 0;

	if  (!preds)
		return -EINVAL;

	do {
		int err = 0, ret;

		ret = cb(move, pred, &err, data);
		if (ret == WALK_PRED_ABORT)
			return err;
		if (ret == WALK_PRED_PARENT)
			goto get_parent;

		switch (move) {
		case MOVE_DOWN:
			/* leaves are marked with left == FILTER_PRED_INVALID */
			if (pred->left != FILTER_PRED_INVALID) {
				pred = &preds[pred->left];
				continue;
			}
			goto get_parent;
		case MOVE_UP_FROM_LEFT:
			/* left subtree finished; descend into the right one */
			pred = &preds[pred->right];
			move = MOVE_DOWN;
			continue;
		case MOVE_UP_FROM_RIGHT:
 get_parent:
			if (pred == root)
				break;
			pred = get_pred_parent(pred, preds,
					       pred->parent,
					       &move);
			continue;
		}
		done = 1;
	} while (!done);

	/* We are fine. */
	return 0;
}
454 
455 /*
456  * A series of AND or ORs where found together. Instead of
457  * climbing up and down the tree branches, an array of the
458  * ops were made in order of checks. We can just move across
459  * the array and short circuit if needed.
460  */
461 static int process_ops(struct filter_pred *preds,
462 		       struct filter_pred *op, void *rec)
463 {
464 	struct filter_pred *pred;
465 	int match = 0;
466 	int type;
467 	int i;
468 
469 	/*
470 	 * Micro-optimization: We set type to true if op
471 	 * is an OR and false otherwise (AND). Then we
472 	 * just need to test if the match is equal to
473 	 * the type, and if it is, we can short circuit the
474 	 * rest of the checks:
475 	 *
476 	 * if ((match && op->op == OP_OR) ||
477 	 *     (!match && op->op == OP_AND))
478 	 *	  return match;
479 	 */
480 	type = op->op == OP_OR;
481 
482 	for (i = 0; i < op->val; i++) {
483 		pred = &preds[op->ops[i]];
484 		if (!WARN_ON_ONCE(!pred->fn))
485 			match = pred->fn(pred, rec);
486 		if (!!match == type)
487 			return match;
488 	}
489 	return match;
490 }
491 
/* Context passed to filter_match_preds_cb() through walk_pred_tree(). */
struct filter_match_preds_data {
	struct filter_pred *preds;	/* the filter's predicate array */
	int match;			/* running match result */
	void *rec;			/* the event record being tested */
};
497 
/*
 * Tree-walk callback that evaluates the filter against d->rec.
 * Folded AND/OR nodes are evaluated as a flat array via process_ops();
 * short-circuiting skips a sibling subtree once the result is decided.
 */
static int filter_match_preds_cb(enum move_type move, struct filter_pred *pred,
				 int *err, void *data)
{
	struct filter_match_preds_data *d = data;

	*err = 0;
	switch (move) {
	case MOVE_DOWN:
		/* only AND and OR have children */
		if (pred->left != FILTER_PRED_INVALID) {
			/* If ops is set, then it was folded. */
			if (!pred->ops)
				return WALK_PRED_DEFAULT;
			/* We can treat folded ops as a leaf node */
			d->match = process_ops(d->preds, pred, d->rec);
		} else {
			if (!WARN_ON_ONCE(!pred->fn))
				d->match = pred->fn(pred, d->rec);
		}

		return WALK_PRED_PARENT;
	case MOVE_UP_FROM_LEFT:
		/*
		 * Check for short circuits.
		 *
		 * Optimization: !!match == (pred->op == OP_OR)
		 *   is the same as:
		 * if ((match && pred->op == OP_OR) ||
		 *     (!match && pred->op == OP_AND))
		 */
		if (!!d->match == (pred->op == OP_OR))
			return WALK_PRED_PARENT;
		break;
	case MOVE_UP_FROM_RIGHT:
		break;
	}

	return WALK_PRED_DEFAULT;
}
537 
/* return 1 if event matches, 0 otherwise (discard) */
int filter_match_preds(struct event_filter *filter, void *rec)
{
	struct filter_pred *preds;
	struct filter_pred *root;
	struct filter_match_preds_data data = {
		/* match is currently meaningless */
		.match = -1,
		.rec   = rec,
	};
	int n_preds, ret;

	/* no filter is considered a match */
	if (!filter)
		return 1;

	/* an allocated filter with no predicates also matches everything */
	n_preds = filter->n_preds;
	if (!n_preds)
		return 1;

	/*
	 * n_preds, root and filter->preds are protect with preemption disabled.
	 */
	root = rcu_dereference_sched(filter->root);
	if (!root)
		return 1;

	data.preds = preds = rcu_dereference_sched(filter->preds);
	ret = walk_pred_tree(preds, root, filter_match_preds_cb, &data);
	/* the walk only fails if preds is NULL, which cannot happen here */
	WARN_ON(ret);
	return data.match;
}
EXPORT_SYMBOL_GPL(filter_match_preds);
571 
/* Record the last parse error and its position for later reporting. */
static void parse_error(struct filter_parse_state *ps, int err, int pos)
{
	ps->lasterr = err;
	ps->lasterr_pos = pos;
}
577 
578 static void remove_filter_string(struct event_filter *filter)
579 {
580 	if (!filter)
581 		return;
582 
583 	kfree(filter->filter_string);
584 	filter->filter_string = NULL;
585 }
586 
587 static int replace_filter_string(struct event_filter *filter,
588 				 char *filter_string)
589 {
590 	kfree(filter->filter_string);
591 	filter->filter_string = kstrdup(filter_string, GFP_KERNEL);
592 	if (!filter->filter_string)
593 		return -ENOMEM;
594 
595 	return 0;
596 }
597 
598 static int append_filter_string(struct event_filter *filter,
599 				char *string)
600 {
601 	int newlen;
602 	char *new_filter_string;
603 
604 	BUG_ON(!filter->filter_string);
605 	newlen = strlen(filter->filter_string) + strlen(string) + 1;
606 	new_filter_string = kmalloc(newlen, GFP_KERNEL);
607 	if (!new_filter_string)
608 		return -ENOMEM;
609 
610 	strcpy(new_filter_string, filter->filter_string);
611 	strcat(new_filter_string, string);
612 	kfree(filter->filter_string);
613 	filter->filter_string = new_filter_string;
614 
615 	return 0;
616 }
617 
/*
 * Append an error annotation to the filter string: a line of spaces
 * with a '^' under the offending position, followed by
 * "parse_error: <message>".  Best-effort: failures are ignored.
 */
static void append_filter_err(struct filter_parse_state *ps,
			      struct event_filter *filter)
{
	int pos = ps->lasterr_pos;
	char *buf, *pbuf;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return;

	append_filter_string(filter, "\n");
	memset(buf, ' ', PAGE_SIZE);
	/* keep the caret plus the message comfortably within the page */
	if (pos > PAGE_SIZE - 128)
		pos = 0;
	buf[pos] = '^';
	pbuf = &buf[pos] + 1;

	sprintf(pbuf, "\nparse_error: %s\n", err_text[ps->lasterr]);
	/* NOTE(review): append_filter_string() errors are silently ignored */
	append_filter_string(filter, buf);
	free_page((unsigned long) buf);
}
639 
640 static inline struct event_filter *event_filter(struct ftrace_event_file *file)
641 {
642 	if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
643 		return file->event_call->filter;
644 	else
645 		return file->filter;
646 }
647 
648 /* caller must hold event_mutex */
649 void print_event_filter(struct ftrace_event_file *file, struct trace_seq *s)
650 {
651 	struct event_filter *filter = event_filter(file);
652 
653 	if (filter && filter->filter_string)
654 		trace_seq_printf(s, "%s\n", filter->filter_string);
655 	else
656 		trace_seq_puts(s, "none\n");
657 }
658 
659 void print_subsystem_event_filter(struct event_subsystem *system,
660 				  struct trace_seq *s)
661 {
662 	struct event_filter *filter;
663 
664 	mutex_lock(&event_mutex);
665 	filter = system->filter;
666 	if (filter && filter->filter_string)
667 		trace_seq_printf(s, "%s\n", filter->filter_string);
668 	else
669 		trace_seq_puts(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
670 	mutex_unlock(&event_mutex);
671 }
672 
673 static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
674 {
675 	stack->preds = kcalloc(n_preds + 1, sizeof(*stack->preds), GFP_KERNEL);
676 	if (!stack->preds)
677 		return -ENOMEM;
678 	stack->index = n_preds;
679 	return 0;
680 }
681 
682 static void __free_pred_stack(struct pred_stack *stack)
683 {
684 	kfree(stack->preds);
685 	stack->index = 0;
686 }
687 
688 static int __push_pred_stack(struct pred_stack *stack,
689 			     struct filter_pred *pred)
690 {
691 	int index = stack->index;
692 
693 	if (WARN_ON(index == 0))
694 		return -ENOSPC;
695 
696 	stack->preds[--index] = pred;
697 	stack->index = index;
698 	return 0;
699 }
700 
701 static struct filter_pred *
702 __pop_pred_stack(struct pred_stack *stack)
703 {
704 	struct filter_pred *pred;
705 	int index = stack->index;
706 
707 	pred = stack->preds[index++];
708 	if (!pred)
709 		return NULL;
710 
711 	stack->index = index;
712 	return pred;
713 }
714 
/*
 * Copy @src into slot @idx of the filter's predicate array and wire it
 * into the tree.  Logical ops (AND/OR) pop their two children off
 * @stack; leaves are marked by an invalid left link.  The
 * FILTER_PRED_FOLD bit is propagated upward so that runs of identical
 * ops can later be flattened into an array walk (see process_ops()).
 * Finally the finished pred is pushed back as an operand for its
 * parent.
 */
static int filter_set_pred(struct event_filter *filter,
			   int idx,
			   struct pred_stack *stack,
			   struct filter_pred *src)
{
	struct filter_pred *dest = &filter->preds[idx];
	struct filter_pred *left;
	struct filter_pred *right;

	*dest = *src;
	dest->index = idx;

	if (dest->op == OP_OR || dest->op == OP_AND) {
		right = __pop_pred_stack(stack);
		left = __pop_pred_stack(stack);
		if (!left || !right)
			return -EINVAL;
		/*
		 * If both children can be folded
		 * and they are the same op as this op or a leaf,
		 * then this op can be folded.
		 */
		if (left->index & FILTER_PRED_FOLD &&
		    (left->op == dest->op ||
		     left->left == FILTER_PRED_INVALID) &&
		    right->index & FILTER_PRED_FOLD &&
		    (right->op == dest->op ||
		     right->left == FILTER_PRED_INVALID))
			dest->index |= FILTER_PRED_FOLD;

		/* strip the fold bit when storing plain child/parent indexes */
		dest->left = left->index & ~FILTER_PRED_FOLD;
		dest->right = right->index & ~FILTER_PRED_FOLD;
		left->parent = dest->index & ~FILTER_PRED_FOLD;
		right->parent = dest->index | FILTER_PRED_IS_RIGHT;
	} else {
		/*
		 * Make dest->left invalid to be used as a quick
		 * way to know this is a leaf node.
		 */
		dest->left = FILTER_PRED_INVALID;

		/* All leafs allow folding the parent ops. */
		dest->index |= FILTER_PRED_FOLD;
	}

	return __push_pred_stack(stack, dest);
}
762 
763 static void __free_preds(struct event_filter *filter)
764 {
765 	int i;
766 
767 	if (filter->preds) {
768 		for (i = 0; i < filter->n_preds; i++)
769 			kfree(filter->preds[i].ops);
770 		kfree(filter->preds);
771 		filter->preds = NULL;
772 	}
773 	filter->a_preds = 0;
774 	filter->n_preds = 0;
775 }
776 
/* Clear the FILTERED flag on a call-wide filtered event. */
static void call_filter_disable(struct ftrace_event_call *call)
{
	call->flags &= ~TRACE_EVENT_FL_FILTERED;
}
781 
782 static void filter_disable(struct ftrace_event_file *file)
783 {
784 	struct ftrace_event_call *call = file->event_call;
785 
786 	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
787 		call_filter_disable(call);
788 	else
789 		file->flags &= ~FTRACE_EVENT_FL_FILTERED;
790 }
791 
792 static void __free_filter(struct event_filter *filter)
793 {
794 	if (!filter)
795 		return;
796 
797 	__free_preds(filter);
798 	kfree(filter->filter_string);
799 	kfree(filter);
800 }
801 
/* Free a call-wide filter and clear the pointer. */
void destroy_call_preds(struct ftrace_event_call *call)
{
	__free_filter(call->filter);
	call->filter = NULL;
}

/* Free a per-file filter and clear the pointer. */
static void destroy_file_preds(struct ftrace_event_file *file)
{
	__free_filter(file->filter);
	file->filter = NULL;
}
813 
814 /*
815  * Called when destroying the ftrace_event_file.
816  * The file is being freed, so we do not need to worry about
817  * the file being currently used. This is for module code removing
818  * the tracepoints from within it.
819  */
820 void destroy_preds(struct ftrace_event_file *file)
821 {
822 	if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
823 		destroy_call_preds(file->event_call);
824 	else
825 		destroy_file_preds(file);
826 }
827 
828 static struct event_filter *__alloc_filter(void)
829 {
830 	struct event_filter *filter;
831 
832 	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
833 	return filter;
834 }
835 
836 static int __alloc_preds(struct event_filter *filter, int n_preds)
837 {
838 	struct filter_pred *pred;
839 	int i;
840 
841 	if (filter->preds)
842 		__free_preds(filter);
843 
844 	filter->preds = kcalloc(n_preds, sizeof(*filter->preds), GFP_KERNEL);
845 
846 	if (!filter->preds)
847 		return -ENOMEM;
848 
849 	filter->a_preds = n_preds;
850 	filter->n_preds = 0;
851 
852 	for (i = 0; i < n_preds; i++) {
853 		pred = &filter->preds[i];
854 		pred->fn = filter_pred_none;
855 	}
856 
857 	return 0;
858 }
859 
860 static inline void __remove_filter(struct ftrace_event_file *file)
861 {
862 	struct ftrace_event_call *call = file->event_call;
863 
864 	filter_disable(file);
865 	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
866 		remove_filter_string(call->filter);
867 	else
868 		remove_filter_string(file->filter);
869 }
870 
871 static void filter_free_subsystem_preds(struct event_subsystem *system,
872 					struct trace_array *tr)
873 {
874 	struct ftrace_event_file *file;
875 	struct ftrace_event_call *call;
876 
877 	list_for_each_entry(file, &tr->events, list) {
878 		call = file->event_call;
879 		if (strcmp(call->class->system, system->name) != 0)
880 			continue;
881 
882 		__remove_filter(file);
883 	}
884 }
885 
886 static inline void __free_subsystem_filter(struct ftrace_event_file *file)
887 {
888 	struct ftrace_event_call *call = file->event_call;
889 
890 	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) {
891 		__free_filter(call->filter);
892 		call->filter = NULL;
893 	} else {
894 		__free_filter(file->filter);
895 		file->filter = NULL;
896 	}
897 }
898 
899 static void filter_free_subsystem_filters(struct event_subsystem *system,
900 					  struct trace_array *tr)
901 {
902 	struct ftrace_event_file *file;
903 	struct ftrace_event_call *call;
904 
905 	list_for_each_entry(file, &tr->events, list) {
906 		call = file->event_call;
907 		if (strcmp(call->class->system, system->name) != 0)
908 			continue;
909 		__free_subsystem_filter(file);
910 	}
911 }
912 
913 static int filter_add_pred(struct filter_parse_state *ps,
914 			   struct event_filter *filter,
915 			   struct filter_pred *pred,
916 			   struct pred_stack *stack)
917 {
918 	int err;
919 
920 	if (WARN_ON(filter->n_preds == filter->a_preds)) {
921 		parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
922 		return -ENOSPC;
923 	}
924 
925 	err = filter_set_pred(filter, filter->n_preds, stack, pred);
926 	if (err)
927 		return err;
928 
929 	filter->n_preds++;
930 
931 	return 0;
932 }
933 
934 int filter_assign_type(const char *type)
935 {
936 	if (strstr(type, "__data_loc") && strstr(type, "char"))
937 		return FILTER_DYN_STRING;
938 
939 	if (strchr(type, '[') && strstr(type, "char"))
940 		return FILTER_STATIC_STRING;
941 
942 	return FILTER_OTHER;
943 }
944 
/* True if @field is a function-trace address field. */
static bool is_function_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_TRACE_FN;
}
949 
950 static bool is_string_field(struct ftrace_event_field *field)
951 {
952 	return field->filter_type == FILTER_DYN_STRING ||
953 	       field->filter_type == FILTER_STATIC_STRING ||
954 	       field->filter_type == FILTER_PTR_STRING;
955 }
956 
957 static int is_legal_op(struct ftrace_event_field *field, int op)
958 {
959 	if (is_string_field(field) &&
960 	    (op != OP_EQ && op != OP_NE && op != OP_GLOB))
961 		return 0;
962 	if (!is_string_field(field) && op == OP_GLOB)
963 		return 0;
964 
965 	return 1;
966 }
967 
968 static filter_pred_fn_t select_comparison_fn(int op, int field_size,
969 					     int field_is_signed)
970 {
971 	filter_pred_fn_t fn = NULL;
972 
973 	switch (field_size) {
974 	case 8:
975 		if (op == OP_EQ || op == OP_NE)
976 			fn = filter_pred_64;
977 		else if (field_is_signed)
978 			fn = filter_pred_s64;
979 		else
980 			fn = filter_pred_u64;
981 		break;
982 	case 4:
983 		if (op == OP_EQ || op == OP_NE)
984 			fn = filter_pred_32;
985 		else if (field_is_signed)
986 			fn = filter_pred_s32;
987 		else
988 			fn = filter_pred_u32;
989 		break;
990 	case 2:
991 		if (op == OP_EQ || op == OP_NE)
992 			fn = filter_pred_16;
993 		else if (field_is_signed)
994 			fn = filter_pred_s16;
995 		else
996 			fn = filter_pred_u16;
997 		break;
998 	case 1:
999 		if (op == OP_EQ || op == OP_NE)
1000 			fn = filter_pred_8;
1001 		else if (field_is_signed)
1002 			fn = filter_pred_s8;
1003 		else
1004 			fn = filter_pred_u8;
1005 		break;
1006 	}
1007 
1008 	return fn;
1009 }
1010 
/*
 * Finish setting up @pred for @field: validate the operator, choose the
 * match function (string regex or integer comparison) and, for integer
 * fields, parse the literal value from the pattern text.
 */
static int init_pred(struct filter_parse_state *ps,
		     struct ftrace_event_field *field,
		     struct filter_pred *pred)

{
	filter_pred_fn_t fn = filter_pred_none;
	unsigned long long val;
	int ret;

	pred->offset = field->offset;

	if (!is_legal_op(field, pred->op)) {
		parse_error(ps, FILT_ERR_ILLEGAL_FIELD_OP, 0);
		return -EINVAL;
	}

	if (is_string_field(field)) {
		filter_build_regex(pred);

		if (field->filter_type == FILTER_STATIC_STRING) {
			fn = filter_pred_string;
			pred->regex.field_len = field->size;
		} else if (field->filter_type == FILTER_DYN_STRING)
			fn = filter_pred_strloc;
		else
			fn = filter_pred_pchar;
	} else if (is_function_field(field)) {
		/* function-trace filters only support the "ip" field */
		if (strcmp(field->name, "ip")) {
			parse_error(ps, FILT_ERR_IP_FIELD_ONLY, 0);
			return -EINVAL;
		}
	} else {
		/* integer field: parse the literal with proper signedness */
		if (field->is_signed)
			ret = kstrtoll(pred->regex.pattern, 0, &val);
		else
			ret = kstrtoull(pred->regex.pattern, 0, &val);
		if (ret) {
			parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
			return -EINVAL;
		}
		pred->val = val;

		fn = select_comparison_fn(pred->op, field->size,
					  field->is_signed);
		if (!fn) {
			parse_error(ps, FILT_ERR_INVALID_OP, 0);
			return -EINVAL;
		}
	}

	/* != is implemented as inverted == */
	if (pred->op == OP_NE)
		pred->not = 1;

	pred->fn = fn;
	return 0;
}
1067 
1068 static void parse_init(struct filter_parse_state *ps,
1069 		       struct filter_op *ops,
1070 		       char *infix_string)
1071 {
1072 	memset(ps, '\0', sizeof(*ps));
1073 
1074 	ps->infix.string = infix_string;
1075 	ps->infix.cnt = strlen(infix_string);
1076 	ps->ops = ops;
1077 
1078 	INIT_LIST_HEAD(&ps->opstack);
1079 	INIT_LIST_HEAD(&ps->postfix);
1080 }
1081 
/* Consume and return the next character of the infix string. */
static char infix_next(struct filter_parse_state *ps)
{
	ps->infix.cnt--;

	return ps->infix.string[ps->infix.tail++];
}

/* Look at the next character without consuming it; 0 at end of input. */
static char infix_peek(struct filter_parse_state *ps)
{
	if (ps->infix.tail == strlen(ps->infix.string))
		return 0;

	return ps->infix.string[ps->infix.tail];
}

/* Skip over one character of the infix string. */
static void infix_advance(struct filter_parse_state *ps)
{
	ps->infix.cnt--;
	ps->infix.tail++;
}

/* True if operator @a binds less tightly than operator @b. */
static inline int is_precedence_lower(struct filter_parse_state *ps,
				      int a, int b)
{
	return ps->ops[a].precedence < ps->ops[b].precedence;
}
1108 
1109 static inline int is_op_char(struct filter_parse_state *ps, char c)
1110 {
1111 	int i;
1112 
1113 	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1114 		if (ps->ops[i].string[0] == c)
1115 			return 1;
1116 	}
1117 
1118 	return 0;
1119 }
1120 
1121 static int infix_get_op(struct filter_parse_state *ps, char firstc)
1122 {
1123 	char nextc = infix_peek(ps);
1124 	char opstr[3];
1125 	int i;
1126 
1127 	opstr[0] = firstc;
1128 	opstr[1] = nextc;
1129 	opstr[2] = '\0';
1130 
1131 	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1132 		if (!strcmp(opstr, ps->ops[i].string)) {
1133 			infix_advance(ps);
1134 			return ps->ops[i].id;
1135 		}
1136 	}
1137 
1138 	opstr[1] = '\0';
1139 
1140 	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1141 		if (!strcmp(opstr, ps->ops[i].string))
1142 			return ps->ops[i].id;
1143 	}
1144 
1145 	return OP_NONE;
1146 }
1147 
1148 static inline void clear_operand_string(struct filter_parse_state *ps)
1149 {
1150 	memset(ps->operand.string, '\0', MAX_FILTER_STR_VAL);
1151 	ps->operand.tail = 0;
1152 }
1153 
1154 static inline int append_operand_char(struct filter_parse_state *ps, char c)
1155 {
1156 	if (ps->operand.tail == MAX_FILTER_STR_VAL - 1)
1157 		return -EINVAL;
1158 
1159 	ps->operand.string[ps->operand.tail++] = c;
1160 
1161 	return 0;
1162 }
1163 
1164 static int filter_opstack_push(struct filter_parse_state *ps, int op)
1165 {
1166 	struct opstack_op *opstack_op;
1167 
1168 	opstack_op = kmalloc(sizeof(*opstack_op), GFP_KERNEL);
1169 	if (!opstack_op)
1170 		return -ENOMEM;
1171 
1172 	opstack_op->op = op;
1173 	list_add(&opstack_op->list, &ps->opstack);
1174 
1175 	return 0;
1176 }
1177 
/* True if the operator stack has no entries. */
static int filter_opstack_empty(struct filter_parse_state *ps)
{
	return list_empty(&ps->opstack);
}
1182 
1183 static int filter_opstack_top(struct filter_parse_state *ps)
1184 {
1185 	struct opstack_op *opstack_op;
1186 
1187 	if (filter_opstack_empty(ps))
1188 		return OP_NONE;
1189 
1190 	opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1191 
1192 	return opstack_op->op;
1193 }
1194 
1195 static int filter_opstack_pop(struct filter_parse_state *ps)
1196 {
1197 	struct opstack_op *opstack_op;
1198 	int op;
1199 
1200 	if (filter_opstack_empty(ps))
1201 		return OP_NONE;
1202 
1203 	opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1204 	op = opstack_op->op;
1205 	list_del(&opstack_op->list);
1206 
1207 	kfree(opstack_op);
1208 
1209 	return op;
1210 }
1211 
/* Pop and free every operator remaining on the stack. */
static void filter_opstack_clear(struct filter_parse_state *ps)
{
	while (!filter_opstack_empty(ps))
		filter_opstack_pop(ps);
}
1217 
/* Return the operand text accumulated so far. */
static char *curr_operand(struct filter_parse_state *ps)
{
	return ps->operand.string;
}
1222 
1223 static int postfix_append_operand(struct filter_parse_state *ps, char *operand)
1224 {
1225 	struct postfix_elt *elt;
1226 
1227 	elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1228 	if (!elt)
1229 		return -ENOMEM;
1230 
1231 	elt->op = OP_NONE;
1232 	elt->operand = kstrdup(operand, GFP_KERNEL);
1233 	if (!elt->operand) {
1234 		kfree(elt);
1235 		return -ENOMEM;
1236 	}
1237 
1238 	list_add_tail(&elt->list, &ps->postfix);
1239 
1240 	return 0;
1241 }
1242 
1243 static int postfix_append_op(struct filter_parse_state *ps, int op)
1244 {
1245 	struct postfix_elt *elt;
1246 
1247 	elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1248 	if (!elt)
1249 		return -ENOMEM;
1250 
1251 	elt->op = op;
1252 	elt->operand = NULL;
1253 
1254 	list_add_tail(&elt->list, &ps->postfix);
1255 
1256 	return 0;
1257 }
1258 
1259 static void postfix_clear(struct filter_parse_state *ps)
1260 {
1261 	struct postfix_elt *elt;
1262 
1263 	while (!list_empty(&ps->postfix)) {
1264 		elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
1265 		list_del(&elt->list);
1266 		kfree(elt->operand);
1267 		kfree(elt);
1268 	}
1269 }
1270 
1271 static int filter_parse(struct filter_parse_state *ps)
1272 {
1273 	int in_string = 0;
1274 	int op, top_op;
1275 	char ch;
1276 
1277 	while ((ch = infix_next(ps))) {
1278 		if (ch == '"') {
1279 			in_string ^= 1;
1280 			continue;
1281 		}
1282 
1283 		if (in_string)
1284 			goto parse_operand;
1285 
1286 		if (isspace(ch))
1287 			continue;
1288 
1289 		if (is_op_char(ps, ch)) {
1290 			op = infix_get_op(ps, ch);
1291 			if (op == OP_NONE) {
1292 				parse_error(ps, FILT_ERR_INVALID_OP, 0);
1293 				return -EINVAL;
1294 			}
1295 
1296 			if (strlen(curr_operand(ps))) {
1297 				postfix_append_operand(ps, curr_operand(ps));
1298 				clear_operand_string(ps);
1299 			}
1300 
1301 			while (!filter_opstack_empty(ps)) {
1302 				top_op = filter_opstack_top(ps);
1303 				if (!is_precedence_lower(ps, top_op, op)) {
1304 					top_op = filter_opstack_pop(ps);
1305 					postfix_append_op(ps, top_op);
1306 					continue;
1307 				}
1308 				break;
1309 			}
1310 
1311 			filter_opstack_push(ps, op);
1312 			continue;
1313 		}
1314 
1315 		if (ch == '(') {
1316 			filter_opstack_push(ps, OP_OPEN_PAREN);
1317 			continue;
1318 		}
1319 
1320 		if (ch == ')') {
1321 			if (strlen(curr_operand(ps))) {
1322 				postfix_append_operand(ps, curr_operand(ps));
1323 				clear_operand_string(ps);
1324 			}
1325 
1326 			top_op = filter_opstack_pop(ps);
1327 			while (top_op != OP_NONE) {
1328 				if (top_op == OP_OPEN_PAREN)
1329 					break;
1330 				postfix_append_op(ps, top_op);
1331 				top_op = filter_opstack_pop(ps);
1332 			}
1333 			if (top_op == OP_NONE) {
1334 				parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
1335 				return -EINVAL;
1336 			}
1337 			continue;
1338 		}
1339 parse_operand:
1340 		if (append_operand_char(ps, ch)) {
1341 			parse_error(ps, FILT_ERR_OPERAND_TOO_LONG, 0);
1342 			return -EINVAL;
1343 		}
1344 	}
1345 
1346 	if (strlen(curr_operand(ps)))
1347 		postfix_append_operand(ps, curr_operand(ps));
1348 
1349 	while (!filter_opstack_empty(ps)) {
1350 		top_op = filter_opstack_pop(ps);
1351 		if (top_op == OP_NONE)
1352 			break;
1353 		if (top_op == OP_OPEN_PAREN) {
1354 			parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
1355 			return -EINVAL;
1356 		}
1357 		postfix_append_op(ps, top_op);
1358 	}
1359 
1360 	return 0;
1361 }
1362 
/*
 * Build a filter_pred for one postfix operator element.
 *
 * For logical ops (&&, ||) only the op field is meaningful.  For
 * comparison ops, operand1 names the event field and operand2 is the
 * value/pattern to match.
 *
 * NOTE(review): the returned predicate is a single static buffer that
 * is reused on every call -- callers must copy it (filter_add_pred())
 * before the next create_pred() call.  This presumably relies on all
 * callers being serialized by event_mutex; confirm before adding new
 * call sites.
 *
 * Returns NULL on any error, after recording a parse error in @ps.
 */
static struct filter_pred *create_pred(struct filter_parse_state *ps,
				       struct ftrace_event_call *call,
				       int op, char *operand1, char *operand2)
{
	struct ftrace_event_field *field;
	static struct filter_pred pred;

	memset(&pred, 0, sizeof(pred));
	pred.op = op;

	/* logical ops carry no field/value of their own */
	if (op == OP_AND || op == OP_OR)
		return &pred;

	if (!operand1 || !operand2) {
		parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
		return NULL;
	}

	field = trace_find_event_field(call, operand1);
	if (!field) {
		parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
		return NULL;
	}

	/* operand2 is bounded by MAX_FILTER_STR_VAL via append_operand_char() */
	strcpy(pred.regex.pattern, operand2);
	pred.regex.len = strlen(pred.regex.pattern);
	pred.field = field;
	return init_pred(ps, field, &pred) ? NULL : &pred;
}
1392 
1393 static int check_preds(struct filter_parse_state *ps)
1394 {
1395 	int n_normal_preds = 0, n_logical_preds = 0;
1396 	struct postfix_elt *elt;
1397 
1398 	list_for_each_entry(elt, &ps->postfix, list) {
1399 		if (elt->op == OP_NONE)
1400 			continue;
1401 
1402 		if (elt->op == OP_AND || elt->op == OP_OR) {
1403 			n_logical_preds++;
1404 			continue;
1405 		}
1406 		n_normal_preds++;
1407 	}
1408 
1409 	if (!n_normal_preds || n_logical_preds >= n_normal_preds) {
1410 		parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
1411 		return -EINVAL;
1412 	}
1413 
1414 	return 0;
1415 }
1416 
1417 static int count_preds(struct filter_parse_state *ps)
1418 {
1419 	struct postfix_elt *elt;
1420 	int n_preds = 0;
1421 
1422 	list_for_each_entry(elt, &ps->postfix, list) {
1423 		if (elt->op == OP_NONE)
1424 			continue;
1425 		n_preds++;
1426 	}
1427 
1428 	return n_preds;
1429 }
1430 
/* Bookkeeping for check_pred_tree_cb(): abort once count exceeds max. */
struct check_pred_data {
	int count;	/* nodes visited so far */
	int max;	/* bound above which the tree is considered cyclic */
};
1435 
1436 static int check_pred_tree_cb(enum move_type move, struct filter_pred *pred,
1437 			      int *err, void *data)
1438 {
1439 	struct check_pred_data *d = data;
1440 
1441 	if (WARN_ON(d->count++ > d->max)) {
1442 		*err = -EINVAL;
1443 		return WALK_PRED_ABORT;
1444 	}
1445 	return WALK_PRED_DEFAULT;
1446 }
1447 
1448 /*
1449  * The tree is walked at filtering of an event. If the tree is not correctly
1450  * built, it may cause an infinite loop. Check here that the tree does
1451  * indeed terminate.
1452  */
1453 static int check_pred_tree(struct event_filter *filter,
1454 			   struct filter_pred *root)
1455 {
1456 	struct check_pred_data data = {
1457 		/*
1458 		 * The max that we can hit a node is three times.
1459 		 * Once going down, once coming up from left, and
1460 		 * once coming up from right. This is more than enough
1461 		 * since leafs are only hit a single time.
1462 		 */
1463 		.max   = 3 * filter->n_preds,
1464 		.count = 0,
1465 	};
1466 
1467 	return walk_pred_tree(filter->preds, root,
1468 			      check_pred_tree_cb, &data);
1469 }
1470 
1471 static int count_leafs_cb(enum move_type move, struct filter_pred *pred,
1472 			  int *err, void *data)
1473 {
1474 	int *count = data;
1475 
1476 	if ((move == MOVE_DOWN) &&
1477 	    (pred->left == FILTER_PRED_INVALID))
1478 		(*count)++;
1479 
1480 	return WALK_PRED_DEFAULT;
1481 }
1482 
1483 static int count_leafs(struct filter_pred *preds, struct filter_pred *root)
1484 {
1485 	int count = 0, ret;
1486 
1487 	ret = walk_pred_tree(preds, root, count_leafs_cb, &count);
1488 	WARN_ON(ret);
1489 	return count;
1490 }
1491 
/* State for fold_pred_cb(): gathers leaf indices under a folded root. */
struct fold_pred_data {
	struct filter_pred *root;	/* node whose ops[] is being filled */
	int count;			/* leaves recorded so far */
	int children;			/* capacity of root->ops[] */
};
1497 
1498 static int fold_pred_cb(enum move_type move, struct filter_pred *pred,
1499 			int *err, void *data)
1500 {
1501 	struct fold_pred_data *d = data;
1502 	struct filter_pred *root = d->root;
1503 
1504 	if (move != MOVE_DOWN)
1505 		return WALK_PRED_DEFAULT;
1506 	if (pred->left != FILTER_PRED_INVALID)
1507 		return WALK_PRED_DEFAULT;
1508 
1509 	if (WARN_ON(d->count == d->children)) {
1510 		*err = -EINVAL;
1511 		return WALK_PRED_ABORT;
1512 	}
1513 
1514 	pred->index &= ~FILTER_PRED_FOLD;
1515 	root->ops[d->count++] = pred->index;
1516 	return WALK_PRED_DEFAULT;
1517 }
1518 
1519 static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
1520 {
1521 	struct fold_pred_data data = {
1522 		.root  = root,
1523 		.count = 0,
1524 	};
1525 	int children;
1526 
1527 	/* No need to keep the fold flag */
1528 	root->index &= ~FILTER_PRED_FOLD;
1529 
1530 	/* If the root is a leaf then do nothing */
1531 	if (root->left == FILTER_PRED_INVALID)
1532 		return 0;
1533 
1534 	/* count the children */
1535 	children = count_leafs(preds, &preds[root->left]);
1536 	children += count_leafs(preds, &preds[root->right]);
1537 
1538 	root->ops = kcalloc(children, sizeof(*root->ops), GFP_KERNEL);
1539 	if (!root->ops)
1540 		return -ENOMEM;
1541 
1542 	root->val = children;
1543 	data.children = children;
1544 	return walk_pred_tree(preds, root, fold_pred_cb, &data);
1545 }
1546 
1547 static int fold_pred_tree_cb(enum move_type move, struct filter_pred *pred,
1548 			     int *err, void *data)
1549 {
1550 	struct filter_pred *preds = data;
1551 
1552 	if (move != MOVE_DOWN)
1553 		return WALK_PRED_DEFAULT;
1554 	if (!(pred->index & FILTER_PRED_FOLD))
1555 		return WALK_PRED_DEFAULT;
1556 
1557 	*err = fold_pred(preds, pred);
1558 	if (*err)
1559 		return WALK_PRED_ABORT;
1560 
1561 	/* eveyrhing below is folded, continue with parent */
1562 	return WALK_PRED_PARENT;
1563 }
1564 
1565 /*
1566  * To optimize the processing of the ops, if we have several "ors" or
1567  * "ands" together, we can put them in an array and process them all
1568  * together speeding up the filter logic.
1569  */
1570 static int fold_pred_tree(struct event_filter *filter,
1571 			   struct filter_pred *root)
1572 {
1573 	return walk_pred_tree(filter->preds, root, fold_pred_tree_cb,
1574 			      filter->preds);
1575 }
1576 
1577 static int replace_preds(struct ftrace_event_call *call,
1578 			 struct event_filter *filter,
1579 			 struct filter_parse_state *ps,
1580 			 char *filter_string,
1581 			 bool dry_run)
1582 {
1583 	char *operand1 = NULL, *operand2 = NULL;
1584 	struct filter_pred *pred;
1585 	struct filter_pred *root;
1586 	struct postfix_elt *elt;
1587 	struct pred_stack stack = { }; /* init to NULL */
1588 	int err;
1589 	int n_preds = 0;
1590 
1591 	n_preds = count_preds(ps);
1592 	if (n_preds >= MAX_FILTER_PRED) {
1593 		parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1594 		return -ENOSPC;
1595 	}
1596 
1597 	err = check_preds(ps);
1598 	if (err)
1599 		return err;
1600 
1601 	if (!dry_run) {
1602 		err = __alloc_pred_stack(&stack, n_preds);
1603 		if (err)
1604 			return err;
1605 		err = __alloc_preds(filter, n_preds);
1606 		if (err)
1607 			goto fail;
1608 	}
1609 
1610 	n_preds = 0;
1611 	list_for_each_entry(elt, &ps->postfix, list) {
1612 		if (elt->op == OP_NONE) {
1613 			if (!operand1)
1614 				operand1 = elt->operand;
1615 			else if (!operand2)
1616 				operand2 = elt->operand;
1617 			else {
1618 				parse_error(ps, FILT_ERR_TOO_MANY_OPERANDS, 0);
1619 				err = -EINVAL;
1620 				goto fail;
1621 			}
1622 			continue;
1623 		}
1624 
1625 		if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) {
1626 			parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1627 			err = -ENOSPC;
1628 			goto fail;
1629 		}
1630 
1631 		pred = create_pred(ps, call, elt->op, operand1, operand2);
1632 		if (!pred) {
1633 			err = -EINVAL;
1634 			goto fail;
1635 		}
1636 
1637 		if (!dry_run) {
1638 			err = filter_add_pred(ps, filter, pred, &stack);
1639 			if (err)
1640 				goto fail;
1641 		}
1642 
1643 		operand1 = operand2 = NULL;
1644 	}
1645 
1646 	if (!dry_run) {
1647 		/* We should have one item left on the stack */
1648 		pred = __pop_pred_stack(&stack);
1649 		if (!pred)
1650 			return -EINVAL;
1651 		/* This item is where we start from in matching */
1652 		root = pred;
1653 		/* Make sure the stack is empty */
1654 		pred = __pop_pred_stack(&stack);
1655 		if (WARN_ON(pred)) {
1656 			err = -EINVAL;
1657 			filter->root = NULL;
1658 			goto fail;
1659 		}
1660 		err = check_pred_tree(filter, root);
1661 		if (err)
1662 			goto fail;
1663 
1664 		/* Optimize the tree */
1665 		err = fold_pred_tree(filter, root);
1666 		if (err)
1667 			goto fail;
1668 
1669 		/* We don't set root until we know it works */
1670 		barrier();
1671 		filter->root = root;
1672 	}
1673 
1674 	err = 0;
1675 fail:
1676 	__free_pred_stack(&stack);
1677 	return err;
1678 }
1679 
1680 static inline void event_set_filtered_flag(struct ftrace_event_file *file)
1681 {
1682 	struct ftrace_event_call *call = file->event_call;
1683 
1684 	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1685 		call->flags |= TRACE_EVENT_FL_FILTERED;
1686 	else
1687 		file->flags |= FTRACE_EVENT_FL_FILTERED;
1688 }
1689 
/*
 * Publish @filter for the event with rcu_assign_pointer() so that
 * concurrent readers observe either the old filter or the fully
 * initialized new one.  Legacy events (USE_CALL_FILTER) keep the
 * filter on the call; everything else keeps it on the per-instance
 * file.
 */
static inline void event_set_filter(struct ftrace_event_file *file,
				    struct event_filter *filter)
{
	struct ftrace_event_call *call = file->event_call;

	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
		rcu_assign_pointer(call->filter, filter);
	else
		rcu_assign_pointer(file->filter, filter);
}
1700 
/*
 * Clear the event's filter pointer.  RCU_INIT_POINTER() suffices here
 * (no memory ordering needed when publishing NULL); the caller is
 * responsible for waiting a grace period before freeing the old
 * filter.
 */
static inline void event_clear_filter(struct ftrace_event_file *file)
{
	struct ftrace_event_call *call = file->event_call;

	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
		RCU_INIT_POINTER(call->filter, NULL);
	else
		RCU_INIT_POINTER(file->filter, NULL);
}
1710 
1711 static inline void
1712 event_set_no_set_filter_flag(struct ftrace_event_file *file)
1713 {
1714 	struct ftrace_event_call *call = file->event_call;
1715 
1716 	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1717 		call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
1718 	else
1719 		file->flags |= FTRACE_EVENT_FL_NO_SET_FILTER;
1720 }
1721 
1722 static inline void
1723 event_clear_no_set_filter_flag(struct ftrace_event_file *file)
1724 {
1725 	struct ftrace_event_call *call = file->event_call;
1726 
1727 	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1728 		call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
1729 	else
1730 		file->flags &= ~FTRACE_EVENT_FL_NO_SET_FILTER;
1731 }
1732 
1733 static inline bool
1734 event_no_set_filter_flag(struct ftrace_event_file *file)
1735 {
1736 	struct ftrace_event_call *call = file->event_call;
1737 
1738 	if (file->flags & FTRACE_EVENT_FL_NO_SET_FILTER)
1739 		return true;
1740 
1741 	if ((call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) &&
1742 	    (call->flags & TRACE_EVENT_FL_NO_SET_FILTER))
1743 		return true;
1744 
1745 	return false;
1746 }
1747 
/* Node tying an event_filter to a list during subsystem-wide updates. */
struct filter_list {
	struct list_head	list;
	struct event_filter	*filter;
};
1752 
/*
 * Apply @filter_string to every event of @system in @tr.
 *
 * Pass 1 dry-runs the filter against each matching event and flags
 * those whose fields do not fit.  Pass 2 builds a real filter for each
 * remaining event and swaps it in, stashing the old filter on a local
 * list so it can be freed only after a grace period.  Succeeds if at
 * least one event accepted the filter.
 */
static int replace_system_preds(struct event_subsystem *system,
				struct trace_array *tr,
				struct filter_parse_state *ps,
				char *filter_string)
{
	struct ftrace_event_file *file;
	struct ftrace_event_call *call;
	struct filter_list *filter_item;
	struct filter_list *tmp;
	LIST_HEAD(filter_list);
	bool fail = true;
	int err;

	/* Pass 1: mark which events can take this filter */
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if (strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * Try to see if the filter can be applied
		 *  (filter arg is ignored on dry_run)
		 */
		err = replace_preds(call, NULL, ps, filter_string, true);
		if (err)
			event_set_no_set_filter_flag(file);
		else
			event_clear_no_set_filter_flag(file);
	}

	/* Pass 2: build and install a filter for each accepting event */
	list_for_each_entry(file, &tr->events, list) {
		struct event_filter *filter;

		call = file->event_call;

		if (strcmp(call->class->system, system->name) != 0)
			continue;

		if (event_no_set_filter_flag(file))
			continue;

		filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
		if (!filter_item)
			goto fail_mem;

		list_add_tail(&filter_item->list, &filter_list);

		filter_item->filter = __alloc_filter();
		if (!filter_item->filter)
			goto fail_mem;
		filter = filter_item->filter;

		/* Can only fail on no memory */
		err = replace_filter_string(filter, filter_string);
		if (err)
			goto fail_mem;

		err = replace_preds(call, filter, ps, filter_string, false);
		if (err) {
			filter_disable(file);
			parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
			append_filter_err(ps, filter);
		} else
			event_set_filtered_flag(file);
		/*
		 * Regardless of if this returned an error, we still
		 * replace the filter for the call.
		 */
		filter = event_filter(file);
		event_set_filter(file, filter_item->filter);
		/* the list node now holds the OLD filter, freed below */
		filter_item->filter = filter;

		fail = false;
	}

	if (fail)
		goto fail;

	/*
	 * The calls can still be using the old filters.
	 * Do a synchronize_sched() to ensure all calls are
	 * done with them before we free them.
	 */
	synchronize_sched();
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		__free_filter(filter_item->filter);
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	return 0;
 fail:
	/* No call succeeded */
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
	return -EINVAL;
 fail_mem:
	/* If any call succeeded, we still need to sync */
	if (!fail)
		synchronize_sched();
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		__free_filter(filter_item->filter);
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	return -ENOMEM;
}
1861 
1862 static int create_filter_start(char *filter_str, bool set_str,
1863 			       struct filter_parse_state **psp,
1864 			       struct event_filter **filterp)
1865 {
1866 	struct event_filter *filter;
1867 	struct filter_parse_state *ps = NULL;
1868 	int err = 0;
1869 
1870 	WARN_ON_ONCE(*psp || *filterp);
1871 
1872 	/* allocate everything, and if any fails, free all and fail */
1873 	filter = __alloc_filter();
1874 	if (filter && set_str)
1875 		err = replace_filter_string(filter, filter_str);
1876 
1877 	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
1878 
1879 	if (!filter || !ps || err) {
1880 		kfree(ps);
1881 		__free_filter(filter);
1882 		return -ENOMEM;
1883 	}
1884 
1885 	/* we're committed to creating a new filter */
1886 	*filterp = filter;
1887 	*psp = ps;
1888 
1889 	parse_init(ps, filter_ops, filter_str);
1890 	err = filter_parse(ps);
1891 	if (err && set_str)
1892 		append_filter_err(ps, filter);
1893 	return err;
1894 }
1895 
/* Tear down the parse state created by create_filter_start(). */
static void create_filter_finish(struct filter_parse_state *ps)
{
	if (!ps)
		return;

	filter_opstack_clear(ps);
	postfix_clear(ps);
	kfree(ps);
}
1904 
1905 /**
1906  * create_filter - create a filter for a ftrace_event_call
1907  * @call: ftrace_event_call to create a filter for
1908  * @filter_str: filter string
1909  * @set_str: remember @filter_str and enable detailed error in filter
1910  * @filterp: out param for created filter (always updated on return)
1911  *
1912  * Creates a filter for @call with @filter_str.  If @set_str is %true,
1913  * @filter_str is copied and recorded in the new filter.
1914  *
1915  * On success, returns 0 and *@filterp points to the new filter.  On
1916  * failure, returns -errno and *@filterp may point to %NULL or to a new
1917  * filter.  In the latter case, the returned filter contains error
1918  * information if @set_str is %true and the caller is responsible for
1919  * freeing it.
1920  */
1921 static int create_filter(struct ftrace_event_call *call,
1922 			 char *filter_str, bool set_str,
1923 			 struct event_filter **filterp)
1924 {
1925 	struct event_filter *filter = NULL;
1926 	struct filter_parse_state *ps = NULL;
1927 	int err;
1928 
1929 	err = create_filter_start(filter_str, set_str, &ps, &filter);
1930 	if (!err) {
1931 		err = replace_preds(call, filter, ps, filter_str, false);
1932 		if (err && set_str)
1933 			append_filter_err(ps, filter);
1934 	}
1935 	create_filter_finish(ps);
1936 
1937 	*filterp = filter;
1938 	return err;
1939 }
1940 
1941 /**
1942  * create_system_filter - create a filter for an event_subsystem
1943  * @system: event_subsystem to create a filter for
1944  * @filter_str: filter string
1945  * @filterp: out param for created filter (always updated on return)
1946  *
1947  * Identical to create_filter() except that it creates a subsystem filter
1948  * and always remembers @filter_str.
1949  */
1950 static int create_system_filter(struct event_subsystem *system,
1951 				struct trace_array *tr,
1952 				char *filter_str, struct event_filter **filterp)
1953 {
1954 	struct event_filter *filter = NULL;
1955 	struct filter_parse_state *ps = NULL;
1956 	int err;
1957 
1958 	err = create_filter_start(filter_str, true, &ps, &filter);
1959 	if (!err) {
1960 		err = replace_system_preds(system, tr, ps, filter_str);
1961 		if (!err) {
1962 			/* System filters just show a default message */
1963 			kfree(filter->filter_string);
1964 			filter->filter_string = NULL;
1965 		} else {
1966 			append_filter_err(ps, filter);
1967 		}
1968 	}
1969 	create_filter_finish(ps);
1970 
1971 	*filterp = filter;
1972 	return err;
1973 }
1974 
/*
 * Install (or clear, when the string is "0") the filter for one event
 * file.  Old filters are only freed after synchronize_sched() so that
 * tracepoint probes still dereferencing them under preempt-off
 * sections finish first.
 *
 * caller must hold event_mutex
 */
int apply_event_filter(struct ftrace_event_file *file, char *filter_string)
{
	struct ftrace_event_call *call = file->event_call;
	struct event_filter *filter;
	int err;

	/* "0" means remove any existing filter */
	if (!strcmp(strstrip(filter_string), "0")) {
		filter_disable(file);
		filter = event_filter(file);

		if (!filter)
			return 0;

		event_clear_filter(file);

		/* Make sure the filter is not being used */
		synchronize_sched();
		__free_filter(filter);

		return 0;
	}

	err = create_filter(call, filter_string, true, &filter);

	/*
	 * Always swap the call filter with the new filter
	 * even if there was an error. If there was an error
	 * in the filter, we disable the filter and show the error
	 * string
	 */
	if (filter) {
		struct event_filter *tmp;

		tmp = event_filter(file);
		if (!err)
			event_set_filtered_flag(file);
		else
			filter_disable(file);

		event_set_filter(file, filter);

		if (tmp) {
			/* Make sure the call is done with the filter */
			synchronize_sched();
			__free_filter(tmp);
		}
	}

	return err;
}
2026 
/*
 * Install (or clear, when the string is "0") the shared filter for a
 * whole subsystem directory.  Takes event_mutex itself; fails with
 * -ENODEV if the subsystem no longer has any events.
 */
int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
				 char *filter_string)
{
	struct event_subsystem *system = dir->subsystem;
	struct trace_array *tr = dir->tr;
	struct event_filter *filter;
	int err = 0;

	mutex_lock(&event_mutex);

	/* Make sure the system still has events */
	if (!dir->nr_events) {
		err = -ENODEV;
		goto out_unlock;
	}

	/* "0" means remove the subsystem filter and all per-event copies */
	if (!strcmp(strstrip(filter_string), "0")) {
		filter_free_subsystem_preds(system, tr);
		remove_filter_string(system->filter);
		filter = system->filter;
		system->filter = NULL;
		/* Ensure all filters are no longer used */
		synchronize_sched();
		filter_free_subsystem_filters(system, tr);
		__free_filter(filter);
		goto out_unlock;
	}

	err = create_system_filter(system, tr, filter_string, &filter);
	if (filter) {
		/*
		 * No event actually uses the system filter
		 * we can free it without synchronize_sched().
		 */
		__free_filter(system->filter);
		system->filter = filter;
	}
out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
2069 
2070 #ifdef CONFIG_PERF_EVENTS
2071 
2072 void ftrace_profile_free_filter(struct perf_event *event)
2073 {
2074 	struct event_filter *filter = event->filter;
2075 
2076 	event->filter = NULL;
2077 	__free_filter(filter);
2078 }
2079 
/* State threaded through the function-trace filter setup callbacks. */
struct function_filter_data {
	struct ftrace_ops *ops;	/* ftrace ops the regexps are applied to */
	int first_filter;	/* reset the filter list on the first '==' regexp */
	int first_notrace;	/* reset the notrace list on the first '!=' regexp */
};
2085 
2086 #ifdef CONFIG_FUNCTION_TRACER
2087 static char **
2088 ftrace_function_filter_re(char *buf, int len, int *count)
2089 {
2090 	char *str, *sep, **re;
2091 
2092 	str = kstrndup(buf, len, GFP_KERNEL);
2093 	if (!str)
2094 		return NULL;
2095 
2096 	/*
2097 	 * The argv_split function takes white space
2098 	 * as a separator, so convert ',' into spaces.
2099 	 */
2100 	while ((sep = strchr(str, ',')))
2101 		*sep = ' ';
2102 
2103 	re = argv_split(GFP_KERNEL, str, count);
2104 	kfree(str);
2105 	return re;
2106 }
2107 
/* Apply one regexp to either the filter or the notrace list of @ops. */
static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter,
				      int reset, char *re, int len)
{
	return filter ? ftrace_set_filter(ops, re, len, reset)
		      : ftrace_set_notrace(ops, re, len, reset);
}
2120 
2121 static int __ftrace_function_set_filter(int filter, char *buf, int len,
2122 					struct function_filter_data *data)
2123 {
2124 	int i, re_cnt, ret = -EINVAL;
2125 	int *reset;
2126 	char **re;
2127 
2128 	reset = filter ? &data->first_filter : &data->first_notrace;
2129 
2130 	/*
2131 	 * The 'ip' field could have multiple filters set, separated
2132 	 * either by space or comma. We first cut the filter and apply
2133 	 * all pieces separatelly.
2134 	 */
2135 	re = ftrace_function_filter_re(buf, len, &re_cnt);
2136 	if (!re)
2137 		return -EINVAL;
2138 
2139 	for (i = 0; i < re_cnt; i++) {
2140 		ret = ftrace_function_set_regexp(data->ops, filter, *reset,
2141 						 re[i], strlen(re[i]));
2142 		if (ret)
2143 			break;
2144 
2145 		if (*reset)
2146 			*reset = 0;
2147 	}
2148 
2149 	argv_free(re);
2150 	return ret;
2151 }
2152 
2153 static int ftrace_function_check_pred(struct filter_pred *pred, int leaf)
2154 {
2155 	struct ftrace_event_field *field = pred->field;
2156 
2157 	if (leaf) {
2158 		/*
2159 		 * Check the leaf predicate for function trace, verify:
2160 		 *  - only '==' and '!=' is used
2161 		 *  - the 'ip' field is used
2162 		 */
2163 		if ((pred->op != OP_EQ) && (pred->op != OP_NE))
2164 			return -EINVAL;
2165 
2166 		if (strcmp(field->name, "ip"))
2167 			return -EINVAL;
2168 	} else {
2169 		/*
2170 		 * Check the non leaf predicate for function trace, verify:
2171 		 *  - only '||' is used
2172 		*/
2173 		if (pred->op != OP_OR)
2174 			return -EINVAL;
2175 	}
2176 
2177 	return 0;
2178 }
2179 
2180 static int ftrace_function_set_filter_cb(enum move_type move,
2181 					 struct filter_pred *pred,
2182 					 int *err, void *data)
2183 {
2184 	/* Checking the node is valid for function trace. */
2185 	if ((move != MOVE_DOWN) ||
2186 	    (pred->left != FILTER_PRED_INVALID)) {
2187 		*err = ftrace_function_check_pred(pred, 0);
2188 	} else {
2189 		*err = ftrace_function_check_pred(pred, 1);
2190 		if (*err)
2191 			return WALK_PRED_ABORT;
2192 
2193 		*err = __ftrace_function_set_filter(pred->op == OP_EQ,
2194 						    pred->regex.pattern,
2195 						    pred->regex.len,
2196 						    data);
2197 	}
2198 
2199 	return (*err) ? WALK_PRED_ABORT : WALK_PRED_DEFAULT;
2200 }
2201 
2202 static int ftrace_function_set_filter(struct perf_event *event,
2203 				      struct event_filter *filter)
2204 {
2205 	struct function_filter_data data = {
2206 		.first_filter  = 1,
2207 		.first_notrace = 1,
2208 		.ops           = &event->ftrace_ops,
2209 	};
2210 
2211 	return walk_pred_tree(filter->preds, filter->root,
2212 			      ftrace_function_set_filter_cb, &data);
2213 }
2214 #else
/* Function tracer not built in: function filters cannot be set. */
static int ftrace_function_set_filter(struct perf_event *event,
				      struct event_filter *filter)
{
	return -ENODEV;
}
2220 #endif /* CONFIG_FUNCTION_TRACER */
2221 
/*
 * Attach a filter string to a perf event.  For function events the
 * parsed tree is translated into ftrace filter/notrace regexps and the
 * event_filter itself is discarded; for everything else the filter is
 * kept on event->filter.  Fails with -EEXIST if a filter is already
 * attached.
 */
int ftrace_profile_set_filter(struct perf_event *event, int event_id,
			      char *filter_str)
{
	int err;
	struct event_filter *filter;
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);

	call = event->tp_event;

	err = -EINVAL;
	if (!call)
		goto out_unlock;

	err = -EEXIST;
	if (event->filter)
		goto out_unlock;

	err = create_filter(call, filter_str, false, &filter);
	if (err)
		goto free_filter;

	if (ftrace_event_is_function(call))
		err = ftrace_function_set_filter(event, filter);
	else
		event->filter = filter;

free_filter:
	/*
	 * The filter is freed on error, and for function events even
	 * on success (its contents were consumed by ftrace above).
	 */
	if (err || ftrace_event_is_function(call))
		__free_filter(filter);

out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
2259 
2260 #endif /* CONFIG_PERF_EVENTS */
2261 
2262 #ifdef CONFIG_FTRACE_STARTUP_TEST
2263 
2264 #include <linux/types.h>
2265 #include <linux/tracepoint.h>
2266 
2267 #define CREATE_TRACE_POINTS
2268 #include "trace_events_filter_test.h"
2269 
/*
 * Build one self-test record: the current FILTER string, a synthetic
 * event record (fields a-h), the expected match result, and the set of
 * field names that must NOT be visited thanks to short-circuit
 * evaluation.
 */
#define DATA_REC(m, va, vb, vc, vd, ve, vf, vg, vh, nvisit) \
{ \
	.filter = FILTER, \
	.rec    = { .a = va, .b = vb, .c = vc, .d = vd, \
		    .e = ve, .f = vf, .g = vg, .h = vh }, \
	.match  = m, \
	.not_visited = nvisit, \
}
#define YES 1
#define NO  0

/* Table of filter expressions with records and expected outcomes. */
static struct test_filter_data_t {
	char *filter;
	struct ftrace_raw_ftrace_test_filter rec;
	int match;
	char *not_visited;
} test_filter_data[] = {
#define FILTER "a == 1 && b == 1 && c == 1 && d == 1 && " \
	       "e == 1 && f == 1 && g == 1 && h == 1"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, ""),
	DATA_REC(NO,  0, 1, 1, 1, 1, 1, 1, 1, "bcdefgh"),
	DATA_REC(NO,  1, 1, 1, 1, 1, 1, 1, 0, ""),
#undef FILTER
#define FILTER "a == 1 || b == 1 || c == 1 || d == 1 || " \
	       "e == 1 || f == 1 || g == 1 || h == 1"
	DATA_REC(NO,  0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 0, 0, 0, 0, 0, 0, 0, "bcdefgh"),
#undef FILTER
#define FILTER "(a == 1 || b == 1) && (c == 1 || d == 1) && " \
	       "(e == 1 || f == 1) && (g == 1 || h == 1)"
	DATA_REC(NO,  0, 0, 1, 1, 1, 1, 1, 1, "dfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 0, 1, 0, 1, "bd"),
	DATA_REC(NO,  1, 0, 1, 0, 0, 1, 0, 0, "bd"),
#undef FILTER
#define FILTER "(a == 1 && b == 1) || (c == 1 && d == 1) || " \
	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 0, 1, 1, 1, 1, 1, 1, "efgh"),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 1, 1, ""),
	DATA_REC(NO,  0, 0, 0, 0, 0, 0, 0, 1, ""),
#undef FILTER
#define FILTER "(a == 1 && b == 1) && (c == 1 && d == 1) && " \
	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 0, "gh"),
	DATA_REC(NO,  0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, ""),
#undef FILTER
#define FILTER "((a == 1 || b == 1) || (c == 1 || d == 1) || " \
	       "(e == 1 || f == 1)) && (g == 1 || h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 1, "bcdef"),
	DATA_REC(NO,  0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, "h"),
#undef FILTER
#define FILTER "((((((((a == 1) && (b == 1)) || (c == 1)) && (d == 1)) || " \
	       "(e == 1)) && (f == 1)) || (g == 1)) && (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "ceg"),
	DATA_REC(NO,  0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(NO,  1, 0, 1, 0, 1, 0, 1, 0, ""),
#undef FILTER
#define FILTER "((((((((a == 1) || (b == 1)) && (c == 1)) || (d == 1)) && " \
	       "(e == 1)) || (f == 1)) && (g == 1)) || (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "bdfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 1, 0, 1, 0, "bdfh"),
};

#undef DATA_REC
#undef FILTER
#undef YES
#undef NO

/* Number of entries in test_filter_data[]. */
#define DATA_CNT (sizeof(test_filter_data)/sizeof(struct test_filter_data_t))
2343 
/* Set by test_pred_visited_fn() when an instrumented predicate runs. */
static int test_pred_visited;

/*
 * Replacement compare function installed on predicates that the
 * self-test expects to be skipped; records that one was (wrongly)
 * evaluated.
 */
static int test_pred_visited_fn(struct filter_pred *pred, void *event)
{
	struct ftrace_event_field *field = pred->field;

	test_pred_visited = 1;
	printk(KERN_INFO "\npred visited %s\n", field->name);
	return 1;
}
2354 
2355 static int test_walk_pred_cb(enum move_type move, struct filter_pred *pred,
2356 			     int *err, void *data)
2357 {
2358 	char *fields = data;
2359 
2360 	if ((move == MOVE_DOWN) &&
2361 	    (pred->left == FILTER_PRED_INVALID)) {
2362 		struct ftrace_event_field *field = pred->field;
2363 
2364 		if (!field) {
2365 			WARN(1, "all leafs should have field defined");
2366 			return WALK_PRED_DEFAULT;
2367 		}
2368 		if (!strchr(fields, *field->name))
2369 			return WALK_PRED_DEFAULT;
2370 
2371 		WARN_ON(!pred->fn);
2372 		pred->fn = test_pred_visited_fn;
2373 	}
2374 	return WALK_PRED_DEFAULT;
2375 }
2376 
/*
 * Boot-time self-test for the event filter engine.  For each entry in
 * test_filter_data[]: parse the filter string, optionally rig the
 * predicates named in ->not_visited so evaluating them is detected,
 * then match the filter against the test record and compare with the
 * expected result.  Stops at the first failure; prints "OK" only if
 * every case passed.  Always returns 0 (initcall convention).
 */
static __init int ftrace_test_event_filter(void)
{
	int i;

	printk(KERN_INFO "Testing ftrace filter: ");

	for (i = 0; i < DATA_CNT; i++) {
		struct event_filter *filter = NULL;
		struct test_filter_data_t *d = &test_filter_data[i];
		int err;

		err = create_filter(&event_ftrace_test_filter, d->filter,
				    false, &filter);
		if (err) {
			printk(KERN_INFO
			       "Failed to get filter for '%s', err %d\n",
			       d->filter, err);
			/* create_filter() may leave a partial filter behind. */
			__free_filter(filter);
			break;
		}

		/*
		 * The preemption disabling is not really needed for self
		 * tests, but the rcu dereference will complain without it.
		 */
		preempt_disable();
		/* Rig the "must not be visited" predicates before matching. */
		if (*d->not_visited)
			walk_pred_tree(filter->preds, filter->root,
				       test_walk_pred_cb,
				       d->not_visited);

		test_pred_visited = 0;
		err = filter_match_preds(filter, &d->rec);
		preempt_enable();

		__free_filter(filter);

		/* Short-circuiting must have skipped the rigged predicates. */
		if (test_pred_visited) {
			printk(KERN_INFO
			       "Failed, unwanted pred visited for filter %s\n",
			       d->filter);
			break;
		}

		/* Match result must agree with the table's expectation. */
		if (err != d->match) {
			printk(KERN_INFO
			       "Failed to match filter '%s', expected %d\n",
			       d->filter, d->match);
			break;
		}
	}

	/* Only reached without break if all cases passed. */
	if (i == DATA_CNT)
		printk(KERN_CONT "OK\n");

	return 0;
}
2434 
/* Run the filter self-tests late in boot, after tracing is initialized. */
late_initcall(ftrace_test_event_filter);
2436 
2437 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2438