xref: /linux/arch/x86/events/perf_event.h (revision fbc872c38c8fed31948c85683b5326ee5ab9fccc)
/*
 * Performance events x86 architecture header
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

/* To enable MSR tracing, please use the generic trace points. */

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slots in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
	EXTRA_REG_NONE  = -1,	/* not used */

	EXTRA_REG_RSP_0 = 0,	/* offcore_response_0 */
	EXTRA_REG_RSP_1 = 1,	/* offcore_response_1 */
	EXTRA_REG_LBR   = 2,	/* lbr_select */
	EXTRA_REG_LDLAT = 3,	/* ld_lat_threshold */
	EXTRA_REG_FE    = 4,    /* fe_* */

	EXTRA_REG_MAX		/* number of entries needed */
};

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64	code;
	u64	cmask;
	int	weight;
	int	overlap;
	int	flags;
};
/*
 * struct hw_perf_event.flags flags
 */
#define PERF_X86_EVENT_PEBS_LDLAT	0x0001 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST		0x0002 /* st data address sampling */
#define PERF_X86_EVENT_PEBS_ST_HSW	0x0004 /* haswell style datala, store */
#define PERF_X86_EVENT_COMMITTED	0x0008 /* event passed commit_txn */
#define PERF_X86_EVENT_PEBS_LD_HSW	0x0010 /* haswell style datala, load */
#define PERF_X86_EVENT_PEBS_NA_HSW	0x0020 /* haswell style datala, unknown */
#define PERF_X86_EVENT_EXCL		0x0040 /* HT exclusivity on counter */
#define PERF_X86_EVENT_DYNAMIC		0x0080 /* dynamic alloc'd constraint */
#define PERF_X86_EVENT_RDPMC_ALLOWED	0x0100 /* grant rdpmc permission */
#define PERF_X86_EVENT_EXCL_ACCT	0x0200 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD	0x0400 /* use PEBS auto-reload */
#define PERF_X86_EVENT_FREERUNNING	0x0800 /* use freerunning PEBS */


struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS		8

/*
 * Flags PEBS can handle without a PMI.
 *
 * TID can only be handled by flushing at context switch.
 *
 */
#define PEBS_FREERUNNING_FLAGS \
	(PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
	PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
	PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
	PERF_SAMPLE_TRANSACTION)

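/*
 * Illustrative only: roughly the check done in intel_pmu_hw_config() to
 * decide whether an event may use free running PEBS; the event must not
 * request any sample_type bits outside the set above.
 *
 *	if (!(event->attr.sample_type & ~PEBS_FREERUNNING_FLAGS))
 *		event->hw.flags |= PERF_X86_EVENT_FREERUNNING;
 */
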
/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};

/*
 * Per register state.
 */
struct er_account {
	raw_spinlock_t		lock;	/* per-core: protect structure */
	u64                 config;	/* extra MSR config */
	u64                 reg;	/* extra MSR number */
	atomic_t            ref;	/* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
	struct er_account       regs[EXTRA_REG_MAX];
	int                     refcnt;		/* per-core: #HT threads */
	unsigned                core_id;	/* per-core: core id */
};

enum intel_excl_state_type {
	INTEL_EXCL_UNUSED    = 0, /* counter is unused */
	INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
	INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
};

struct intel_excl_states {
	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
	bool sched_started; /* true if scheduling has started */
};

struct intel_excl_cntrs {
	raw_spinlock_t	lock;

	struct intel_excl_states states[2];

	union {
		u16	has_exclusive[2];
		u32	exclusive_present;
	};

	int		refcnt;		/* per-core: #HT threads */
	unsigned	core_id;	/* per-core: core id */
};

#define MAX_LBR_ENTRIES		32

enum {
	X86_PERF_KFREE_SHARED = 0,
	X86_PERF_KFREE_EXCL   = 1,
	X86_PERF_KFREE_MAX
};

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events; /* the # of events in the below arrays */
	int			n_added;  /* the # last events in the below arrays;
					     they've never been enabled yet */
	int			n_txn;    /* the # last events in the below arrays;
					     added in the current transaction */
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];

	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
	struct event_constraint	*event_constraint[X86_PMC_IDX_MAX];

	int			n_excl; /* the number of exclusive events */

	unsigned int		txn_flags;
	int			is_fake;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	u64			pebs_enabled;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	void				*lbr_context;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
	struct er_account		*lbr_sel;
	u64				br_sel;

	/*
	 * Intel host/guest exclude bits
	 */
	u64				intel_ctrl_guest_mask;
	u64				intel_ctrl_host_mask;
	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];

	/*
	 * Intel checkpoint mask
	 */
	u64				intel_cp_status;

	/*
	 * manage shared (per-core, per-cpu) registers
	 * used on Intel NHM/WSM/SNB
	 */
	struct intel_shared_regs	*shared_regs;
	/*
	 * manage exclusive counter access between hyperthreads
	 */
	struct event_constraint *constraint_list; /* in enable order */
	struct intel_excl_cntrs		*excl_cntrs;
	int excl_thread_id; /* 0 or 1 */

	/*
	 * AMD specific bits
	 */
	struct amd_nb			*amd_nb;
	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
	u64				perf_ctr_virt_mask;

	void				*kfree_on_online[X86_PERF_KFREE_MAX];
};

#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
	.overlap = (o),			\
	.flags = f,			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)

#define INTEL_EXCLEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
			   0, PERF_X86_EVENT_EXCL)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it does not yet know which subsequent events will be
 * scheduled. It may then fail to schedule the events. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 *
 * Care must be taken as the rescheduling algorithm is O(n!) which
 * will increase scheduling cycles for an over-committed system
 * dramatically.  The number of such EVENT_CONSTRAINT_OVERLAP() macros
 * and their counter masks must be kept to a minimum.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

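/*
 * For illustration only: a constraint restricting event code 0x3c to the
 * first two general purpose counters could be written as
 *
 *	INTEL_EVENT_CONSTRAINT(0x3c, 0x3)
 *
 * where 0x3 is the counter bitmask; the weight (HWEIGHT(0x3) == 2) is
 * filled in by the EVENT_CONSTRAINT() macro.
 */
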
/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * Filter mask used to validate fixed counter events.
 * The following filters disqualify an event for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 *  The other filters are supported by fixed counters.
 *  The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)

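/*
 * Example: fixed counter 0 counts INST_RETIRED.ANY, so the Intel
 * constraint tables use
 *
 *	FIXED_EVENT_CONSTRAINT(0x00c0, 0)
 *
 * which places event 0x00c0 at bit 32 + 0 of the counter bitmask, i.e.
 * the first fixed counter.
 */
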
/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

/* Constraint on specific umask bit only + event */
#define INTEL_UBIT_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))

/* Like UEVENT_CONSTRAINT, but match flags too */
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

#define INTEL_EXCLUEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)

#define INTEL_PLD_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PST_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)

/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n)	\
	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

/* Check flags and event code, and set the HSW store flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n, 			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n, 			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n, 			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
	__EVENT_CONSTRAINT(code, n, 			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)


/*
 * We define the end marker as having a weight of -1
 * to enable blacklisting of events using a counter bitmask
 * of zero and thus a weight of zero.
 * The end marker has a weight that cannot possibly be
 * obtained from counting the bits in the bitmask.
 */
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
 * Check for end marker with weight == -1
 */
#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight != -1; (e)++)

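/*
 * Typical use (roughly what x86_get_event_constraints() does): walk a
 * table terminated by EVENT_CONSTRAINT_END and return the first entry
 * whose code/cmask pair matches the event.
 *
 *	struct event_constraint *c;
 *
 *	if (x86_pmu.event_constraints) {
 *		for_each_event_constraint(c, x86_pmu.event_constraints) {
 *			if ((event->hw.config & c->cmask) == c->code) {
 *				event->hw.flags |= c->flags;
 *				return c;
 *			}
 *		}
 *	}
 */
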
/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between the PMUs of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared regs. The data structure
 * to manage those registers is stored in cpu_hw_events.
 */
struct extra_reg {
	unsigned int		event;
	unsigned int		msr;
	u64			config_mask;
	u64			valid_mask;
	int			idx;  /* per_xxx->regs[] reg index */
	bool			extra_msr_access;
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event = (e),			\
	.msr = (ms),			\
	.config_mask = (m),		\
	.valid_mask = (vm),		\
	.idx = EXTRA_REG_##i,		\
	.extra_msr_access = true,	\
	}

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
			ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
	INTEL_UEVENT_EXTRA_REG(c, \
			       MSR_PEBS_LD_LAT_THRESHOLD, \
			       0xffff, \
			       LDLAT)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)

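/*
 * For illustration only (the event codes and valid masks below are made
 * up): a model's extra_regs table is an array terminated by
 * EVENT_EXTRA_END, e.g.
 *
 *	static struct extra_reg intel_foo_extra_regs[] __read_mostly = {
 *		INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
 *		INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 *		EVENT_EXTRA_END
 *	};
 */
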
union perf_capabilities {
	struct {
		u64	lbr_format:6;
		u64	pebs_trap:1;
		u64	pebs_arch_reg:1;
		u64	pebs_format:4;
		u64	smm_freeze:1;
		/*
		 * PMU supports separate counter range for writing
		 * values > 32bit.
		 */
		u64	full_width_write:1;
	};
	u64	capabilities;
};

struct x86_pmu_quirk {
	struct x86_pmu_quirk *next;
	void (*func)(void);
};

union x86_pmu_config {
	struct {
		u64 event:8,
		    umask:8,
		    usr:1,
		    os:1,
		    edge:1,
		    pc:1,
		    interrupt:1,
		    __reserved1:1,
		    en:1,
		    inv:1,
		    cmask:8,
		    event2:4,
		    __reserved2:4,
		    go:1,
		    ho:1;
	} bits;
	u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value

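/*
 * For illustration: X86_CONFIG() builds a raw config value from named
 * bit fields, e.g.
 *
 *	X86_CONFIG(.event=0xc0, .umask=0x01, .inv=0, .cmask=0)
 *
 * evaluates to 0x01c0, the encoding of event 0xc0 with umask 0x01.
 */
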
enum {
	x86_lbr_exclusive_lbr,
	x86_lbr_exclusive_bts,
	x86_lbr_exclusive_pt,
	x86_lbr_exclusive_max,
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	int		(*addr_offset)(int index, bool eventsel);
	int		(*rdpmc_index)(int index);
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	union {
			unsigned long events_maskl;
			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
	};
	int		events_mask_len;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 int idx,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*start_scheduling)(struct cpu_hw_events *cpuc);

	void		(*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);

	void		(*stop_scheduling)(struct cpu_hw_events *cpuc);

	struct event_constraint *event_constraints;
	struct x86_pmu_quirk *quirks;
	int		perfctr_second_write;
	bool		late_ack;
	unsigned	(*limit_period)(struct perf_event *event, unsigned l);

	/*
	 * sysfs attrs
	 */
	int		attr_rdpmc_broken;
	int		attr_rdpmc;
	struct attribute **format_attrs;
	struct attribute **event_attrs;

	ssize_t		(*events_sysfs_show)(char *page, u64 config);
	struct attribute **cpu_events;

	/*
	 * CPU Hotplug hooks
	 */
	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	void		(*check_microcode)(void);
	void		(*sched_task)(struct perf_event_context *ctx,
				      bool sched_in);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64			intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	unsigned int	bts		:1,
			bts_active	:1,
			pebs		:1,
			pebs_active	:1,
			pebs_broken	:1,
			pebs_prec_dist	:1;
	int		pebs_record_size;
	int		pebs_buffer_size;
	void		(*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;
	void		(*pebs_aliases)(struct perf_event *event);
	int 		max_pebs_events;
	unsigned long	free_running_flags;

	/*
	 * Intel LBR
	 */
	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
	int		lbr_nr;			   /* hardware stack size */
	u64		lbr_sel_mask;		   /* LBR_SELECT valid bits */
	const int	*lbr_sel_map;		   /* lbr_select mappings */
	bool		lbr_double_abort;	   /* duplicated lbr aborts */
	bool		lbr_pt_coexist;		   /* LBR may coexist with PT */

	/*
	 * Intel PT/LBR/BTS are exclusive
	 */
	atomic_t	lbr_exclusive[x86_lbr_exclusive_max];

	/*
	 * AMD bits
	 */
	unsigned int	amd_nb_constraints : 1;

	/*
	 * Extra registers for events
	 */
	struct extra_reg *extra_regs;
	unsigned int flags;

	/*
	 * Intel host/guest support (KVM)
	 */
	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};

struct x86_perf_task_context {
	u64 lbr_from[MAX_LBR_ENTRIES];
	u64 lbr_to[MAX_LBR_ENTRIES];
	u64 lbr_info[MAX_LBR_ENTRIES];
	int tos;
	int lbr_callstack_users;
	int lbr_stack_state;
};

#define x86_add_quirk(func_)						\
do {									\
	static struct x86_pmu_quirk __quirk __initdata = {		\
		.func = func_,						\
	};								\
	__quirk.next = x86_pmu.quirks;					\
	x86_pmu.quirks = &__quirk;					\
} while (0)

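/*
 * Sketch of a user (intel_foo_quirk is a made up name; see the *_quirk()
 * functions in intel/core.c for real ones): model specific init code
 * registers a fixup that is run once while the PMU is brought up.
 *
 *	static __init void intel_foo_quirk(void)
 *	{
 *		pr_warn("foo erratum detected, applying workaround\n");
 *	}
 *	...
 *	x86_add_quirk(intel_foo_quirk);
 */
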
/*
 * x86_pmu flags
 */
#define PMU_FL_NO_HT_SHARING	0x1 /* no hyper-threading resource sharing */
#define PMU_FL_HAS_RSP_1	0x2 /* has 2 equivalent offcore_rsp regs   */
#define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements  */
#define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */

#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)						\
static struct perf_pmu_events_attr EVENT_VAR(_id) = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= PERF_COUNT_HW_##_id,				\
	.event_str	= NULL,						\
};

#define EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= 0,						\
	.event_str	= str,						\
};

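/*
 * For illustration, modelled on the Intel mem-loads alias: expose an
 * event alias in sysfs with a fixed encoding string,
 *
 *	EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
 *
 * which can then be referenced as EVENT_PTR(mem_ld_nhm) in a cpu_events
 * attribute array.
 */
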
extern struct x86_pmu x86_pmu __read_mostly;

static inline bool x86_pmu_has_lbr_callstack(void)
{
	return  x86_pmu.lbr_sel_map &&
		x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
}

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

u64 x86_perf_event_update(struct perf_event *event);

static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
				   x86_pmu.addr_offset(index, true) : index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
				  x86_pmu.addr_offset(index, false) : index);
}

static inline int x86_pmu_rdpmc_index(int index)
{
	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}

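/*
 * For illustration, assuming the architectural perfmon layout and no
 * addr_offset() callback: with x86_pmu.eventsel == MSR_ARCH_PERFMON_EVENTSEL0
 * (0x186) and x86_pmu.perfctr == MSR_ARCH_PERFMON_PERFCTR0 (0xc1),
 *
 *	x86_pmu_config_addr(2) == 0x188
 *	x86_pmu_event_addr(2)  == 0xc3
 */
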
int x86_add_exclusive(unsigned int what);

void x86_del_exclusive(unsigned int what);

int x86_reserve_hardware(void);

void x86_release_hardware(void);

void hw_perf_lbr_event_destroy(struct perf_event *event);

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

	if (hwc->extra_reg.reg)
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}

void x86_pmu_enable_all(int added);

int perf_assign_events(struct event_constraint **constraints, int n,
			int wmin, int wmax, int gpmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}

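/*
 * For example, on a 64-bit kernel kernel_ip(0xffffffff81000000) is true
 * (kernel addresses have the sign bit set), while a user address such as
 * 0x00007f0000000000 yields false.
 */
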
/*
 * Not all PMUs provide the right context information to place the reported IP
 * into full context. Specifically segment registers are typically not
 * supplied.
 *
 * Assuming the address is a linear address (it is for IBS), we fake the CS and
 * vm86 mode using the known zero-based code segment and 'fix up' the registers
 * to reflect this.
 *
 * Intel PEBS/LBR appear to typically provide the effective address, nothing
 * much we can do about that but pray and treat it like a linear address.
 */
static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
{
	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
	if (regs->flags & X86_VM_MASK)
		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
	regs->ip = ip;
}

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
ssize_t intel_event_sysfs_show(char *page, u64 config);

struct attribute **merge_attr(struct attribute **a, struct attribute **b);

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL

static inline bool intel_pmu_has_bts(struct perf_event *event)
{
	if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	    !event->attr.freq && event->hw.sample_period == 1)
		return true;

	return false;
}

int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event);

struct intel_shared_regs *allocate_shared_regs(int cpu);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

extern struct event_constraint bts_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_slm_pebs_event_constraints[];

extern struct event_constraint intel_glm_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

extern struct event_constraint intel_ivb_pebs_event_constraints[];

extern struct event_constraint intel_hsw_pebs_event_constraints[];

extern struct event_constraint intel_bdw_pebs_event_constraints[];

extern struct event_constraint intel_skl_pebs_event_constraints[];

struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);

void intel_ds_init(void);

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_enable(struct perf_event *event);

void intel_pmu_lbr_disable(struct perf_event *event);

void intel_pmu_lbr_enable_all(bool pmi);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_slm(void);

void intel_pmu_lbr_init_snb(void);

void intel_pmu_lbr_init_hsw(void);

void intel_pmu_lbr_init_skl(void);

void intel_pmu_lbr_init_knl(void);

void intel_pmu_pebs_data_source_nhm(void);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

void intel_pt_interrupt(void);

int intel_bts_interrupt(void);

void intel_bts_enable_local(void);

void intel_bts_disable_local(void);

int p4_pmu_init(void);

int p6_pmu_init(void);

int knc_pmu_init(void);

static inline int is_ht_workaround_enabled(void)
{
	return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
}

#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
	return 0;
}

static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	return NULL;
}

static inline int is_ht_workaround_enabled(void)
{
	return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */
971