#undef TRACE_SYSTEM
#define TRACE_SYSTEM kmem

#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KMEM_H

#include <linux/types.h>
#include <linux/tracepoint.h>
#include <trace/events/gfpflags.h>

DECLARE_EVENT_CLASS(kmem_alloc,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	gfp_t,		gfp_flags	)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= gfp_flags;
	),

	TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
		__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags))
);

DEFINE_EVENT(kmem_alloc, kmalloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);

DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);
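
/*
 * Example call sites (illustrative): the slab allocators emit these
 * events from their allocation paths. A SLUB-style caller looks roughly
 * like this; exact arguments vary by allocator:
 *
 *	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
 *	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size,
 *			       gfpflags);
 */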

DECLARE_EVENT_CLASS(kmem_alloc_node,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags,
		 int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	gfp_t,		gfp_flags	)
		__field(	int,		node		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= gfp_flags;
		__entry->node		= node;
	),

	TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
		__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags),
		__entry->node)
);

DEFINE_EVENT(kmem_alloc_node, kmalloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);

DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);
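
/*
 * Example call site (illustrative): the NUMA-aware variants also record
 * the target node, e.g. from a SLUB-style path:
 *
 *	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
 *
 * At runtime the event can be enabled through tracefs, for example:
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/kmem/kmalloc_node/enable
 */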

DECLARE_EVENT_CLASS(kmem_free,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
	),

	TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
);

DEFINE_EVENT(kmem_free, kfree,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr)
);

DEFINE_EVENT_CONDITION(kmem_free, kmem_cache_free,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr),
	/*
	 * This trace can potentially be called from an offlined cpu.
	 * Since tracepoints use RCU and RCU should not be used from
	 * offline cpus, filter such calls out.
	 * While this trace can be called from a preemptible section,
	 * it has no impact on the condition since tasks can migrate
	 * only from online cpus to other online cpus. Thus it's safe
	 * to use raw_smp_processor_id().
	 */
	TP_CONDITION(cpu_online(raw_smp_processor_id()))
);
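
/*
 * Example call sites (illustrative): kfree() and kmem_cache_free() emit
 * these events; the TP_CONDITION above drops kmem_cache_free events
 * fired on an offline cpu:
 *
 *	trace_kfree(_RET_IP_, x);
 *	trace_kmem_cache_free(_RET_IP_, x);
 */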

TRACE_EVENT_CONDITION(mm_page_free,

	TP_PROTO(struct page *page, unsigned int order),

	TP_ARGS(page, order),

	/*
	 * This trace can potentially be called from an offlined cpu.
	 * Since tracepoints use RCU and RCU should not be used from
	 * offline cpus, filter such calls out.
	 * While this trace can be called from a preemptible section,
	 * it has no impact on the condition since tasks can migrate
	 * only from online cpus to other online cpus. Thus it's safe
	 * to use raw_smp_processor_id().
	 */
	TP_CONDITION(cpu_online(raw_smp_processor_id())),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
	),

	TP_fast_assign(
		__entry->pfn		= page_to_pfn(page);
		__entry->order		= order;
	),

	TP_printk("page=%p pfn=%lu order=%d",
			pfn_to_page(__entry->pfn),
			__entry->pfn,
			__entry->order)
);
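
/*
 * Sample rendered line for this event (values are made up; format as in
 * TP_printk above):
 *
 *	mm_page_free: page=ffffea0000466000 pfn=72064 order=0
 */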

TRACE_EVENT(mm_page_free_batched,

	TP_PROTO(struct page *page, int cold),

	TP_ARGS(page, cold),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	int,		cold		)
	),

	TP_fast_assign(
		__entry->pfn		= page_to_pfn(page);
		__entry->cold		= cold;
	),

	TP_printk("page=%p pfn=%lu order=0 cold=%d",
			pfn_to_page(__entry->pfn),
			__entry->pfn,
			__entry->cold)
);
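
/*
 * Note: order=0 is hardcoded in the format string because the batched
 * free path (free_hot_cold_page_list()) only handles order-0 pages;
 * "cold" records whether the page is freed to the cold end of the
 * per-cpu list.
 */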

TRACE_EVENT(mm_page_alloc,

	TP_PROTO(struct page *page, unsigned int order,
			gfp_t gfp_flags, int migratetype),

	TP_ARGS(page, order, gfp_flags, migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	gfp_t,		gfp_flags	)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->gfp_flags	= gfp_flags;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0,
		__entry->order,
		__entry->migratetype,
		show_gfp_flags(__entry->gfp_flags))
);
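
/*
 * Example call site (illustrative): the page allocator emits this event
 * once per allocation attempt, with page == NULL on failure, roughly as
 * in __alloc_pages_nodemask():
 *
 *	trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
 */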

DECLARE_EVENT_CLASS(mm_page,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0,
		__entry->order,
		__entry->migratetype,
		__entry->order == 0)
);

DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype)
);
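
/*
 * Note: "percpu_refill" in the class format above is simply
 * (order == 0). Order-0 pages are taken under the zone lock only when
 * refilling the per-cpu lists (see rmqueue_bulk()), so the flag
 * distinguishes those refills from higher-order buddy allocations.
 */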

TRACE_EVENT_CONDITION(mm_page_pcpu_drain,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	/*
	 * This trace can potentially be called from an offlined cpu.
	 * Since tracepoints use RCU and RCU should not be used from
	 * offline cpus, filter such calls out.
	 * While this trace can be called from a preemptible section,
	 * it has no impact on the condition since tasks can migrate
	 * only from online cpus to other online cpus. Thus it's safe
	 * to use raw_smp_processor_id().
	 */
	TP_CONDITION(cpu_online(raw_smp_processor_id())),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
		pfn_to_page(__entry->pfn), __entry->pfn,
		__entry->order, __entry->migratetype)
);

TRACE_EVENT(mm_page_alloc_extfrag,

	TP_PROTO(struct page *page,
		int alloc_order, int fallback_order,
		int alloc_migratetype, int fallback_migratetype),

	TP_ARGS(page,
		alloc_order, fallback_order,
		alloc_migratetype, fallback_migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn			)
		__field(	int,		alloc_order		)
		__field(	int,		fallback_order		)
		__field(	int,		alloc_migratetype	)
		__field(	int,		fallback_migratetype	)
		__field(	int,		change_ownership	)
	),

	TP_fast_assign(
		__entry->pfn			= page_to_pfn(page);
		__entry->alloc_order		= alloc_order;
		__entry->fallback_order		= fallback_order;
		__entry->alloc_migratetype	= alloc_migratetype;
		__entry->fallback_migratetype	= fallback_migratetype;
		__entry->change_ownership	= (alloc_migratetype ==
					get_pageblock_migratetype(page));
	),

	TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
		pfn_to_page(__entry->pfn),
		__entry->pfn,
		__entry->alloc_order,
		__entry->fallback_order,
		pageblock_order,
		__entry->alloc_migratetype,
		__entry->fallback_migratetype,
		__entry->fallback_order < pageblock_order,
		__entry->change_ownership)
);
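
/*
 * Reading the derived fields: "fragmenting" is 1 when the fallback was
 * smaller than a pageblock, i.e. migratetypes now mix within one
 * pageblock; "change_ownership" is 1 when the whole pageblock was
 * converted to the allocation's migratetype. A sample rendered line
 * (values made up, wrapped for readability):
 *
 *	mm_page_alloc_extfrag: page=ffffea0000466000 pfn=72064
 *		alloc_order=3 fallback_order=3 pageblock_order=9
 *		alloc_migratetype=1 fallback_migratetype=0
 *		fragmenting=1 change_ownership=0
 */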

#endif /* _TRACE_KMEM_H */

/* This part must be outside protection */
#include <trace/define_trace.h>