/*
 * Tracepoint definitions for the i915 DRM driver.
 *
 * NOTE: the unusual include guard below is intentional.  The tracepoint
 * machinery in <trace/define_trace.h> (included at the bottom of this
 * file) defines TRACE_HEADER_MULTI_READ and re-reads this header a second
 * time to expand the TRACE_EVENT() invocations into actual code, so the
 * guard must admit that second pass.
 */
#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _I915_TRACE_H_

#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/tracepoint.h>

#include <drm/drmP.h>
#include "i915_drv.h"
#include "intel_ringbuffer.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
#define TRACE_INCLUDE_FILE i915_trace

/* object tracking */

/* Emitted when a new GEM object is allocated; records its pointer and size. */
TRACE_EVENT(i915_gem_object_create,
	    TP_PROTO(struct drm_i915_gem_object *obj),
	    TP_ARGS(obj),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u32, size)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->size = obj->base.size;
			   ),

	    TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
);

/*
 * Emitted when an object is bound into the GTT.  The offset/size come from
 * the object's gtt_space node, so the caller must have already assigned
 * obj->gtt_space before firing this event.
 */
TRACE_EVENT(i915_gem_object_bind,
	    TP_PROTO(struct drm_i915_gem_object *obj, bool mappable),
	    TP_ARGS(obj, mappable),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u32, offset)
			     __field(u32, size)
			     __field(bool, mappable)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->offset = obj->gtt_space->start;
			   __entry->size = obj->gtt_space->size;
			   __entry->mappable = mappable;
			   ),

	    TP_printk("obj=%p, offset=%08x size=%x%s",
		      __entry->obj, __entry->offset, __entry->size,
		      __entry->mappable ? ", mappable" : "")
);

/* Emitted when an object is unbound from the GTT (gtt_space still valid). */
TRACE_EVENT(i915_gem_object_unbind,
	    TP_PROTO(struct drm_i915_gem_object *obj),
	    TP_ARGS(obj),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u32, offset)
			     __field(u32, size)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->offset = obj->gtt_space->start;
			   __entry->size = obj->gtt_space->size;
			   ),

	    TP_printk("obj=%p, offset=%08x size=%x",
		      __entry->obj, __entry->offset, __entry->size)
);

/*
 * Tracks cache-domain transitions.  Both the old and the new domain masks
 * are packed into a single u32 per direction: the old value is stored in
 * the high 16 bits and the new value in the low 16 bits, which is why the
 * printk below shifts/masks to show an "old => new" pair.
 */
TRACE_EVENT(i915_gem_object_change_domain,
	    TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
	    TP_ARGS(obj, old_read, old_write),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u32, read_domains)
			     __field(u32, write_domain)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->read_domains = obj->base.read_domains | (old_read << 16);
			   __entry->write_domain = obj->base.write_domain | (old_write << 16);
			   ),

	    TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x",
		      __entry->obj,
		      __entry->read_domains >> 16,
		      __entry->read_domains & 0xffff,
		      __entry->write_domain >> 16,
		      __entry->write_domain & 0xffff)
);

/* Emitted for a pwrite ioctl: byte offset and length within the object. */
TRACE_EVENT(i915_gem_object_pwrite,
	    TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
	    TP_ARGS(obj, offset, len),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u32, offset)
			     __field(u32, len)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->offset = offset;
			   __entry->len = len;
			   ),

	    TP_printk("obj=%p, offset=%u, len=%u",
		      __entry->obj, __entry->offset, __entry->len)
);

/* Emitted for a pread ioctl: byte offset and length within the object. */
TRACE_EVENT(i915_gem_object_pread,
	    TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
	    TP_ARGS(obj, offset, len),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u32, offset)
			     __field(u32, len)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->offset = offset;
			   __entry->len = len;
			   ),

	    TP_printk("obj=%p, offset=%u, len=%u",
		      __entry->obj, __entry->offset, __entry->len)
);

/*
 * Emitted on a page fault against an object mapping.  @gtt distinguishes a
 * GTT-aperture fault from a CPU mmap fault; @index is the page index that
 * faulted; @write indicates a write fault.
 */
TRACE_EVENT(i915_gem_object_fault,
	    TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write),
	    TP_ARGS(obj, index, gtt, write),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u32, index)
			     __field(bool, gtt)
			     __field(bool, write)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->index = index;
			   __entry->gtt = gtt;
			   __entry->write = write;
			   ),

	    TP_printk("obj=%p, %s index=%u %s",
		      __entry->obj,
		      __entry->gtt ? "GTT" : "CPU",
		      __entry->index,
		      __entry->write ? ", writable" : "")
);

/*
 * Shared event class for tracepoints whose only payload is the object
 * pointer; used below for clflush and destroy.
 */
DECLARE_EVENT_CLASS(i915_gem_object,
	    TP_PROTO(struct drm_i915_gem_object *obj),
	    TP_ARGS(obj),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   ),

	    TP_printk("obj=%p", __entry->obj)
);

/* Object's cachelines are being flushed. */
DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
	     TP_PROTO(struct drm_i915_gem_object *obj),
	     TP_ARGS(obj)
);

/* Object is being freed. */
DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
	     TP_PROTO(struct drm_i915_gem_object *obj),
	     TP_ARGS(obj)
);

/*
 * Emitted when eviction is attempted to make room for a new binding of the
 * given size/alignment; @mappable indicates the request needs space in the
 * CPU-mappable aperture.
 */
TRACE_EVENT(i915_gem_evict,
	    TP_PROTO(struct drm_device *dev, u32 size, u32 align, bool mappable),
	    TP_ARGS(dev, size, align, mappable),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, size)
			     __field(u32, align)
			     __field(bool, mappable)
			     ),

	    TP_fast_assign(
			   __entry->dev = dev->primary->index;
			   __entry->size = size;
			   __entry->align = align;
			   __entry->mappable = mappable;
			   ),

	    TP_printk("dev=%d, size=%d, align=%d %s",
		      __entry->dev, __entry->size, __entry->align,
		      __entry->mappable ? ", mappable" : "")
);

/* Emitted when the driver evicts everything from the GTT. */
TRACE_EVENT(i915_gem_evict_everything,
	    TP_PROTO(struct drm_device *dev),
	    TP_ARGS(dev),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     ),

	    TP_fast_assign(
			   __entry->dev = dev->primary->index;
			   ),

	    TP_printk("dev=%d", __entry->dev)
);

/*
 * Emitted when a batch is dispatched to a ring.  Note this tracepoint has a
 * side effect beyond logging: i915_trace_irq_get() is called so that
 * completion of this seqno can be observed (e.g. by the request_complete
 * event) — it is not a pure record-and-return event.
 */
TRACE_EVENT(i915_gem_ring_dispatch,
	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
	    TP_ARGS(ring, seqno),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, ring)
			     __field(u32, seqno)
			     ),

	    TP_fast_assign(
			   __entry->dev = ring->dev->primary->index;
			   __entry->ring = ring->id;
			   __entry->seqno = seqno;
			   i915_trace_irq_get(ring, seqno);
			   ),

	    TP_printk("dev=%u, ring=%u, seqno=%u",
		      __entry->dev, __entry->ring, __entry->seqno)
);

/* Emitted on a ring flush; records the invalidate and flush domain masks. */
TRACE_EVENT(i915_gem_ring_flush,
	    TP_PROTO(struct intel_ring_buffer *ring, u32 invalidate, u32 flush),
	    TP_ARGS(ring, invalidate, flush),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, ring)
			     __field(u32, invalidate)
			     __field(u32, flush)
			     ),

	    TP_fast_assign(
			   __entry->dev = ring->dev->primary->index;
			   __entry->ring = ring->id;
			   __entry->invalidate = invalidate;
			   __entry->flush = flush;
			   ),

	    TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
		      __entry->dev, __entry->ring,
		      __entry->invalidate, __entry->flush)
);

/*
 * Shared event class for request-lifecycle tracepoints keyed by
 * (device, ring, seqno); used below for add/complete/retire/wait_end.
 */
DECLARE_EVENT_CLASS(i915_gem_request,
	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
	    TP_ARGS(ring, seqno),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, ring)
			     __field(u32, seqno)
			     ),

	    TP_fast_assign(
			   __entry->dev = ring->dev->primary->index;
			   __entry->ring = ring->id;
			   __entry->seqno = seqno;
			   ),

	    TP_printk("dev=%u, ring=%u, seqno=%u",
		      __entry->dev, __entry->ring, __entry->seqno)
);

/* A new request was queued to the ring. */
DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
	     TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
	     TP_ARGS(ring, seqno)
);

/* The GPU reported completion of the request's seqno. */
DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
	     TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
	     TP_ARGS(ring, seqno)
);

/* The completed request was retired and its resources released. */
DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
	     TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
	     TP_ARGS(ring, seqno)
);

/*
 * Emitted when a wait on a request begins.  Unlike the other request
 * events this one additionally records whether the waiter appears to hold
 * struct_mutex (see the NB below about why that is only approximate).
 */
TRACE_EVENT(i915_gem_request_wait_begin,
	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
	    TP_ARGS(ring, seqno),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, ring)
			     __field(u32, seqno)
			     __field(bool, blocking)
			     ),

	    /* NB: the blocking information is racy since mutex_is_locked
	     * doesn't check that the current thread holds the lock. The only
	     * other option would be to pass the boolean information of whether
	     * or not the class was blocking down through the stack which is
	     * less desirable.
	     */
	    TP_fast_assign(
			   __entry->dev = ring->dev->primary->index;
			   __entry->ring = ring->id;
			   __entry->seqno = seqno;
			   __entry->blocking = mutex_is_locked(&ring->dev->struct_mutex);
			   ),

	    TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
		      __entry->dev, __entry->ring, __entry->seqno,
		      __entry->blocking ? "yes (NB)" : "no")
);

/* The wait on the request finished (completed, interrupted, or timed out). */
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
	     TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
	     TP_ARGS(ring, seqno)
);

/* Shared event class recording just (device, ring); used for ring waits. */
DECLARE_EVENT_CLASS(i915_ring,
	    TP_PROTO(struct intel_ring_buffer *ring),
	    TP_ARGS(ring),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, ring)
			     ),

	    TP_fast_assign(
			   __entry->dev = ring->dev->primary->index;
			   __entry->ring = ring->id;
			   ),

	    TP_printk("dev=%u, ring=%u", __entry->dev, __entry->ring)
);

/* Started waiting for space on the ring. */
DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
	     TP_PROTO(struct intel_ring_buffer *ring),
	     TP_ARGS(ring)
);

/* Finished waiting for space on the ring. */
DEFINE_EVENT(i915_ring, i915_ring_wait_end,
	     TP_PROTO(struct intel_ring_buffer *ring),
	     TP_ARGS(ring)
);

/* A page flip was requested for the given plane with the given backing object. */
TRACE_EVENT(i915_flip_request,
	    TP_PROTO(int plane, struct drm_i915_gem_object *obj),

	    TP_ARGS(plane, obj),

	    TP_STRUCT__entry(
			     __field(int, plane)
			     __field(struct drm_i915_gem_object *, obj)
			     ),

	    TP_fast_assign(
			   __entry->plane = plane;
			   __entry->obj = obj;
			   ),

	    TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);

/* The requested page flip completed. */
TRACE_EVENT(i915_flip_complete,
	    TP_PROTO(int plane, struct drm_i915_gem_object *obj),

	    TP_ARGS(plane, obj),

	    TP_STRUCT__entry(
			     __field(int, plane)
			     __field(struct drm_i915_gem_object *, obj)
			     ),

	    TP_fast_assign(
			   __entry->plane = plane;
			   __entry->obj = obj;
			   ),

	    TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);

/*
 * Traces MMIO register accesses.  The value is always widened to u64 in the
 * record; the printk splits it back into (low, high) 32-bit halves.  @len
 * is the access width in bytes, and @write distinguishes writes from reads.
 */
TRACE_EVENT(i915_reg_rw,
	    TP_PROTO(bool write, u32 reg, u64 val, int len),

	    TP_ARGS(write, reg, val, len),

	    TP_STRUCT__entry(
			     __field(u64, val)
			     __field(u32, reg)
			     __field(u16, write)
			     __field(u16, len)
			     ),

	    TP_fast_assign(
			   __entry->val = (u64)val;
			   __entry->reg = reg;
			   __entry->write = write;
			   __entry->len = len;
			   ),

	    TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
		      __entry->write ? "write" : "read",
		      __entry->reg, __entry->len,
		      (u32)(__entry->val & 0xffffffff),
		      (u32)(__entry->val >> 32))
);

/* The GPU core frequency changed to @freq. */
TRACE_EVENT(intel_gpu_freq_change,
	    TP_PROTO(u32 freq),
	    TP_ARGS(freq),

	    TP_STRUCT__entry(
			     __field(u32, freq)
			     ),

	    TP_fast_assign(
			   __entry->freq = freq;
			   ),

	    TP_printk("new_freq=%u", __entry->freq)
);

#endif /* _I915_TRACE_H_ */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>