xref: /linux/tools/perf/util/thread.c (revision 4413e16d9d21673bb5048a2e542f1aaa00015c2e)
#include "../perf.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include "session.h"
#include "thread.h"
#include "util.h"
#include "debug.h"

static struct thread *thread__new(pid_t pid)
{
	struct thread *self = zalloc(sizeof(*self));

	if (self != NULL) {
		map_groups__init(&self->mg);
		self->pid = pid;
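		/*
		 * Placeholder comm (":<pid>") until the real one is
		 * learned via thread__set_comm().
		 */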
		self->comm = malloc(32);
		if (self->comm)
			snprintf(self->comm, 32, ":%d", self->pid);
	}

	return self;
}

void thread__delete(struct thread *self)
{
	map_groups__exit(&self->mg);
	free(self->comm);
	free(self);
}

int thread__set_comm(struct thread *self, const char *comm)
{
	int err;

	if (self->comm)
		free(self->comm);
	self->comm = strdup(comm);
	err = self->comm == NULL ? -ENOMEM : 0;
	if (!err) {
		self->comm_set = true;
		map_groups__flush(&self->mg);
	}
	return err;
}

int thread__comm_len(struct thread *self)
{
	if (!self->comm_len) {
		if (!self->comm)
			return 0;
		self->comm_len = strlen(self->comm);
	}

	return self->comm_len;
}

static size_t thread__fprintf(struct thread *self, FILE *fp)
{
	return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) +
	       map_groups__fprintf(&self->mg, verbose, fp);
}

struct thread *machine__findnew_thread(struct machine *self, pid_t pid)
{
	struct rb_node **p = &self->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (self->last_match && self->last_match->pid == pid)
		return self->last_match;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			self->last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

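	/* Not found: allocate a new thread and insert it into the rbtree. */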
	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &self->threads);
		self->last_match = th;
	}

	return th;
}

void thread__insert_map(struct thread *self, struct map *map)
{
	map_groups__fixup_overlappings(&self->mg, map, verbose, stderr);
	map_groups__insert(&self->mg, map);
}

int thread__fork(struct thread *self, struct thread *parent)
{
	int i;

	if (parent->comm_set) {
		if (self->comm)
			free(self->comm);
		self->comm = strdup(parent->comm);
		if (!self->comm)
			return -ENOMEM;
		self->comm_set = true;
	}

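	/* Clone the parent's map groups, one per map type. */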
	for (i = 0; i < MAP__NR_TYPES; ++i)
		if (map_groups__clone(&self->mg, &parent->mg, i) < 0)
			return -ENOMEM;
	return 0;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}
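
/*
 * Illustrative sketch (not part of the original file): how an event
 * consumer might drive the helpers above when handling COMM and FORK
 * events.  The example__* names and the way the pids are obtained are
 * assumptions made for the example; only the functions defined in this
 * file are real.  Guarded with #if 0 so it does not affect the build.
 */
#if 0
static int example__process_comm(struct machine *machine, pid_t pid,
				 const char *comm)
{
	/* Creates the thread with a ":<pid>" placeholder comm if needed. */
	struct thread *th = machine__findnew_thread(machine, pid);

	if (th == NULL)
		return -ENOMEM;

	/* Replaces the placeholder and flushes the thread's map groups. */
	return thread__set_comm(th, comm);
}

static int example__process_fork(struct machine *machine, pid_t ppid,
				 pid_t pid)
{
	struct thread *parent = machine__findnew_thread(machine, ppid);
	struct thread *child  = machine__findnew_thread(machine, pid);

	if (parent == NULL || child == NULL)
		return -ENOMEM;

	/* Inherits the parent's comm and clones its map groups. */
	return thread__fork(child, parent);
}
#endif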