xref: /linux/drivers/android/binder.c (revision 3bdab16c55f57a24245c97d707241dd9b48d1a91)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* binder.c
3  *
4  * Android IPC Subsystem
5  *
6  * Copyright (C) 2007-2008 Google, Inc.
7  */
8 
9 /*
10  * Locking overview
11  *
12  * There are 3 main spinlocks which must be acquired in the
13  * order shown:
14  *
15  * 1) proc->outer_lock : protects binder_ref
16  *    binder_proc_lock() and binder_proc_unlock() are
17  *    used to acq/rel.
18  * 2) node->lock : protects most fields of binder_node.
19  *    binder_node_lock() and binder_node_unlock() are
20  *    used to acq/rel
21  * 3) proc->inner_lock : protects the thread and node lists
22  *    (proc->threads, proc->waiting_threads, proc->nodes)
23  *    and all todo lists associated with the binder_proc
24  *    (proc->todo, thread->todo, proc->delivered_death and
25  * node->async_todo), as well as thread->transaction_stack.
26  *    binder_inner_proc_lock() and binder_inner_proc_unlock()
27  *    are used to acq/rel
28  *
29  * Any lock under procA must never be nested under any lock at the same
30  * level or below on procB.
31  *
32  * Functions that require a lock to be held on entry indicate which
33  * lock is required in the suffix of the function name:
34  *
35  * foo_olocked() : requires proc->outer_lock
36  * foo_nlocked() : requires node->lock
37  * foo_ilocked() : requires proc->inner_lock
38  * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39  * foo_nilocked(): requires node->lock and proc->inner_lock
40  * ...
41  */
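
/*
 * Illustrative sketch (not part of the driver): a path that needed all
 * three locks would have to take them in the order above, using the
 * helpers defined later in this file:
 *
 *	binder_proc_lock(proc);		1) proc->outer_lock
 *	binder_node_lock(node);		2) node->lock
 *	binder_inner_proc_lock(proc);	3) proc->inner_lock
 *	... access refs, node fields and todo lists ...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */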
42 
43 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44 
45 #include <linux/fdtable.h>
46 #include <linux/file.h>
47 #include <linux/freezer.h>
48 #include <linux/fs.h>
49 #include <linux/list.h>
50 #include <linux/miscdevice.h>
51 #include <linux/module.h>
52 #include <linux/mutex.h>
53 #include <linux/nsproxy.h>
54 #include <linux/poll.h>
55 #include <linux/debugfs.h>
56 #include <linux/rbtree.h>
57 #include <linux/sched/signal.h>
58 #include <linux/sched/mm.h>
59 #include <linux/seq_file.h>
60 #include <linux/uaccess.h>
61 #include <linux/pid_namespace.h>
62 #include <linux/security.h>
63 #include <linux/spinlock.h>
64 #include <linux/ratelimit.h>
65 #include <linux/syscalls.h>
66 #include <linux/task_work.h>
67 
68 #include <uapi/linux/android/binder.h>
69 
70 #include <asm/cacheflush.h>
71 
72 #include "binder_alloc.h"
73 #include "binder_internal.h"
74 #include "binder_trace.h"
75 
76 static HLIST_HEAD(binder_deferred_list);
77 static DEFINE_MUTEX(binder_deferred_lock);
78 
79 static HLIST_HEAD(binder_devices);
80 static HLIST_HEAD(binder_procs);
81 static DEFINE_MUTEX(binder_procs_lock);
82 
83 static HLIST_HEAD(binder_dead_nodes);
84 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
85 
86 static struct dentry *binder_debugfs_dir_entry_root;
87 static struct dentry *binder_debugfs_dir_entry_proc;
88 static atomic_t binder_last_id;
89 
90 static int proc_show(struct seq_file *m, void *unused);
91 DEFINE_SHOW_ATTRIBUTE(proc);
92 
93 /* Fallback definitions for architectures where <linux/sizes.h> does not provide these */
94 #ifndef SZ_1K
95 #define SZ_1K                               0x400
96 #endif
97 
98 #ifndef SZ_4M
99 #define SZ_4M                               0x400000
100 #endif
101 
102 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
103 
104 enum {
105 	BINDER_DEBUG_USER_ERROR             = 1U << 0,
106 	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
107 	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
108 	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
109 	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
110 	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
111 	BINDER_DEBUG_READ_WRITE             = 1U << 6,
112 	BINDER_DEBUG_USER_REFS              = 1U << 7,
113 	BINDER_DEBUG_THREADS                = 1U << 8,
114 	BINDER_DEBUG_TRANSACTION            = 1U << 9,
115 	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
116 	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
117 	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
118 	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
119 	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
120 };
121 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
122 	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
123 module_param_named(debug_mask, binder_debug_mask, uint, 0644);
124 
125 static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
126 module_param_named(devices, binder_devices_param, charp, 0444);
127 
128 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
129 static int binder_stop_on_user_error;
130 
131 static int binder_set_stop_on_user_error(const char *val,
132 					 const struct kernel_param *kp)
133 {
134 	int ret;
135 
136 	ret = param_set_int(val, kp);
137 	if (binder_stop_on_user_error < 2)
138 		wake_up(&binder_user_error_wait);
139 	return ret;
140 }
141 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
142 	param_get_int, &binder_stop_on_user_error, 0644);
143 
144 #define binder_debug(mask, x...) \
145 	do { \
146 		if (binder_debug_mask & mask) \
147 			pr_info_ratelimited(x); \
148 	} while (0)
149 
150 #define binder_user_error(x...) \
151 	do { \
152 		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
153 			pr_info_ratelimited(x); \
154 		if (binder_stop_on_user_error) \
155 			binder_stop_on_user_error = 2; \
156 	} while (0)
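
/*
 * Illustrative usage (sketch): both macros take printk-style arguments
 * and are gated on the class bits in binder_debug_mask, e.g.:
 *
 *	binder_debug(BINDER_DEBUG_THREADS,
 *		     "%d:%d exit\n", proc->pid, thread->pid);
 *	binder_user_error("%d:%d invalid handle\n",
 *			  proc->pid, thread->pid);
 *
 * binder_user_error() additionally latches binder_stop_on_user_error
 * to 2 when the stop_on_user_error module parameter is set, making
 * callers elsewhere in the driver wait on binder_user_error_wait until
 * the parameter is lowered again (see binder_set_stop_on_user_error()
 * above, which issues the matching wake_up()).
 */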
157 
158 #define to_flat_binder_object(hdr) \
159 	container_of(hdr, struct flat_binder_object, hdr)
160 
161 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
162 
163 #define to_binder_buffer_object(hdr) \
164 	container_of(hdr, struct binder_buffer_object, hdr)
165 
166 #define to_binder_fd_array_object(hdr) \
167 	container_of(hdr, struct binder_fd_array_object, hdr)
168 
169 enum binder_stat_types {
170 	BINDER_STAT_PROC,
171 	BINDER_STAT_THREAD,
172 	BINDER_STAT_NODE,
173 	BINDER_STAT_REF,
174 	BINDER_STAT_DEATH,
175 	BINDER_STAT_TRANSACTION,
176 	BINDER_STAT_TRANSACTION_COMPLETE,
177 	BINDER_STAT_COUNT
178 };
179 
180 struct binder_stats {
181 	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
182 	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
183 	atomic_t obj_created[BINDER_STAT_COUNT];
184 	atomic_t obj_deleted[BINDER_STAT_COUNT];
185 };
186 
187 static struct binder_stats binder_stats;
188 
189 static inline void binder_stats_deleted(enum binder_stat_types type)
190 {
191 	atomic_inc(&binder_stats.obj_deleted[type]);
192 }
193 
194 static inline void binder_stats_created(enum binder_stat_types type)
195 {
196 	atomic_inc(&binder_stats.obj_created[type]);
197 }
198 
199 struct binder_transaction_log_entry {
200 	int debug_id;
201 	int debug_id_done;
202 	int call_type;
203 	int from_proc;
204 	int from_thread;
205 	int target_handle;
206 	int to_proc;
207 	int to_thread;
208 	int to_node;
209 	int data_size;
210 	int offsets_size;
211 	int return_error_line;
212 	uint32_t return_error;
213 	uint32_t return_error_param;
214 	const char *context_name;
215 };
216 struct binder_transaction_log {
217 	atomic_t cur;
218 	bool full;
219 	struct binder_transaction_log_entry entry[32];
220 };
221 static struct binder_transaction_log binder_transaction_log;
222 static struct binder_transaction_log binder_transaction_log_failed;
223 
224 static struct binder_transaction_log_entry *binder_transaction_log_add(
225 	struct binder_transaction_log *log)
226 {
227 	struct binder_transaction_log_entry *e;
228 	unsigned int cur = atomic_inc_return(&log->cur);
229 
230 	if (cur >= ARRAY_SIZE(log->entry))
231 		log->full = true;
232 	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
233 	WRITE_ONCE(e->debug_id_done, 0);
234 	/*
235 	 * write-barrier to synchronize access to e->debug_id_done.
236 	 * We make sure the initialized 0 value is seen before
237 	 * the other fields are zeroed by memset().
238 	 */
239 	smp_wmb();
240 	memset(e, 0, sizeof(*e));
241 	return e;
242 }
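
/*
 * Illustrative sketch of the read side this smp_wmb() pairs with (the
 * actual consumer is the debugfs log printer, outside this excerpt):
 *
 *	int debug_id = READ_ONCE(e->debug_id_done);
 *
 *	smp_rmb();	read entry fields only after debug_id_done
 *	... print the entry ...
 *	if (debug_id && debug_id == READ_ONCE(e->debug_id_done))
 *		... entry was not recycled while being printed ...
 */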
243 
244 /**
245  * struct binder_work - work enqueued on a worklist
246  * @entry:             node enqueued on list
247  * @type:              type of work to be performed
248  *
249  * There are separate work lists for proc, thread, and node (async).
250  */
251 struct binder_work {
252 	struct list_head entry;
253 
254 	enum {
255 		BINDER_WORK_TRANSACTION = 1,
256 		BINDER_WORK_TRANSACTION_COMPLETE,
257 		BINDER_WORK_RETURN_ERROR,
258 		BINDER_WORK_NODE,
259 		BINDER_WORK_DEAD_BINDER,
260 		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
261 		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
262 	} type;
263 };
264 
265 struct binder_error {
266 	struct binder_work work;
267 	uint32_t cmd;
268 };
269 
270 /**
271  * struct binder_node - binder node bookkeeping
272  * @debug_id:             unique ID for debugging
273  *                        (invariant after initialized)
274  * @lock:                 lock for node fields
275  * @work:                 worklist element for node work
276  *                        (protected by @proc->inner_lock)
277  * @rb_node:              element for proc->nodes tree
278  *                        (protected by @proc->inner_lock)
279  * @dead_node:            element for binder_dead_nodes list
280  *                        (protected by binder_dead_nodes_lock)
281  * @proc:                 binder_proc that owns this node
282  *                        (invariant after initialized)
283  * @refs:                 list of references on this node
284  *                        (protected by @lock)
285  * @internal_strong_refs: used to take strong references when
286  *                        initiating a transaction
287  *                        (protected by @proc->inner_lock if @proc
288  *                        and by @lock)
289  * @local_weak_refs:      weak user refs from local process
290  *                        (protected by @proc->inner_lock if @proc
291  *                        and by @lock)
292  * @local_strong_refs:    strong user refs from local process
293  *                        (protected by @proc->inner_lock if @proc
294  *                        and by @lock)
295  * @tmp_refs:             temporary kernel refs
296  *                        (protected by @proc->inner_lock while @proc
297  *                        is valid, and by binder_dead_nodes_lock
298  *                        if @proc is NULL. During inc/dec and node release
299  *                        it is also protected by @lock to provide safety
300  *                        as the node dies and @proc becomes NULL)
301  * @ptr:                  userspace pointer for node
302  *                        (invariant, no lock needed)
303  * @cookie:               userspace cookie for node
304  *                        (invariant, no lock needed)
305  * @has_strong_ref:       userspace notified of strong ref
306  *                        (protected by @proc->inner_lock if @proc
307  *                        and by @lock)
308  * @pending_strong_ref:   userspace has acked notification of strong ref
309  *                        (protected by @proc->inner_lock if @proc
310  *                        and by @lock)
311  * @has_weak_ref:         userspace notified of weak ref
312  *                        (protected by @proc->inner_lock if @proc
313  *                        and by @lock)
314  * @pending_weak_ref:     userspace has acked notification of weak ref
315  *                        (protected by @proc->inner_lock if @proc
316  *                        and by @lock)
317  * @has_async_transaction: async transaction to node in progress
318  *                        (protected by @lock)
319  * @accept_fds:           file descriptor operations supported for node
320  *                        (invariant after initialized)
321  * @min_priority:         minimum scheduling priority
322  *                        (invariant after initialized)
323  * @txn_security_ctx:     require sender's security context
324  *                        (invariant after initialized)
325  * @async_todo:           list of async work items
326  *                        (protected by @proc->inner_lock)
327  *
328  * Bookkeeping structure for binder nodes.
329  */
330 struct binder_node {
331 	int debug_id;
332 	spinlock_t lock;
333 	struct binder_work work;
334 	union {
335 		struct rb_node rb_node;
336 		struct hlist_node dead_node;
337 	};
338 	struct binder_proc *proc;
339 	struct hlist_head refs;
340 	int internal_strong_refs;
341 	int local_weak_refs;
342 	int local_strong_refs;
343 	int tmp_refs;
344 	binder_uintptr_t ptr;
345 	binder_uintptr_t cookie;
346 	struct {
347 		/*
348 		 * bitfield elements protected by
349 		 * proc inner_lock
350 		 */
351 		u8 has_strong_ref:1;
352 		u8 pending_strong_ref:1;
353 		u8 has_weak_ref:1;
354 		u8 pending_weak_ref:1;
355 	};
356 	struct {
357 		/*
358 		 * invariant after initialization
359 		 */
360 		u8 accept_fds:1;
361 		u8 txn_security_ctx:1;
362 		u8 min_priority;
363 	};
364 	bool has_async_transaction;
365 	struct list_head async_todo;
366 };
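
/*
 * Illustrative usage (sketch): lookups take a temporary kernel ref so
 * the node cannot be freed while it is only held via a local pointer:
 *
 *	struct binder_node *node = binder_get_node(proc, ptr);
 *
 *	if (node) {
 *		... use node ...
 *		binder_put_node(node);	drops the tmp ref
 *	}
 *
 * internal_strong_refs and the local_*_refs, by contrast, track
 * references visible to userspace and drive the has_strong_ref /
 * has_weak_ref notifications.
 */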
367 
368 struct binder_ref_death {
369 	/**
370 	 * @work: worklist element for death notifications
371 	 *        (protected by inner_lock of the proc that
372 	 *        this ref belongs to)
373 	 */
374 	struct binder_work work;
375 	binder_uintptr_t cookie;
376 };
377 
378 /**
379  * struct binder_ref_data - binder_ref counts and id
380  * @debug_id:        unique ID for the ref
381  * @desc:            unique userspace handle for ref
382  * @strong:          strong ref count (debugging only if not locked)
383  * @weak:            weak ref count (debugging only if not locked)
384  *
385  * Structure to hold ref count and ref id information. Since
386  * the actual ref can only be accessed with a lock, this structure
387  * is used to return information about the ref to callers of
388  * ref inc/dec functions.
389  */
390 struct binder_ref_data {
391 	int debug_id;
392 	uint32_t desc;
393 	int strong;
394 	int weak;
395 };
396 
397 /**
398  * struct binder_ref - struct to track references on nodes
399  * @data:        binder_ref_data containing id, handle, and current refcounts
400  * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
401  * @rb_node_node: node for lookup by @node in proc's rb_tree
402  * @node_entry:  list entry for node->refs list in target node
403  *               (protected by @node->lock)
404  * @proc:        binder_proc containing ref
405  * @node:        binder_node of target node. When cleaning up a
406  *               ref for deletion in binder_cleanup_ref, a non-NULL
407  *               @node indicates the node must be freed
408  * @death:       pointer to death notification (ref_death) if requested
409  *               (protected by @node->lock)
410  *
411  * Structure to track references from procA to target node (on procB). This
412  * structure is unsafe to access without holding @proc->outer_lock.
413  */
414 struct binder_ref {
415 	/* Lookups needed: */
416 	/*   node + proc => ref (transaction) */
417 	/*   desc + proc => ref (transaction, inc/dec ref) */
418 	/*   node => refs + procs (proc exit) */
419 	struct binder_ref_data data;
420 	struct rb_node rb_node_desc;
421 	struct rb_node rb_node_node;
422 	struct hlist_node node_entry;
423 	struct binder_proc *proc;
424 	struct binder_node *node;
425 	struct binder_ref_death *death;
426 };
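
/*
 * Illustrative mapping (sketch) from the lookups listed above to the
 * data structures, using helpers defined later in this file:
 *
 *	desc + proc => ref:	proc->refs_by_desc rbtree, see
 *				binder_get_ref_olocked()
 *	node + proc => ref:	proc->refs_by_node rbtree, see
 *				binder_get_ref_for_node_olocked()
 *	node => refs + procs:	node->refs hlist via @node_entry,
 *				walked when the node or a proc dies
 */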
427 
428 enum binder_deferred_state {
429 	BINDER_DEFERRED_FLUSH        = 0x01,
430 	BINDER_DEFERRED_RELEASE      = 0x02,
431 };
432 
433 /**
434  * struct binder_proc - binder process bookkeeping
435  * @proc_node:            element for binder_procs list
436  * @threads:              rbtree of binder_threads in this proc
437  *                        (protected by @inner_lock)
438  * @nodes:                rbtree of binder nodes associated with
439  *                        this proc ordered by node->ptr
440  *                        (protected by @inner_lock)
441  * @refs_by_desc:         rbtree of refs ordered by ref->desc
442  *                        (protected by @outer_lock)
443  * @refs_by_node:         rbtree of refs ordered by ref->node
444  *                        (protected by @outer_lock)
445  * @waiting_threads:      threads currently waiting for proc work
446  *                        (protected by @inner_lock)
447  * @pid:                  PID of group_leader of process
448  *                        (invariant after initialized)
449  * @tsk:                  task_struct for group_leader of process
450  *                        (invariant after initialized)
451  * @deferred_work_node:   element for binder_deferred_list
452  *                        (protected by binder_deferred_lock)
453  * @deferred_work:        bitmap of deferred work to perform
454  *                        (protected by binder_deferred_lock)
455  * @is_dead:              process is dead and awaiting free
456  *                        when outstanding transactions are cleaned up
457  *                        (protected by @inner_lock)
458  * @todo:                 list of work for this process
459  *                        (protected by @inner_lock)
460  * @stats:                per-process binder statistics
461  *                        (atomics, no lock needed)
462  * @delivered_death:      list of delivered death notifications
463  *                        (protected by @inner_lock)
464  * @max_threads:          cap on number of binder threads
465  *                        (protected by @inner_lock)
466  * @requested_threads:    number of binder threads requested but not
467  *                        yet started. In current implementation, can
468  *                        only be 0 or 1.
469  *                        (protected by @inner_lock)
470  * @requested_threads_started: number of binder threads started
471  *                        (protected by @inner_lock)
472  * @tmp_ref:              temporary reference to indicate proc is in use
473  *                        (protected by @inner_lock)
474  * @default_priority:     default scheduler priority
475  *                        (invariant after initialized)
476  * @debugfs_entry:        debugfs node
477  * @alloc:                binder allocator bookkeeping
478  * @context:              binder_context for this proc
479  *                        (invariant after initialized)
480  * @inner_lock:           can nest under outer_lock and/or node lock
481  * @outer_lock:           no nesting under inner or node lock
482  *                        Lock order: 1) outer, 2) node, 3) inner
483  *
484  * Bookkeeping structure for binder processes
485  */
486 struct binder_proc {
487 	struct hlist_node proc_node;
488 	struct rb_root threads;
489 	struct rb_root nodes;
490 	struct rb_root refs_by_desc;
491 	struct rb_root refs_by_node;
492 	struct list_head waiting_threads;
493 	int pid;
494 	struct task_struct *tsk;
495 	struct hlist_node deferred_work_node;
496 	int deferred_work;
497 	bool is_dead;
498 
499 	struct list_head todo;
500 	struct binder_stats stats;
501 	struct list_head delivered_death;
502 	int max_threads;
503 	int requested_threads;
504 	int requested_threads_started;
505 	int tmp_ref;
506 	long default_priority;
507 	struct dentry *debugfs_entry;
508 	struct binder_alloc alloc;
509 	struct binder_context *context;
510 	spinlock_t inner_lock;
511 	spinlock_t outer_lock;
512 };
513 
514 enum {
515 	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
516 	BINDER_LOOPER_STATE_ENTERED     = 0x02,
517 	BINDER_LOOPER_STATE_EXITED      = 0x04,
518 	BINDER_LOOPER_STATE_INVALID     = 0x08,
519 	BINDER_LOOPER_STATE_WAITING     = 0x10,
520 	BINDER_LOOPER_STATE_POLL        = 0x20,
521 };
522 
523 /**
524  * struct binder_thread - binder thread bookkeeping
525  * @proc:                 binder process for this thread
526  *                        (invariant after initialization)
527  * @rb_node:              element for proc->threads rbtree
528  *                        (protected by @proc->inner_lock)
529  * @waiting_thread_node:  element for @proc->waiting_threads list
530  *                        (protected by @proc->inner_lock)
531  * @pid:                  PID for this thread
532  *                        (invariant after initialization)
533  * @looper:               bitmap of looping state
534  *                        (only accessed by this thread)
535  * @looper_need_return:   looping thread needs to exit driver
536  *                        (no lock needed)
537  * @transaction_stack:    stack of in-progress transactions for this thread
538  *                        (protected by @proc->inner_lock)
539  * @todo:                 list of work to do for this thread
540  *                        (protected by @proc->inner_lock)
541  * @process_todo:         whether work in @todo should be processed
542  *                        (protected by @proc->inner_lock)
543  * @return_error:         transaction errors reported by this thread
544  *                        (only accessed by this thread)
545  * @reply_error:          transaction errors reported by target thread
546  *                        (protected by @proc->inner_lock)
547  * @wait:                 wait queue for thread work
548  * @stats:                per-thread statistics
549  *                        (atomics, no lock needed)
550  * @tmp_ref:              temporary reference to indicate thread is in use
551  *                        (atomic since @proc->inner_lock cannot
552  *                        always be acquired)
553  * @is_dead:              thread is dead and awaiting free
554  *                        when outstanding transactions are cleaned up
555  *                        (protected by @proc->inner_lock)
556  *
557  * Bookkeeping structure for binder threads.
558  */
559 struct binder_thread {
560 	struct binder_proc *proc;
561 	struct rb_node rb_node;
562 	struct list_head waiting_thread_node;
563 	int pid;
564 	int looper;              /* only modified by this thread */
565 	bool looper_need_return; /* can be written by other thread */
566 	struct binder_transaction *transaction_stack;
567 	struct list_head todo;
568 	bool process_todo;
569 	struct binder_error return_error;
570 	struct binder_error reply_error;
571 	wait_queue_head_t wait;
572 	struct binder_stats stats;
573 	atomic_t tmp_ref;
574 	bool is_dead;
575 };
576 
577 /**
578  * struct binder_txn_fd_fixup - transaction fd fixup list element
579  * @fixup_entry:          list entry
580  * @file:                 struct file to be associated with new fd
581  * @offset:               offset in buffer data to this fixup
582  *
583  * List element for fd fixups in a transaction. Since file
584  * descriptors need to be allocated in the context of the
585  * target process, we pass each fd to be processed in this
586  * struct.
587  */
588 struct binder_txn_fd_fixup {
589 	struct list_head fixup_entry;
590 	struct file *file;
591 	size_t offset;
592 };
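
/*
 * Simplified sketch (hypothetical, for illustration; the real fixup
 * processing lives in the target's read path, outside this excerpt):
 * the fd must be allocated while current is the target process, which
 * is why the file is carried in this struct rather than an fd chosen
 * at send time:
 *
 *	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
 *		int fd = get_unused_fd_flags(O_CLOEXEC);
 *
 *		if (fd >= 0) {
 *			... write fd into the buffer at fixup->offset ...
 *			fd_install(fd, fixup->file);
 *		}
 *	}
 */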
593 
594 struct binder_transaction {
595 	int debug_id;
596 	struct binder_work work;
597 	struct binder_thread *from;
598 	struct binder_transaction *from_parent;
599 	struct binder_proc *to_proc;
600 	struct binder_thread *to_thread;
601 	struct binder_transaction *to_parent;
602 	unsigned need_reply:1;
603 	/* unsigned is_dead:1; */	/* not used at the moment */
604 
605 	struct binder_buffer *buffer;
606 	unsigned int	code;
607 	unsigned int	flags;
608 	long	priority;
609 	long	saved_priority;
610 	kuid_t	sender_euid;
611 	struct list_head fd_fixups;
612 	binder_uintptr_t security_ctx;
613 	/**
614 	 * @lock:  protects @from, @to_proc, and @to_thread
615 	 *
616 	 * @from, @to_proc, and @to_thread can be set to NULL
617 	 * during thread teardown
618 	 */
619 	spinlock_t lock;
620 };
621 
622 /**
623  * struct binder_object - union of flat binder object types
624  * @hdr:   generic object header
625  * @fbo:   binder object (nodes and refs)
626  * @fdo:   file descriptor object
627  * @bbo:   binder buffer pointer
628  * @fdao:  file descriptor array
629  *
630  * Used for type-independent object copies
631  */
632 struct binder_object {
633 	union {
634 		struct binder_object_header hdr;
635 		struct flat_binder_object fbo;
636 		struct binder_fd_object fdo;
637 		struct binder_buffer_object bbo;
638 		struct binder_fd_array_object fdao;
639 	};
640 };
641 
642 /**
643  * binder_proc_lock() - Acquire outer lock for given binder_proc
644  * @proc:         struct binder_proc to acquire
645  *
646  * Acquires proc->outer_lock. Used to protect binder_ref
647  * structures associated with the given proc.
648  */
649 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
650 static void
651 _binder_proc_lock(struct binder_proc *proc, int line)
652 	__acquires(&proc->outer_lock)
653 {
654 	binder_debug(BINDER_DEBUG_SPINLOCKS,
655 		     "%s: line=%d\n", __func__, line);
656 	spin_lock(&proc->outer_lock);
657 }
658 
659 /**
660  * binder_proc_unlock() - Release spinlock for given binder_proc
661  * @proc:         struct binder_proc to release
662  *
663  * Release lock acquired via binder_proc_lock()
664  */
665 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
666 static void
667 _binder_proc_unlock(struct binder_proc *proc, int line)
668 	__releases(&proc->outer_lock)
669 {
670 	binder_debug(BINDER_DEBUG_SPINLOCKS,
671 		     "%s: line=%d\n", __func__, line);
672 	spin_unlock(&proc->outer_lock);
673 }
674 
675 /**
676  * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
677  * @proc:         struct binder_proc to acquire
678  *
679  * Acquires proc->inner_lock. Used to protect todo lists
680  */
681 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
682 static void
683 _binder_inner_proc_lock(struct binder_proc *proc, int line)
684 	__acquires(&proc->inner_lock)
685 {
686 	binder_debug(BINDER_DEBUG_SPINLOCKS,
687 		     "%s: line=%d\n", __func__, line);
688 	spin_lock(&proc->inner_lock);
689 }
690 
691 /**
692  * binder_inner_proc_unlock() - Release inner lock for given binder_proc
693  * @proc:         struct binder_proc to release
694  *
695  * Release lock acquired via binder_inner_proc_lock()
696  */
697 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
698 static void
699 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
700 	__releases(&proc->inner_lock)
701 {
702 	binder_debug(BINDER_DEBUG_SPINLOCKS,
703 		     "%s: line=%d\n", __func__, line);
704 	spin_unlock(&proc->inner_lock);
705 }
706 
707 /**
708  * binder_node_lock() - Acquire spinlock for given binder_node
709  * @node:         struct binder_node to acquire
710  *
711  * Acquires node->lock. Used to protect binder_node fields
712  */
713 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
714 static void
715 _binder_node_lock(struct binder_node *node, int line)
716 	__acquires(&node->lock)
717 {
718 	binder_debug(BINDER_DEBUG_SPINLOCKS,
719 		     "%s: line=%d\n", __func__, line);
720 	spin_lock(&node->lock);
721 }
722 
723 /**
724  * binder_node_unlock() - Release spinlock for given binder_node
725  * @node:         struct binder_node to release
726  *
727  * Release lock acquired via binder_node_lock()
728  */
729 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
730 static void
731 _binder_node_unlock(struct binder_node *node, int line)
732 	__releases(&node->lock)
733 {
734 	binder_debug(BINDER_DEBUG_SPINLOCKS,
735 		     "%s: line=%d\n", __func__, line);
736 	spin_unlock(&node->lock);
737 }
738 
739 /**
740  * binder_node_inner_lock() - Acquire node and inner locks
741  * @node:         struct binder_node to acquire
742  *
743  * Acquires node->lock. If node->proc is non-NULL, also acquires
744  * proc->inner_lock. Used to protect binder_node fields
745  */
746 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
747 static void
748 _binder_node_inner_lock(struct binder_node *node, int line)
749 	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
750 {
751 	binder_debug(BINDER_DEBUG_SPINLOCKS,
752 		     "%s: line=%d\n", __func__, line);
753 	spin_lock(&node->lock);
754 	if (node->proc)
755 		binder_inner_proc_lock(node->proc);
756 	else
757 		/* annotation for sparse */
758 		__acquire(&node->proc->inner_lock);
759 }
760 
761 /**
762  * binder_node_inner_unlock() - Release node and inner locks
763  * @node:         struct binder_node to release
764  *
765  * Release locks acquired via binder_node_inner_lock()
766  */
767 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
768 static void
769 _binder_node_inner_unlock(struct binder_node *node, int line)
770 	__releases(&node->lock) __releases(&node->proc->inner_lock)
771 {
772 	struct binder_proc *proc = node->proc;
773 
774 	binder_debug(BINDER_DEBUG_SPINLOCKS,
775 		     "%s: line=%d\n", __func__, line);
776 	if (proc)
777 		binder_inner_proc_unlock(proc);
778 	else
779 		/* annotation for sparse */
780 		__release(&node->proc->inner_lock);
781 	spin_unlock(&node->lock);
782 }
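
/*
 * Illustrative usage (sketch): fields documented as protected by both
 * node->lock and @proc->inner_lock are mutated under the combined
 * helpers, which degrade to node->lock alone for dead nodes
 * (node->proc == NULL):
 *
 *	binder_node_inner_lock(node);
 *	node->has_strong_ref = 1;
 *	binder_node_inner_unlock(node);
 *
 * The __acquire()/__release() calls above are no-ops at runtime; they
 * only balance sparse's lock-context tracking on the branch where
 * there is no proc->inner_lock to take.
 */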
783 
784 static bool binder_worklist_empty_ilocked(struct list_head *list)
785 {
786 	return list_empty(list);
787 }
788 
789 /**
790  * binder_worklist_empty() - Check if no items on the work list
791  * @proc:       binder_proc associated with list
792  * @list:	list to check
793  *
794  * Return: true if there are no items on list, else false
795  */
796 static bool binder_worklist_empty(struct binder_proc *proc,
797 				  struct list_head *list)
798 {
799 	bool ret;
800 
801 	binder_inner_proc_lock(proc);
802 	ret = binder_worklist_empty_ilocked(list);
803 	binder_inner_proc_unlock(proc);
804 	return ret;
805 }
806 
807 /**
808  * binder_enqueue_work_ilocked() - Add an item to the work list
809  * @work:         struct binder_work to add to list
810  * @target_list:  list to add work to
811  *
812  * Adds the work to the specified list. Asserts that work
813  * is not already on a list.
814  *
815  * Requires the proc->inner_lock to be held.
816  */
817 static void
818 binder_enqueue_work_ilocked(struct binder_work *work,
819 			   struct list_head *target_list)
820 {
821 	BUG_ON(target_list == NULL);
822 	BUG_ON(work->entry.next && !list_empty(&work->entry));
823 	list_add_tail(&work->entry, target_list);
824 }
825 
826 /**
827  * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
828  * @thread:       thread to queue work to
829  * @work:         struct binder_work to add to list
830  *
831  * Adds the work to the todo list of the thread. Doesn't set the process_todo
832  * flag, which means that (if it wasn't already set) the thread will go to
833  * sleep without handling this work when it calls read.
834  *
835  * Requires the proc->inner_lock to be held.
836  */
837 static void
838 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
839 					    struct binder_work *work)
840 {
841 	WARN_ON(!list_empty(&thread->waiting_thread_node));
842 	binder_enqueue_work_ilocked(work, &thread->todo);
843 }
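
/*
 * Contrast with binder_enqueue_thread_work_ilocked() below: this
 * deferred variant is for work the thread does not need to be woken
 * for, e.g. the BINDER_WORK_NODE requeue in binder_inc_node_nilocked()
 * where the thread is about to process its todo list anyway.
 */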
844 
845 /**
846  * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
847  * @thread:       thread to queue work to
848  * @work:         struct binder_work to add to list
849  *
850  * Adds the work to the todo list of the thread, and enables processing
851  * of the todo queue.
852  *
853  * Requires the proc->inner_lock to be held.
854  */
855 static void
856 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
857 				   struct binder_work *work)
858 {
859 	WARN_ON(!list_empty(&thread->waiting_thread_node));
860 	binder_enqueue_work_ilocked(work, &thread->todo);
861 	thread->process_todo = true;
862 }
863 
864 /**
865  * binder_enqueue_thread_work() - Add an item to the thread work list
866  * @thread:       thread to queue work to
867  * @work:         struct binder_work to add to list
868  *
869  * Adds the work to the todo list of the thread, and enables processing
870  * of the todo queue.
871  */
872 static void
873 binder_enqueue_thread_work(struct binder_thread *thread,
874 			   struct binder_work *work)
875 {
876 	binder_inner_proc_lock(thread->proc);
877 	binder_enqueue_thread_work_ilocked(thread, work);
878 	binder_inner_proc_unlock(thread->proc);
879 }
880 
881 static void
882 binder_dequeue_work_ilocked(struct binder_work *work)
883 {
884 	list_del_init(&work->entry);
885 }
886 
887 /**
888  * binder_dequeue_work() - Removes an item from the work list
889  * @proc:         binder_proc associated with list
890  * @work:         struct binder_work to remove from list
891  *
892  * Removes the specified work item from whatever list it is on.
893  * Can safely be called if work is not on any list.
894  */
895 static void
896 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
897 {
898 	binder_inner_proc_lock(proc);
899 	binder_dequeue_work_ilocked(work);
900 	binder_inner_proc_unlock(proc);
901 }
902 
903 static struct binder_work *binder_dequeue_work_head_ilocked(
904 					struct list_head *list)
905 {
906 	struct binder_work *w;
907 
908 	w = list_first_entry_or_null(list, struct binder_work, entry);
909 	if (w)
910 		list_del_init(&w->entry);
911 	return w;
912 }
913 
914 /**
915  * binder_dequeue_work_head() - Dequeues the item at head of list
916  * @proc:         binder_proc associated with list
917  * @list:         list to dequeue head
918  *
919  * Removes the head of the list if there are items on the list.
920  *
921  * Return: pointer to the dequeued binder_work, or NULL if the list was empty
922  */
923 static struct binder_work *binder_dequeue_work_head(
924 					struct binder_proc *proc,
925 					struct list_head *list)
926 {
927 	struct binder_work *w;
928 
929 	binder_inner_proc_lock(proc);
930 	w = binder_dequeue_work_head_ilocked(list);
931 	binder_inner_proc_unlock(proc);
932 	return w;
933 }
934 
935 static void
936 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
937 static void binder_free_thread(struct binder_thread *thread);
938 static void binder_free_proc(struct binder_proc *proc);
939 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
940 
941 static bool binder_has_work_ilocked(struct binder_thread *thread,
942 				    bool do_proc_work)
943 {
944 	return thread->process_todo ||
945 		thread->looper_need_return ||
946 		(do_proc_work &&
947 		 !binder_worklist_empty_ilocked(&thread->proc->todo));
948 }
949 
950 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
951 {
952 	bool has_work;
953 
954 	binder_inner_proc_lock(thread->proc);
955 	has_work = binder_has_work_ilocked(thread, do_proc_work);
956 	binder_inner_proc_unlock(thread->proc);
957 
958 	return has_work;
959 }
960 
961 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
962 {
963 	return !thread->transaction_stack &&
964 		binder_worklist_empty_ilocked(&thread->todo) &&
965 		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
966 				   BINDER_LOOPER_STATE_REGISTERED));
967 }
968 
969 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
970 					       bool sync)
971 {
972 	struct rb_node *n;
973 	struct binder_thread *thread;
974 
975 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
976 		thread = rb_entry(n, struct binder_thread, rb_node);
977 		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
978 		    binder_available_for_proc_work_ilocked(thread)) {
979 			if (sync)
980 				wake_up_interruptible_sync(&thread->wait);
981 			else
982 				wake_up_interruptible(&thread->wait);
983 		}
984 	}
985 }
986 
987 /**
988  * binder_select_thread_ilocked() - selects a thread for doing proc work.
989  * @proc:	process to select a thread from
990  *
991  * Note that calling this function moves the thread off the waiting_threads
992  * list, so it can only be woken up by the caller of this function, or a
993  * signal. Therefore, callers *should* always wake up the thread this function
994  * returns.
995  *
996  * Return:	If there's a thread currently waiting for process work,
997  *		returns that thread. Otherwise returns NULL.
998  */
999 static struct binder_thread *
1000 binder_select_thread_ilocked(struct binder_proc *proc)
1001 {
1002 	struct binder_thread *thread;
1003 
1004 	assert_spin_locked(&proc->inner_lock);
1005 	thread = list_first_entry_or_null(&proc->waiting_threads,
1006 					  struct binder_thread,
1007 					  waiting_thread_node);
1008 
1009 	if (thread)
1010 		list_del_init(&thread->waiting_thread_node);
1011 
1012 	return thread;
1013 }
1014 
1015 /**
1016  * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
1017  * @proc:	process to wake up a thread in
1018  * @thread:	specific thread to wake-up (may be NULL)
1019  * @sync:	whether to do a synchronous wake-up
1020  *
1021  * This function wakes up a thread in the @proc process.
1022  * The caller may provide a specific thread to wake-up in
1023  * the @thread parameter. If @thread is NULL, this function
1024  * will wake up threads that have called poll().
1025  *
1026  * Note that for this function to work as expected, callers
1027  * should first call binder_select_thread() to find a thread
1028  * to handle the work (if they don't have a thread already),
1029  * and pass the result into the @thread parameter.
1030  */
1031 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
1032 					 struct binder_thread *thread,
1033 					 bool sync)
1034 {
1035 	assert_spin_locked(&proc->inner_lock);
1036 
1037 	if (thread) {
1038 		if (sync)
1039 			wake_up_interruptible_sync(&thread->wait);
1040 		else
1041 			wake_up_interruptible(&thread->wait);
1042 		return;
1043 	}
1044 
1045 	/* Didn't find a thread waiting for proc work; this can happen
1046 	 * in two scenarios:
1047 	 * 1. All threads are busy handling transactions
1048 	 *    In that case, one of those threads should call back into
1049 	 *    the kernel driver soon and pick up this work.
1050 	 * 2. Threads are using the (e)poll interface, in which case
1051 	 *    they may be blocked on the waitqueue without having been
1052 	 *    added to waiting_threads. For this case, we just iterate
1053 	 *    over all threads not handling transaction work, and
1054 	 *    wake them all up. We wake all because we don't know whether
1055 	 *    a thread that called into (e)poll is handling non-binder
1056 	 *    work currently.
1057 	 */
1058 	binder_wakeup_poll_threads_ilocked(proc, sync);
1059 }
1060 
1061 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1062 {
1063 	struct binder_thread *thread = binder_select_thread_ilocked(proc);
1064 
1065 	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
1066 }
1067 
1068 static void binder_set_nice(long nice)
1069 {
1070 	long min_nice;
1071 
1072 	if (can_nice(current, nice)) {
1073 		set_user_nice(current, nice);
1074 		return;
1075 	}
1076 	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
1077 	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
1078 		     "%d: nice value %ld not allowed use %ld instead\n",
1079 		      current->pid, nice, min_nice);
1080 	set_user_nice(current, min_nice);
1081 	if (min_nice <= MAX_NICE)
1082 		return;
1083 	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
1084 }
1085 
1086 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
1087 						   binder_uintptr_t ptr)
1088 {
1089 	struct rb_node *n = proc->nodes.rb_node;
1090 	struct binder_node *node;
1091 
1092 	assert_spin_locked(&proc->inner_lock);
1093 
1094 	while (n) {
1095 		node = rb_entry(n, struct binder_node, rb_node);
1096 
1097 		if (ptr < node->ptr)
1098 			n = n->rb_left;
1099 		else if (ptr > node->ptr)
1100 			n = n->rb_right;
1101 		else {
1102 			/*
1103 			 * take an implicit weak reference
1104 			 * to ensure node stays alive until
1105 			 * call to binder_put_node()
1106 			 */
1107 			binder_inc_node_tmpref_ilocked(node);
1108 			return node;
1109 		}
1110 	}
1111 	return NULL;
1112 }
1113 
1114 static struct binder_node *binder_get_node(struct binder_proc *proc,
1115 					   binder_uintptr_t ptr)
1116 {
1117 	struct binder_node *node;
1118 
1119 	binder_inner_proc_lock(proc);
1120 	node = binder_get_node_ilocked(proc, ptr);
1121 	binder_inner_proc_unlock(proc);
1122 	return node;
1123 }
1124 
1125 static struct binder_node *binder_init_node_ilocked(
1126 						struct binder_proc *proc,
1127 						struct binder_node *new_node,
1128 						struct flat_binder_object *fp)
1129 {
1130 	struct rb_node **p = &proc->nodes.rb_node;
1131 	struct rb_node *parent = NULL;
1132 	struct binder_node *node;
1133 	binder_uintptr_t ptr = fp ? fp->binder : 0;
1134 	binder_uintptr_t cookie = fp ? fp->cookie : 0;
1135 	__u32 flags = fp ? fp->flags : 0;
1136 
1137 	assert_spin_locked(&proc->inner_lock);
1138 
1139 	while (*p) {
1140 
1141 		parent = *p;
1142 		node = rb_entry(parent, struct binder_node, rb_node);
1143 
1144 		if (ptr < node->ptr)
1145 			p = &(*p)->rb_left;
1146 		else if (ptr > node->ptr)
1147 			p = &(*p)->rb_right;
1148 		else {
1149 			/*
1150 			 * A matching node is already in
1151 			 * the rb tree. Abandon the init
1152 			 * and return it.
1153 			 */
1154 			binder_inc_node_tmpref_ilocked(node);
1155 			return node;
1156 		}
1157 	}
1158 	node = new_node;
1159 	binder_stats_created(BINDER_STAT_NODE);
1160 	node->tmp_refs++;
1161 	rb_link_node(&node->rb_node, parent, p);
1162 	rb_insert_color(&node->rb_node, &proc->nodes);
1163 	node->debug_id = atomic_inc_return(&binder_last_id);
1164 	node->proc = proc;
1165 	node->ptr = ptr;
1166 	node->cookie = cookie;
1167 	node->work.type = BINDER_WORK_NODE;
1168 	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1169 	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1170 	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
1171 	spin_lock_init(&node->lock);
1172 	INIT_LIST_HEAD(&node->work.entry);
1173 	INIT_LIST_HEAD(&node->async_todo);
1174 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1175 		     "%d:%d node %d u%016llx c%016llx created\n",
1176 		     proc->pid, current->pid, node->debug_id,
1177 		     (u64)node->ptr, (u64)node->cookie);
1178 
1179 	return node;
1180 }
1181 
1182 static struct binder_node *binder_new_node(struct binder_proc *proc,
1183 					   struct flat_binder_object *fp)
1184 {
1185 	struct binder_node *node;
1186 	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1187 
1188 	if (!new_node)
1189 		return NULL;
1190 	binder_inner_proc_lock(proc);
1191 	node = binder_init_node_ilocked(proc, new_node, fp);
1192 	binder_inner_proc_unlock(proc);
1193 	if (node != new_node)
1194 		/*
1195 		 * The node was already added by another thread
1196 		 */
1197 		kfree(new_node);
1198 
1199 	return node;
1200 }
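
/*
 * Illustrative usage (sketch): the returned node carries one tmp ref,
 * taken in binder_init_node_ilocked(), which the caller must drop:
 *
 *	struct binder_node *node = binder_new_node(proc, fp);
 *
 *	if (!node)
 *		return -ENOMEM;
 *	... install a ref or hand the node to a transaction ...
 *	binder_put_node(node);
 */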
1201 
1202 static void binder_free_node(struct binder_node *node)
1203 {
1204 	kfree(node);
1205 	binder_stats_deleted(BINDER_STAT_NODE);
1206 }
1207 
1208 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1209 				    int internal,
1210 				    struct list_head *target_list)
1211 {
1212 	struct binder_proc *proc = node->proc;
1213 
1214 	assert_spin_locked(&node->lock);
1215 	if (proc)
1216 		assert_spin_locked(&proc->inner_lock);
1217 	if (strong) {
1218 		if (internal) {
1219 			if (target_list == NULL &&
1220 			    node->internal_strong_refs == 0 &&
1221 			    !(node->proc &&
1222 			      node == node->proc->context->binder_context_mgr_node &&
1223 			      node->has_strong_ref)) {
1224 				pr_err("invalid inc strong node for %d\n",
1225 					node->debug_id);
1226 				return -EINVAL;
1227 			}
1228 			node->internal_strong_refs++;
1229 		} else
1230 			node->local_strong_refs++;
1231 		if (!node->has_strong_ref && target_list) {
1232 			struct binder_thread *thread = container_of(target_list,
1233 						    struct binder_thread, todo);
1234 			binder_dequeue_work_ilocked(&node->work);
1235 			BUG_ON(&thread->todo != target_list);
1236 			binder_enqueue_deferred_thread_work_ilocked(thread,
1237 								   &node->work);
1238 		}
1239 	} else {
1240 		if (!internal)
1241 			node->local_weak_refs++;
1242 		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1243 			if (target_list == NULL) {
1244 				pr_err("invalid inc weak node for %d\n",
1245 					node->debug_id);
1246 				return -EINVAL;
1247 			}
1248 			/*
1249 			 * See comment above
1250 			 */
1251 			binder_enqueue_work_ilocked(&node->work, target_list);
1252 		}
1253 	}
1254 	return 0;
1255 }
1256 
1257 static int binder_inc_node(struct binder_node *node, int strong, int internal,
1258 			   struct list_head *target_list)
1259 {
1260 	int ret;
1261 
1262 	binder_node_inner_lock(node);
1263 	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1264 	binder_node_inner_unlock(node);
1265 
1266 	return ret;
1267 }
1268 
1269 static bool binder_dec_node_nilocked(struct binder_node *node,
1270 				     int strong, int internal)
1271 {
1272 	struct binder_proc *proc = node->proc;
1273 
1274 	assert_spin_locked(&node->lock);
1275 	if (proc)
1276 		assert_spin_locked(&proc->inner_lock);
1277 	if (strong) {
1278 		if (internal)
1279 			node->internal_strong_refs--;
1280 		else
1281 			node->local_strong_refs--;
1282 		if (node->local_strong_refs || node->internal_strong_refs)
1283 			return false;
1284 	} else {
1285 		if (!internal)
1286 			node->local_weak_refs--;
1287 		if (node->local_weak_refs || node->tmp_refs ||
1288 				!hlist_empty(&node->refs))
1289 			return false;
1290 	}
1291 
1292 	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1293 		if (list_empty(&node->work.entry)) {
1294 			binder_enqueue_work_ilocked(&node->work, &proc->todo);
1295 			binder_wakeup_proc_ilocked(proc);
1296 		}
1297 	} else {
1298 		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1299 		    !node->local_weak_refs && !node->tmp_refs) {
1300 			if (proc) {
1301 				binder_dequeue_work_ilocked(&node->work);
1302 				rb_erase(&node->rb_node, &proc->nodes);
1303 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1304 					     "refless node %d deleted\n",
1305 					     node->debug_id);
1306 			} else {
1307 				BUG_ON(!list_empty(&node->work.entry));
1308 				spin_lock(&binder_dead_nodes_lock);
1309 				/*
1310 				 * tmp_refs could have changed so
1311 				 * check it again
1312 				 */
1313 				if (node->tmp_refs) {
1314 					spin_unlock(&binder_dead_nodes_lock);
1315 					return false;
1316 				}
1317 				hlist_del(&node->dead_node);
1318 				spin_unlock(&binder_dead_nodes_lock);
1319 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1320 					     "dead node %d deleted\n",
1321 					     node->debug_id);
1322 			}
1323 			return true;
1324 		}
1325 	}
1326 	return false;
1327 }
1328 
1329 static void binder_dec_node(struct binder_node *node, int strong, int internal)
1330 {
1331 	bool free_node;
1332 
1333 	binder_node_inner_lock(node);
1334 	free_node = binder_dec_node_nilocked(node, strong, internal);
1335 	binder_node_inner_unlock(node);
1336 	if (free_node)
1337 		binder_free_node(node);
1338 }
1339 
1340 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
1341 {
1342 	/*
1343 	 * No call to binder_inc_node() is needed since we
1344 	 * don't need to inform userspace of any changes to
1345 	 * tmp_refs
1346 	 */
1347 	node->tmp_refs++;
1348 }
1349 
1350 /**
1351  * binder_inc_node_tmpref() - take a temporary reference on node
1352  * @node:	node to reference
1353  *
1354  * Take reference on node to prevent the node from being freed
1355  * while referenced only by a local variable. The inner lock is
1356  * needed to serialize with the node work on the queue (which
1357  * isn't needed after the node is dead). If the node is dead
1358  * (node->proc is NULL), use binder_dead_nodes_lock to protect
1359  * node->tmp_refs against dead-node-only cases where the node
1360  * lock cannot be acquired (e.g. traversing the dead node list to
1361  * print nodes)
1362  */
1363 static void binder_inc_node_tmpref(struct binder_node *node)
1364 {
1365 	binder_node_lock(node);
1366 	if (node->proc)
1367 		binder_inner_proc_lock(node->proc);
1368 	else
1369 		spin_lock(&binder_dead_nodes_lock);
1370 	binder_inc_node_tmpref_ilocked(node);
1371 	if (node->proc)
1372 		binder_inner_proc_unlock(node->proc);
1373 	else
1374 		spin_unlock(&binder_dead_nodes_lock);
1375 	binder_node_unlock(node);
1376 }
1377 
1378 /**
1379  * binder_dec_node_tmpref() - remove a temporary reference on node
1380  * @node:	node to reference
1381  *
1382  * Release temporary reference on node taken via binder_inc_node_tmpref()
1383  */
1384 static void binder_dec_node_tmpref(struct binder_node *node)
1385 {
1386 	bool free_node;
1387 
1388 	binder_node_inner_lock(node);
1389 	if (!node->proc)
1390 		spin_lock(&binder_dead_nodes_lock);
1391 	else
1392 		__acquire(&binder_dead_nodes_lock);
1393 	node->tmp_refs--;
1394 	BUG_ON(node->tmp_refs < 0);
1395 	if (!node->proc)
1396 		spin_unlock(&binder_dead_nodes_lock);
1397 	else
1398 		__release(&binder_dead_nodes_lock);
1399 	/*
1400 	 * Call binder_dec_node() to check if all refcounts are 0
1401 	 * and cleanup is needed. Calling with strong=0 and internal=1
1402 	 * causes no actual reference to be released in binder_dec_node().
1403 	 * If that changes, a change is needed here too.
1404 	 */
1405 	free_node = binder_dec_node_nilocked(node, 0, 1);
1406 	binder_node_inner_unlock(node);
1407 	if (free_node)
1408 		binder_free_node(node);
1409 }
1410 
1411 static void binder_put_node(struct binder_node *node)
1412 {
1413 	binder_dec_node_tmpref(node);
1414 }
1415 
1416 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1417 						 u32 desc, bool need_strong_ref)
1418 {
1419 	struct rb_node *n = proc->refs_by_desc.rb_node;
1420 	struct binder_ref *ref;
1421 
1422 	while (n) {
1423 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1424 
1425 		if (desc < ref->data.desc) {
1426 			n = n->rb_left;
1427 		} else if (desc > ref->data.desc) {
1428 			n = n->rb_right;
1429 		} else if (need_strong_ref && !ref->data.strong) {
1430 			binder_user_error("tried to use weak ref as strong ref\n");
1431 			return NULL;
1432 		} else {
1433 			return ref;
1434 		}
1435 	}
1436 	return NULL;
1437 }
1438 
1439 /**
1440  * binder_get_ref_for_node_olocked() - get the ref associated with given node
1441  * @proc:	binder_proc that owns the ref
1442  * @node:	binder_node of target
1443  * @new_ref:	newly allocated binder_ref to be initialized or %NULL
1444  *
1445  * Look up the ref for the given node and return it if it exists
1446  *
1447  * If it doesn't exist and the caller provides a newly allocated
1448  * ref, initialize the fields of the newly allocated ref and insert
1449  * into the given proc rb_trees and node refs list.
1450  *
1451  * Return:	the ref for node. It is possible that another thread
1452  *		allocated/initialized the ref first in which case the
1453  *		returned ref would be different than the passed-in
1454  *		new_ref. new_ref must be kfree'd by the caller in
1455  *		this case.
1456  */
1457 static struct binder_ref *binder_get_ref_for_node_olocked(
1458 					struct binder_proc *proc,
1459 					struct binder_node *node,
1460 					struct binder_ref *new_ref)
1461 {
1462 	struct binder_context *context = proc->context;
1463 	struct rb_node **p = &proc->refs_by_node.rb_node;
1464 	struct rb_node *parent = NULL;
1465 	struct binder_ref *ref;
1466 	struct rb_node *n;
1467 
1468 	while (*p) {
1469 		parent = *p;
1470 		ref = rb_entry(parent, struct binder_ref, rb_node_node);
1471 
1472 		if (node < ref->node)
1473 			p = &(*p)->rb_left;
1474 		else if (node > ref->node)
1475 			p = &(*p)->rb_right;
1476 		else
1477 			return ref;
1478 	}
1479 	if (!new_ref)
1480 		return NULL;
1481 
1482 	binder_stats_created(BINDER_STAT_REF);
1483 	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1484 	new_ref->proc = proc;
1485 	new_ref->node = node;
1486 	rb_link_node(&new_ref->rb_node_node, parent, p);
1487 	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1488 
1489 	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1490 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1491 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1492 		if (ref->data.desc > new_ref->data.desc)
1493 			break;
1494 		new_ref->data.desc = ref->data.desc + 1;
1495 	}
1496 
1497 	p = &proc->refs_by_desc.rb_node;
1498 	while (*p) {
1499 		parent = *p;
1500 		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1501 
1502 		if (new_ref->data.desc < ref->data.desc)
1503 			p = &(*p)->rb_left;
1504 		else if (new_ref->data.desc > ref->data.desc)
1505 			p = &(*p)->rb_right;
1506 		else
1507 			BUG();
1508 	}
1509 	rb_link_node(&new_ref->rb_node_desc, parent, p);
1510 	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1511 
1512 	binder_node_lock(node);
1513 	hlist_add_head(&new_ref->node_entry, &node->refs);
1514 
1515 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1516 		     "%d new ref %d desc %d for node %d\n",
1517 		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1518 		      node->debug_id);
1519 	binder_node_unlock(node);
1520 	return new_ref;
1521 }
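
/*
 * Worked example for the desc assignment above: with existing
 * descriptors {0, 1, 2, 5} on a proc and a non-context-manager node,
 * the scan starts at 1, bumps to 2 and then 3 while walking descs 1
 * and 2, and stops at 5 (5 > 3), so the new ref gets desc 3, the
 * lowest unused handle above 0. Handle 0 is reserved for refs to the
 * context manager node.
 */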
1522 
1523 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1524 {
1525 	bool delete_node = false;
1526 
1527 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1528 		     "%d delete ref %d desc %d for node %d\n",
1529 		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
1530 		      ref->node->debug_id);
1531 
1532 	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1533 	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1534 
1535 	binder_node_inner_lock(ref->node);
1536 	if (ref->data.strong)
1537 		binder_dec_node_nilocked(ref->node, 1, 1);
1538 
1539 	hlist_del(&ref->node_entry);
1540 	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1541 	binder_node_inner_unlock(ref->node);
1542 	/*
1543 	 * Clear ref->node unless we want the caller to free the node
1544 	 */
1545 	if (!delete_node) {
1546 		/*
1547 		 * The caller uses ref->node to determine
1548 		 * whether the node needs to be freed. Clear
1549 		 * it since the node is still alive.
1550 		 */
1551 		ref->node = NULL;
1552 	}
1553 
1554 	if (ref->death) {
1555 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1556 			     "%d delete ref %d desc %d has death notification\n",
1557 			      ref->proc->pid, ref->data.debug_id,
1558 			      ref->data.desc);
1559 		binder_dequeue_work(ref->proc, &ref->death->work);
1560 		binder_stats_deleted(BINDER_STAT_DEATH);
1561 	}
1562 	binder_stats_deleted(BINDER_STAT_REF);
1563 }
1564 
1565 /**
1566  * binder_inc_ref_olocked() - increment the ref for given handle
1567  * @ref:         ref to be incremented
1568  * @strong:      if true, strong increment, else weak
1569  * @target_list: list to queue node work on
1570  *
1571  * Increment the ref. @ref->proc->outer_lock must be held on entry
1572  *
1573  * Return: 0, if successful, else errno
1574  */
1575 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1576 				  struct list_head *target_list)
1577 {
1578 	int ret;
1579 
1580 	if (strong) {
1581 		if (ref->data.strong == 0) {
1582 			ret = binder_inc_node(ref->node, 1, 1, target_list);
1583 			if (ret)
1584 				return ret;
1585 		}
1586 		ref->data.strong++;
1587 	} else {
1588 		if (ref->data.weak == 0) {
1589 			ret = binder_inc_node(ref->node, 0, 1, target_list);
1590 			if (ret)
1591 				return ret;
1592 		}
1593 		ref->data.weak++;
1594 	}
1595 	return 0;
1596 }
1597 
1598 /**
1599  * binder_dec_ref_olocked() - dec the ref for given handle
1600  * @ref:	ref to be decremented
1601  * @strong:	if true, strong decrement, else weak
1602  *
1603  * Decrement the ref. @ref->proc->outer_lock must be held on entry.
1604  *
1605  * Return: true if ref is cleaned up and ready to be freed
1606  */
1607 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1608 {
1609 	if (strong) {
1610 		if (ref->data.strong == 0) {
1611 			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1612 					  ref->proc->pid, ref->data.debug_id,
1613 					  ref->data.desc, ref->data.strong,
1614 					  ref->data.weak);
1615 			return false;
1616 		}
1617 		ref->data.strong--;
1618 		if (ref->data.strong == 0)
1619 			binder_dec_node(ref->node, strong, 1);
1620 	} else {
1621 		if (ref->data.weak == 0) {
1622 			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1623 					  ref->proc->pid, ref->data.debug_id,
1624 					  ref->data.desc, ref->data.strong,
1625 					  ref->data.weak);
1626 			return false;
1627 		}
1628 		ref->data.weak--;
1629 	}
1630 	if (ref->data.strong == 0 && ref->data.weak == 0) {
1631 		binder_cleanup_ref_olocked(ref);
1632 		return true;
1633 	}
1634 	return false;
1635 }
1636 
1637 /**
1638  * binder_get_node_from_ref() - get the node from the given proc/desc
1639  * @proc:	proc containing the ref
1640  * @desc:	the handle associated with the ref
1641  * @need_strong_ref: if true, only return node if ref is strong
1642  * @rdata:	the id/refcount data for the ref
1643  *
1644  * Given a proc and ref handle, return the associated binder_node
1645  *
1646  * Return: a binder_node, or NULL if not found or only weak when strong is required
1647  */
1648 static struct binder_node *binder_get_node_from_ref(
1649 		struct binder_proc *proc,
1650 		u32 desc, bool need_strong_ref,
1651 		struct binder_ref_data *rdata)
1652 {
1653 	struct binder_node *node;
1654 	struct binder_ref *ref;
1655 
1656 	binder_proc_lock(proc);
1657 	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1658 	if (!ref)
1659 		goto err_no_ref;
1660 	node = ref->node;
1661 	/*
1662 	 * Take an implicit reference on the node to ensure
1663 	 * it stays alive until the call to binder_put_node()
1664 	 */
1665 	binder_inc_node_tmpref(node);
1666 	if (rdata)
1667 		*rdata = ref->data;
1668 	binder_proc_unlock(proc);
1669 
1670 	return node;
1671 
1672 err_no_ref:
1673 	binder_proc_unlock(proc);
1674 	return NULL;
1675 }
1676 
1677 /**
1678  * binder_free_ref() - free the binder_ref
1679  * @ref:	ref to free
1680  *
1681  * Free the binder_ref. Free the binder_node indicated by ref->node
1682  * (if non-NULL) and the binder_ref_death indicated by ref->death.
1683  */
1684 static void binder_free_ref(struct binder_ref *ref)
1685 {
1686 	if (ref->node)
1687 		binder_free_node(ref->node);
1688 	kfree(ref->death);
1689 	kfree(ref);
1690 }
1691 
1692 /**
1693  * binder_update_ref_for_handle() - inc/dec the ref for given handle
1694  * @proc:	proc containing the ref
1695  * @desc:	the handle associated with the ref
1696  * @increment:	true=inc reference, false=dec reference
1697  * @strong:	true=strong reference, false=weak reference
1698  * @rdata:	the id/refcount data for the ref
1699  *
1700  * Given a proc and ref handle, increment or decrement the ref
1701  * according to "increment" arg.
1702  *
1703  * Return: 0 if successful, else errno
1704  */
1705 static int binder_update_ref_for_handle(struct binder_proc *proc,
1706 		uint32_t desc, bool increment, bool strong,
1707 		struct binder_ref_data *rdata)
1708 {
1709 	int ret = 0;
1710 	struct binder_ref *ref;
1711 	bool delete_ref = false;
1712 
1713 	binder_proc_lock(proc);
1714 	ref = binder_get_ref_olocked(proc, desc, strong);
1715 	if (!ref) {
1716 		ret = -EINVAL;
1717 		goto err_no_ref;
1718 	}
1719 	if (increment)
1720 		ret = binder_inc_ref_olocked(ref, strong, NULL);
1721 	else
1722 		delete_ref = binder_dec_ref_olocked(ref, strong);
1723 
1724 	if (rdata)
1725 		*rdata = ref->data;
1726 	binder_proc_unlock(proc);
1727 
1728 	if (delete_ref)
1729 		binder_free_ref(ref);
1730 	return ret;
1731 
1732 err_no_ref:
1733 	binder_proc_unlock(proc);
1734 	return ret;
1735 }
1736 
1737 /**
1738  * binder_dec_ref_for_handle() - dec the ref for given handle
1739  * @proc:	proc containing the ref
1740  * @desc:	the handle associated with the ref
1741  * @strong:	true=strong reference, false=weak reference
1742  * @rdata:	the id/refcount data for the ref
1743  *
1744  * Just calls binder_update_ref_for_handle() to decrement the ref.
1745  *
1746  * Return: 0 if successful, else errno
1747  */
1748 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1749 		uint32_t desc, bool strong, struct binder_ref_data *rdata)
1750 {
1751 	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1752 }
1753 
1754 
1755 /**
1756  * binder_inc_ref_for_node() - increment the ref for given proc/node
1757  * @proc:	 proc containing the ref
1758  * @node:	 target node
1759  * @strong:	 true=strong reference, false=weak reference
1760  * @target_list: worklist to use if node is incremented
1761  * @rdata:	 the id/refcount data for the ref
1762  *
1763  * Given a proc and node, increment the ref. Create the ref if it
1764  * doesn't already exist
1765  *
1766  * Return: 0 if successful, else errno
1767  */
1768 static int binder_inc_ref_for_node(struct binder_proc *proc,
1769 			struct binder_node *node,
1770 			bool strong,
1771 			struct list_head *target_list,
1772 			struct binder_ref_data *rdata)
1773 {
1774 	struct binder_ref *ref;
1775 	struct binder_ref *new_ref = NULL;
1776 	int ret = 0;
1777 
1778 	binder_proc_lock(proc);
1779 	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1780 	if (!ref) {
1781 		binder_proc_unlock(proc);
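		/*
		 * GFP_KERNEL may sleep, so the allocation cannot be done
		 * while holding the proc spinlock; drop it and look the
		 * ref up again after allocating.
		 */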
1782 		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1783 		if (!new_ref)
1784 			return -ENOMEM;
1785 		binder_proc_lock(proc);
1786 		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1787 	}
1788 	ret = binder_inc_ref_olocked(ref, strong, target_list);
1789 	*rdata = ref->data;
1790 	binder_proc_unlock(proc);
1791 	if (new_ref && ref != new_ref)
1792 		/*
1793 		 * Another thread created the ref first so
1794 		 * free the one we allocated
1795 		 */
1796 		kfree(new_ref);
1797 	return ret;
1798 }
1799 
1800 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1801 					   struct binder_transaction *t)
1802 {
1803 	BUG_ON(!target_thread);
1804 	assert_spin_locked(&target_thread->proc->inner_lock);
1805 	BUG_ON(target_thread->transaction_stack != t);
1806 	BUG_ON(target_thread->transaction_stack->from != target_thread);
1807 	target_thread->transaction_stack =
1808 		target_thread->transaction_stack->from_parent;
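	/* clear t->from so binder_get_txn_from() no longer returns this thread */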
1809 	t->from = NULL;
1810 }
1811 
1812 /**
1813  * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1814  * @thread:	thread to decrement
1815  *
1816  * A thread needs to be kept alive while being used to create or
1817  * handle a transaction. binder_get_txn_from() is used to safely
1818  * extract t->from from a binder_transaction and keep the thread
1819  * indicated by t->from from being freed. When done with that
1820  * binder_thread, this function is called to decrement the
1821  * tmp_ref and free if appropriate (thread has been released
1822  * and no transaction being processed by the driver)
1823  */
1824 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1825 {
1826 	/*
1827 	 * The atomic protects the counter itself; the inner lock is
1828 	 * taken so the is_dead and tmp_ref checks below are consistent
1829 	 */
1830 	binder_inner_proc_lock(thread->proc);
1831 	atomic_dec(&thread->tmp_ref);
1832 	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1833 		binder_inner_proc_unlock(thread->proc);
1834 		binder_free_thread(thread);
1835 		return;
1836 	}
1837 	binder_inner_proc_unlock(thread->proc);
1838 }
1839 
1840 /**
1841  * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1842  * @proc:	proc to decrement
1843  *
1844  * A binder_proc needs to be kept alive while being used to create or
1845  * handle a transaction. proc->tmp_ref is incremented when
1846  * creating a new transaction or the binder_proc is currently in-use
1847  * by threads that are being released. When done with the binder_proc,
1848  * this function is called to decrement the counter and free the
1849  * proc if appropriate (proc has been released, all threads have
1850  * been released and not currenly in-use to process a transaction).
1851  */
1852 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1853 {
1854 	binder_inner_proc_lock(proc);
1855 	proc->tmp_ref--;
1856 	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1857 			!proc->tmp_ref) {
1858 		binder_inner_proc_unlock(proc);
1859 		binder_free_proc(proc);
1860 		return;
1861 	}
1862 	binder_inner_proc_unlock(proc);
1863 }
1864 
1865 /**
1866  * binder_get_txn_from() - safely extract the "from" thread in transaction
1867  * @t:	binder transaction for t->from
1868  *
1869  * Atomically return the "from" thread and increment the tmp_ref
1870  * count for the thread to ensure it stays alive until
1871  * binder_thread_dec_tmpref() is called.
1872  *
1873  * Return: the value of t->from
1874  */
1875 static struct binder_thread *binder_get_txn_from(
1876 		struct binder_transaction *t)
1877 {
1878 	struct binder_thread *from;
1879 
1880 	spin_lock(&t->lock);
1881 	from = t->from;
1882 	if (from)
1883 		atomic_inc(&from->tmp_ref);
1884 	spin_unlock(&t->lock);
1885 	return from;
1886 }
1887 
1888 /**
1889  * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1890  * @t:	binder transaction for t->from
1891  *
1892  * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1893  * to guarantee that the thread cannot be released while operating on it.
1894  * The caller must call binder_inner_proc_unlock() to release the inner lock
1895  * as well as call binder_thread_dec_tmpref() to release the reference.
1896  *
1897  * Return: the value of t->from
1898  */
1899 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1900 		struct binder_transaction *t)
1901 	__acquires(&t->from->proc->inner_lock)
1902 {
1903 	struct binder_thread *from;
1904 
1905 	from = binder_get_txn_from(t);
1906 	if (!from) {
1907 		__acquire(&from->proc->inner_lock);
1908 		return NULL;
1909 	}
1910 	binder_inner_proc_lock(from->proc);
1911 	if (t->from) {
1912 		BUG_ON(from != t->from);
1913 		return from;
1914 	}
1915 	binder_inner_proc_unlock(from->proc);
1916 	__acquire(&from->proc->inner_lock);
1917 	binder_thread_dec_tmpref(from);
1918 	return NULL;
1919 }
1920 
1921 /**
1922  * binder_free_txn_fixups() - free unprocessed fd fixups
1923  * @t:	binder transaction whose fd fixups are to be freed
1924  *
1925  * If the transaction is being torn down prior to being
1926  * processed by the target process, free all of the
1927  * fd fixups and fput the file structs. It is safe to
1928  * call this function after the fixups have been
1929  * processed -- in that case, the list will be empty.
1930  */
1931 static void binder_free_txn_fixups(struct binder_transaction *t)
1932 {
1933 	struct binder_txn_fd_fixup *fixup, *tmp;
1934 
1935 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1936 		fput(fixup->file);
1937 		list_del(&fixup->fixup_entry);
1938 		kfree(fixup);
1939 	}
1940 }
1941 
1942 static void binder_free_transaction(struct binder_transaction *t)
1943 {
1944 	if (t->buffer)
1945 		t->buffer->transaction = NULL;
1946 	binder_free_txn_fixups(t);
1947 	kfree(t);
1948 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
1949 }
1950 
1951 static void binder_send_failed_reply(struct binder_transaction *t,
1952 				     uint32_t error_code)
1953 {
1954 	struct binder_thread *target_thread;
1955 	struct binder_transaction *next;
1956 
1957 	BUG_ON(t->flags & TF_ONE_WAY);
1958 	while (1) {
1959 		target_thread = binder_get_txn_from_and_acq_inner(t);
1960 		if (target_thread) {
1961 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1962 				     "send failed reply for transaction %d to %d:%d\n",
1963 				      t->debug_id,
1964 				      target_thread->proc->pid,
1965 				      target_thread->pid);
1966 
1967 			binder_pop_transaction_ilocked(target_thread, t);
1968 			if (target_thread->reply_error.cmd == BR_OK) {
1969 				target_thread->reply_error.cmd = error_code;
1970 				binder_enqueue_thread_work_ilocked(
1971 					target_thread,
1972 					&target_thread->reply_error.work);
1973 				wake_up_interruptible(&target_thread->wait);
1974 			} else {
1975 				/*
1976 				 * Cannot get here for normal operation, but
1977 				 * we can if multiple synchronous transactions
1978 				 * are sent without blocking for responses.
1979 				 * Just ignore the 2nd error in this case.
1980 				 */
1981 				pr_warn("Unexpected reply error: %u\n",
1982 					target_thread->reply_error.cmd);
1983 			}
1984 			binder_inner_proc_unlock(target_thread->proc);
1985 			binder_thread_dec_tmpref(target_thread);
1986 			binder_free_transaction(t);
1987 			return;
1988 		} else {
1989 			__release(&target_thread->proc->inner_lock);
1990 		}
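		/*
		 * The target thread is gone; walk up the sender's
		 * transaction stack and retry with the next transaction
		 * waiting on this chain.
		 */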
1991 		next = t->from_parent;
1992 
1993 		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1994 			     "send failed reply for transaction %d, target dead\n",
1995 			     t->debug_id);
1996 
1997 		binder_free_transaction(t);
1998 		if (next == NULL) {
1999 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
2000 				     "reply failed, no target thread at root\n");
2001 			return;
2002 		}
2003 		t = next;
2004 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
2005 			     "reply failed, no target thread -- retry %d\n",
2006 			      t->debug_id);
2007 	}
2008 }
2009 
2010 /**
2011  * binder_cleanup_transaction() - cleans up undelivered transaction
2012  * @t:		transaction that needs to be cleaned up
2013  * @reason:	reason the transaction wasn't delivered
2014  * @error_code:	error to return to caller (if synchronous call)
2015  */
2016 static void binder_cleanup_transaction(struct binder_transaction *t,
2017 				       const char *reason,
2018 				       uint32_t error_code)
2019 {
2020 	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2021 		binder_send_failed_reply(t, error_code);
2022 	} else {
2023 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2024 			"undelivered transaction %d, %s\n",
2025 			t->debug_id, reason);
2026 		binder_free_transaction(t);
2027 	}
2028 }
2029 
2030 /**
2031  * binder_get_object() - gets object and checks for valid metadata
2032  * @proc:	binder_proc owning the buffer
2033  * @buffer:	binder_buffer that we're parsing.
2034  * @offset:	offset in the @buffer at which to validate an object.
2035  * @object:	struct binder_object to read into
2036  *
2037  * Return:	If there's a valid metadata object at @offset in @buffer, the
2038  *		size of that object. Otherwise, it returns zero. The object
2039  *		is read into the struct binder_object pointed to by @object.
2040  */
2041 static size_t binder_get_object(struct binder_proc *proc,
2042 				struct binder_buffer *buffer,
2043 				unsigned long offset,
2044 				struct binder_object *object)
2045 {
2046 	size_t read_size;
2047 	struct binder_object_header *hdr;
2048 	size_t object_size = 0;
2049 
2050 	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
2051 	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
2052 	    !IS_ALIGNED(offset, sizeof(u32)))
2053 		return 0;
2054 	binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
2055 				      offset, read_size);
2056 
2057 	/* Ok, now see if we read a complete object. */
2058 	hdr = &object->hdr;
2059 	switch (hdr->type) {
2060 	case BINDER_TYPE_BINDER:
2061 	case BINDER_TYPE_WEAK_BINDER:
2062 	case BINDER_TYPE_HANDLE:
2063 	case BINDER_TYPE_WEAK_HANDLE:
2064 		object_size = sizeof(struct flat_binder_object);
2065 		break;
2066 	case BINDER_TYPE_FD:
2067 		object_size = sizeof(struct binder_fd_object);
2068 		break;
2069 	case BINDER_TYPE_PTR:
2070 		object_size = sizeof(struct binder_buffer_object);
2071 		break;
2072 	case BINDER_TYPE_FDA:
2073 		object_size = sizeof(struct binder_fd_array_object);
2074 		break;
2075 	default:
2076 		return 0;
2077 	}
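	/* the object must lie entirely within the buffer's data area */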
2078 	if (offset <= buffer->data_size - object_size &&
2079 	    buffer->data_size >= object_size)
2080 		return object_size;
2081 	else
2082 		return 0;
2083 }
2084 
2085 /**
2086  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2087  * @proc:	binder_proc owning the buffer
2088  * @b:		binder_buffer containing the object
2089  * @object:	struct binder_object to read into
2090  * @index:	index in offset array at which the binder_buffer_object is
2091  *		located
2092  * @start_offset: points to the start of the offset array
2093  * @object_offsetp: offset of @object read from @b
2094  * @num_valid:	the number of valid offsets in the offset array
2095  *
2096  * Return:	If @index is within the valid range of the offset array
2097  *		described by @start_offset and @num_valid, and if there's a valid
2098  *		binder_buffer_object at the offset found in index @index
2099  *		of the offset array, that object is returned. Otherwise,
2100  *		%NULL is returned.
2101  *		Note that the offset found in index @index itself is not
2102  *		verified; this function assumes that @num_valid elements
2103  *		from @start_offset were previously verified to have valid offsets.
2104  *		If @object_offsetp is non-NULL, then the offset within
2105  *		@b is written to it.
2106  */
2107 static struct binder_buffer_object *binder_validate_ptr(
2108 						struct binder_proc *proc,
2109 						struct binder_buffer *b,
2110 						struct binder_object *object,
2111 						binder_size_t index,
2112 						binder_size_t start_offset,
2113 						binder_size_t *object_offsetp,
2114 						binder_size_t num_valid)
2115 {
2116 	size_t object_size;
2117 	binder_size_t object_offset;
2118 	unsigned long buffer_offset;
2119 
2120 	if (index >= num_valid)
2121 		return NULL;
2122 
2123 	buffer_offset = start_offset + sizeof(binder_size_t) * index;
2124 	binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2125 				      b, buffer_offset, sizeof(object_offset));
2126 	object_size = binder_get_object(proc, b, object_offset, object);
2127 	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
2128 		return NULL;
2129 	if (object_offsetp)
2130 		*object_offsetp = object_offset;
2131 
2132 	return &object->bbo;
2133 }
2134 
2135 /**
2136  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2137  * @proc:		binder_proc owning the buffer
2138  * @b:			transaction buffer
2139  * @objects_start_offset: offset to start of objects buffer
2140  * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
2141  * @fixup_offset:	start offset in @buffer to fix up
2142  * @last_obj_offset:	offset to last binder_buffer_object that we fixed
2143  * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
2144  *
2145  * Return:		%true if a fixup in buffer @b at offset @fixup_offset
2146  *			is allowed.
2147  *
2148  * For safety reasons, we only allow fixups inside a buffer to happen
2149  * at increasing offsets; additionally, we only allow fixup on the last
2150  * buffer object that was verified, or one of its parents.
2151  *
2152  * Example of what is allowed:
2153  *
2154  * A
2155  *   B (parent = A, offset = 0)
2156  *   C (parent = A, offset = 16)
2157  *     D (parent = C, offset = 0)
2158  *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2159  *
2160  * Examples of what is not allowed:
2161  *
2162  * Decreasing offsets within the same parent:
2163  * A
2164  *   C (parent = A, offset = 16)
2165  *   B (parent = A, offset = 0) // decreasing offset within A
2166  *
2167  * Referring to a parent that wasn't the last object or any of its parents:
2168  * A
2169  *   B (parent = A, offset = 0)
2170  *   C (parent = A, offset = 0)
2171  *   C (parent = A, offset = 16)
2172  *     D (parent = B, offset = 0) // B is not A or any of A's parents
2173  */
2174 static bool binder_validate_fixup(struct binder_proc *proc,
2175 				  struct binder_buffer *b,
2176 				  binder_size_t objects_start_offset,
2177 				  binder_size_t buffer_obj_offset,
2178 				  binder_size_t fixup_offset,
2179 				  binder_size_t last_obj_offset,
2180 				  binder_size_t last_min_offset)
2181 {
2182 	if (!last_obj_offset) {
2183 		/* No buffer object verified yet, so nothing to fix up in */
2184 		return false;
2185 	}
2186 
2187 	while (last_obj_offset != buffer_obj_offset) {
2188 		unsigned long buffer_offset;
2189 		struct binder_object last_object;
2190 		struct binder_buffer_object *last_bbo;
2191 		size_t object_size = binder_get_object(proc, b, last_obj_offset,
2192 						       &last_object);
2193 		if (object_size != sizeof(*last_bbo))
2194 			return false;
2195 
2196 		last_bbo = &last_object.bbo;
2197 		/*
2198 		 * Safe to retrieve the parent of last_obj, since it
2199 		 * was already previously verified by the driver.
2200 		 */
2201 		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2202 			return false;
2203 		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
2204 		buffer_offset = objects_start_offset +
2205 			sizeof(binder_size_t) * last_bbo->parent;
2206 		binder_alloc_copy_from_buffer(&proc->alloc, &last_obj_offset,
2207 					      b, buffer_offset,
2208 					      sizeof(last_obj_offset));
2209 	}
2210 	return (fixup_offset >= last_min_offset);
2211 }
2212 
2213 /**
2214  * struct binder_task_work_cb - for deferred close
2215  *
2216  * @twork:                callback_head for task work
2217  * @file:                 file to close
2218  *
2219  * Structure to pass task work to be handled after
2220  * returning from binder_ioctl() via task_work_add().
2221  */
2222 struct binder_task_work_cb {
2223 	struct callback_head twork;
2224 	struct file *file;
2225 };
2226 
2227 /**
2228  * binder_do_fd_close() - close the file for a deferred fd close
2229  * @twork:	callback head for task work
2230  *
2231  * It is not safe to call ksys_close() during the binder_ioctl()
2232  * function if there is a chance that binder's own file descriptor
2233  * might be closed. This is to meet the requirements for using
2234  * fdget() (see comments for __fget_light()). Therefore use
2235  * task_work_add() to schedule the close operation once we have
2236  * returned from binder_ioctl(). This function is a callback
2237  * for that mechanism and does the actual ksys_close() on the
2238  * given file descriptor.
2239  */
2240 static void binder_do_fd_close(struct callback_head *twork)
2241 {
2242 	struct binder_task_work_cb *twcb = container_of(twork,
2243 			struct binder_task_work_cb, twork);
2244 
2245 	fput(twcb->file);
2246 	kfree(twcb);
2247 }
2248 
2249 /**
2250  * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2251  * @fd:		file-descriptor to close
2252  *
2253  * See comments in binder_do_fd_close(). This function is used to schedule
2254  * a file-descriptor to be closed after returning from binder_ioctl().
2255  */
2256 static void binder_deferred_fd_close(int fd)
2257 {
2258 	struct binder_task_work_cb *twcb;
2259 
2260 	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2261 	if (!twcb)
2262 		return;
2263 	init_task_work(&twcb->twork, binder_do_fd_close);
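	/*
	 * __close_fd_get_file() removes the fd from the table right away
	 * but returns the struct file so the final fput() can be deferred
	 * to task work, after binder_ioctl() has returned.
	 */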
2264 	__close_fd_get_file(fd, &twcb->file);
2265 	if (twcb->file)
2266 		task_work_add(current, &twcb->twork, true);
2267 	else
2268 		kfree(twcb);
2269 }
2270 
2271 static void binder_transaction_buffer_release(struct binder_proc *proc,
2272 					      struct binder_buffer *buffer,
2273 					      binder_size_t failed_at,
2274 					      bool is_failure)
2275 {
2276 	int debug_id = buffer->debug_id;
2277 	binder_size_t off_start_offset, buffer_offset, off_end_offset;
2278 
2279 	binder_debug(BINDER_DEBUG_TRANSACTION,
2280 		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2281 		     proc->pid, buffer->debug_id,
2282 		     buffer->data_size, buffer->offsets_size,
2283 		     (unsigned long long)failed_at);
2284 
2285 	if (buffer->target_node)
2286 		binder_dec_node(buffer->target_node, 1, 0);
2287 
2288 	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2289 	off_end_offset = is_failure ? failed_at :
2290 				off_start_offset + buffer->offsets_size;
2291 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2292 	     buffer_offset += sizeof(binder_size_t)) {
2293 		struct binder_object_header *hdr;
2294 		size_t object_size;
2295 		struct binder_object object;
2296 		binder_size_t object_offset;
2297 
2298 		binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2299 					      buffer, buffer_offset,
2300 					      sizeof(object_offset));
2301 		object_size = binder_get_object(proc, buffer,
2302 						object_offset, &object);
2303 		if (object_size == 0) {
2304 			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2305 			       debug_id, (u64)object_offset, buffer->data_size);
2306 			continue;
2307 		}
2308 		hdr = &object.hdr;
2309 		switch (hdr->type) {
2310 		case BINDER_TYPE_BINDER:
2311 		case BINDER_TYPE_WEAK_BINDER: {
2312 			struct flat_binder_object *fp;
2313 			struct binder_node *node;
2314 
2315 			fp = to_flat_binder_object(hdr);
2316 			node = binder_get_node(proc, fp->binder);
2317 			if (node == NULL) {
2318 				pr_err("transaction release %d bad node %016llx\n",
2319 				       debug_id, (u64)fp->binder);
2320 				break;
2321 			}
2322 			binder_debug(BINDER_DEBUG_TRANSACTION,
2323 				     "        node %d u%016llx\n",
2324 				     node->debug_id, (u64)node->ptr);
2325 			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2326 					0);
2327 			binder_put_node(node);
2328 		} break;
2329 		case BINDER_TYPE_HANDLE:
2330 		case BINDER_TYPE_WEAK_HANDLE: {
2331 			struct flat_binder_object *fp;
2332 			struct binder_ref_data rdata;
2333 			int ret;
2334 
2335 			fp = to_flat_binder_object(hdr);
2336 			ret = binder_dec_ref_for_handle(proc, fp->handle,
2337 				hdr->type == BINDER_TYPE_HANDLE, &rdata);
2338 
2339 			if (ret) {
2340 				pr_err("transaction release %d bad handle %d, ret = %d\n",
2341 				 debug_id, fp->handle, ret);
2342 				break;
2343 			}
2344 			binder_debug(BINDER_DEBUG_TRANSACTION,
2345 				     "        ref %d desc %d\n",
2346 				     rdata.debug_id, rdata.desc);
2347 		} break;
2348 
2349 		case BINDER_TYPE_FD: {
2350 			/*
2351 			 * No need to close the file here since user-space
2352 			 * closes it for successfully delivered
2353 			 * transactions. For transactions that weren't
2354 			 * delivered, the new fd was never allocated so
2355 			 * there is no need to close and the fput on the
2356 			 * file is done when the transaction is torn
2357 			 * down.
2358 			 */
2359 			WARN_ON(failed_at &&
2360 				proc->tsk == current->group_leader);
2361 		} break;
2362 		case BINDER_TYPE_PTR:
2363 			/*
2364 			 * Nothing to do here, this will get cleaned up when the
2365 			 * transaction buffer gets freed
2366 			 */
2367 			break;
2368 		case BINDER_TYPE_FDA: {
2369 			struct binder_fd_array_object *fda;
2370 			struct binder_buffer_object *parent;
2371 			struct binder_object ptr_object;
2372 			binder_size_t fda_offset;
2373 			size_t fd_index;
2374 			binder_size_t fd_buf_size;
2375 			binder_size_t num_valid;
2376 
2377 			if (proc->tsk != current->group_leader) {
2378 				/*
2379 				 * Nothing to do if running in sender context
2380 				 * The fd fixups have not been applied so no
2381 				 * fds need to be closed.
2382 				 */
2383 				continue;
2384 			}
2385 
2386 			num_valid = (buffer_offset - off_start_offset) /
2387 						sizeof(binder_size_t);
2388 			fda = to_binder_fd_array_object(hdr);
2389 			parent = binder_validate_ptr(proc, buffer, &ptr_object,
2390 						     fda->parent,
2391 						     off_start_offset,
2392 						     NULL,
2393 						     num_valid);
2394 			if (!parent) {
2395 				pr_err("transaction release %d bad parent offset\n",
2396 				       debug_id);
2397 				continue;
2398 			}
2399 			fd_buf_size = sizeof(u32) * fda->num_fds;
2400 			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2401 				pr_err("transaction release %d invalid number of fds (%lld)\n",
2402 				       debug_id, (u64)fda->num_fds);
2403 				continue;
2404 			}
2405 			if (fd_buf_size > parent->length ||
2406 			    fda->parent_offset > parent->length - fd_buf_size) {
2407 				/* No space for all file descriptors here. */
2408 				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2409 				       debug_id, (u64)fda->num_fds);
2410 				continue;
2411 			}
2412 			/*
2413 			 * the source data for binder_buffer_object is visible
2414 			 * to user-space and the @buffer element is the user
2415 			 * pointer to the buffer_object containing the fd_array.
2416 			 * Convert the address to an offset relative to
2417 			 * the base of the transaction buffer.
2418 			 */
2419 			fda_offset =
2420 			    (parent->buffer - (uintptr_t)buffer->user_data) +
2421 			    fda->parent_offset;
2422 			for (fd_index = 0; fd_index < fda->num_fds;
2423 			     fd_index++) {
2424 				u32 fd;
2425 				binder_size_t offset = fda_offset +
2426 					fd_index * sizeof(fd);
2427 
2428 				binder_alloc_copy_from_buffer(&proc->alloc,
2429 							      &fd,
2430 							      buffer,
2431 							      offset,
2432 							      sizeof(fd));
2433 				binder_deferred_fd_close(fd);
2434 			}
2435 		} break;
2436 		default:
2437 			pr_err("transaction release %d bad object type %x\n",
2438 				debug_id, hdr->type);
2439 			break;
2440 		}
2441 	}
2442 }
2443 
2444 static int binder_translate_binder(struct flat_binder_object *fp,
2445 				   struct binder_transaction *t,
2446 				   struct binder_thread *thread)
2447 {
2448 	struct binder_node *node;
2449 	struct binder_proc *proc = thread->proc;
2450 	struct binder_proc *target_proc = t->to_proc;
2451 	struct binder_ref_data rdata;
2452 	int ret = 0;
2453 
2454 	node = binder_get_node(proc, fp->binder);
2455 	if (!node) {
2456 		node = binder_new_node(proc, fp);
2457 		if (!node)
2458 			return -ENOMEM;
2459 	}
2460 	if (fp->cookie != node->cookie) {
2461 		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2462 				  proc->pid, thread->pid, (u64)fp->binder,
2463 				  node->debug_id, (u64)fp->cookie,
2464 				  (u64)node->cookie);
2465 		ret = -EINVAL;
2466 		goto done;
2467 	}
2468 	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2469 		ret = -EPERM;
2470 		goto done;
2471 	}
2472 
2473 	ret = binder_inc_ref_for_node(target_proc, node,
2474 			fp->hdr.type == BINDER_TYPE_BINDER,
2475 			&thread->todo, &rdata);
2476 	if (ret)
2477 		goto done;
2478 
2479 	if (fp->hdr.type == BINDER_TYPE_BINDER)
2480 		fp->hdr.type = BINDER_TYPE_HANDLE;
2481 	else
2482 		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2483 	fp->binder = 0;
2484 	fp->handle = rdata.desc;
2485 	fp->cookie = 0;
2486 
2487 	trace_binder_transaction_node_to_ref(t, node, &rdata);
2488 	binder_debug(BINDER_DEBUG_TRANSACTION,
2489 		     "        node %d u%016llx -> ref %d desc %d\n",
2490 		     node->debug_id, (u64)node->ptr,
2491 		     rdata.debug_id, rdata.desc);
2492 done:
2493 	binder_put_node(node);
2494 	return ret;
2495 }
2496 
2497 static int binder_translate_handle(struct flat_binder_object *fp,
2498 				   struct binder_transaction *t,
2499 				   struct binder_thread *thread)
2500 {
2501 	struct binder_proc *proc = thread->proc;
2502 	struct binder_proc *target_proc = t->to_proc;
2503 	struct binder_node *node;
2504 	struct binder_ref_data src_rdata;
2505 	int ret = 0;
2506 
2507 	node = binder_get_node_from_ref(proc, fp->handle,
2508 			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2509 	if (!node) {
2510 		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2511 				  proc->pid, thread->pid, fp->handle);
2512 		return -EINVAL;
2513 	}
2514 	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2515 		ret = -EPERM;
2516 		goto done;
2517 	}
2518 
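	/*
	 * If the node lives in the target process, convert the handle
	 * back into a (weak) binder object; otherwise get a ref on the
	 * node in the target process.
	 */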
2519 	binder_node_lock(node);
2520 	if (node->proc == target_proc) {
2521 		if (fp->hdr.type == BINDER_TYPE_HANDLE)
2522 			fp->hdr.type = BINDER_TYPE_BINDER;
2523 		else
2524 			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2525 		fp->binder = node->ptr;
2526 		fp->cookie = node->cookie;
2527 		if (node->proc)
2528 			binder_inner_proc_lock(node->proc);
2529 		else
2530 			__acquire(&node->proc->inner_lock);
2531 		binder_inc_node_nilocked(node,
2532 					 fp->hdr.type == BINDER_TYPE_BINDER,
2533 					 0, NULL);
2534 		if (node->proc)
2535 			binder_inner_proc_unlock(node->proc);
2536 		else
2537 			__release(&node->proc->inner_lock);
2538 		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2539 		binder_debug(BINDER_DEBUG_TRANSACTION,
2540 			     "        ref %d desc %d -> node %d u%016llx\n",
2541 			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
2542 			     (u64)node->ptr);
2543 		binder_node_unlock(node);
2544 	} else {
2545 		struct binder_ref_data dest_rdata;
2546 
2547 		binder_node_unlock(node);
2548 		ret = binder_inc_ref_for_node(target_proc, node,
2549 				fp->hdr.type == BINDER_TYPE_HANDLE,
2550 				NULL, &dest_rdata);
2551 		if (ret)
2552 			goto done;
2553 
2554 		fp->binder = 0;
2555 		fp->handle = dest_rdata.desc;
2556 		fp->cookie = 0;
2557 		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2558 						    &dest_rdata);
2559 		binder_debug(BINDER_DEBUG_TRANSACTION,
2560 			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2561 			     src_rdata.debug_id, src_rdata.desc,
2562 			     dest_rdata.debug_id, dest_rdata.desc,
2563 			     node->debug_id);
2564 	}
2565 done:
2566 	binder_put_node(node);
2567 	return ret;
2568 }
2569 
2570 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2571 			       struct binder_transaction *t,
2572 			       struct binder_thread *thread,
2573 			       struct binder_transaction *in_reply_to)
2574 {
2575 	struct binder_proc *proc = thread->proc;
2576 	struct binder_proc *target_proc = t->to_proc;
2577 	struct binder_txn_fd_fixup *fixup;
2578 	struct file *file;
2579 	int ret = 0;
2580 	bool target_allows_fd;
2581 
2582 	if (in_reply_to)
2583 		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2584 	else
2585 		target_allows_fd = t->buffer->target_node->accept_fds;
2586 	if (!target_allows_fd) {
2587 		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2588 				  proc->pid, thread->pid,
2589 				  in_reply_to ? "reply" : "transaction",
2590 				  fd);
2591 		ret = -EPERM;
2592 		goto err_fd_not_accepted;
2593 	}
2594 
2595 	file = fget(fd);
2596 	if (!file) {
2597 		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2598 				  proc->pid, thread->pid, fd);
2599 		ret = -EBADF;
2600 		goto err_fget;
2601 	}
2602 	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2603 	if (ret < 0) {
2604 		ret = -EPERM;
2605 		goto err_security;
2606 	}
2607 
2608 	/*
2609 	 * Add fixup record for this transaction. The allocation
2610 	 * of the fd in the target needs to be done from a
2611 	 * target thread.
2612 	 */
2613 	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2614 	if (!fixup) {
2615 		ret = -ENOMEM;
2616 		goto err_alloc;
2617 	}
2618 	fixup->file = file;
2619 	fixup->offset = fd_offset;
2620 	trace_binder_transaction_fd_send(t, fd, fixup->offset);
2621 	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2622 
2623 	return ret;
2624 
2625 err_alloc:
2626 err_security:
2627 	fput(file);
2628 err_fget:
2629 err_fd_not_accepted:
2630 	return ret;
2631 }
2632 
2633 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2634 				     struct binder_buffer_object *parent,
2635 				     struct binder_transaction *t,
2636 				     struct binder_thread *thread,
2637 				     struct binder_transaction *in_reply_to)
2638 {
2639 	binder_size_t fdi, fd_buf_size;
2640 	binder_size_t fda_offset;
2641 	struct binder_proc *proc = thread->proc;
2642 	struct binder_proc *target_proc = t->to_proc;
2643 
2644 	fd_buf_size = sizeof(u32) * fda->num_fds;
2645 	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2646 		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2647 				  proc->pid, thread->pid, (u64)fda->num_fds);
2648 		return -EINVAL;
2649 	}
2650 	if (fd_buf_size > parent->length ||
2651 	    fda->parent_offset > parent->length - fd_buf_size) {
2652 		/* No space for all file descriptors here. */
2653 		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2654 				  proc->pid, thread->pid, (u64)fda->num_fds);
2655 		return -EINVAL;
2656 	}
2657 	/*
2658 	 * the source data for binder_buffer_object is visible
2659 	 * to user-space and the @buffer element is the user
2660 	 * pointer to the buffer_object containing the fd_array.
2661 	 * Convert the address to an offset relative to
2662 	 * the base of the transaction buffer.
2663 	 */
2664 	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2665 		fda->parent_offset;
2666 	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
2667 		binder_user_error("%d:%d parent offset not aligned correctly.\n",
2668 				  proc->pid, thread->pid);
2669 		return -EINVAL;
2670 	}
2671 	for (fdi = 0; fdi < fda->num_fds; fdi++) {
2672 		u32 fd;
2673 		int ret;
2674 		binder_size_t offset = fda_offset + fdi * sizeof(fd);
2675 
2676 		binder_alloc_copy_from_buffer(&target_proc->alloc,
2677 					      &fd, t->buffer,
2678 					      offset, sizeof(fd));
2679 		ret = binder_translate_fd(fd, offset, t, thread,
2680 					  in_reply_to);
2681 		if (ret < 0)
2682 			return ret;
2683 	}
2684 	return 0;
2685 }
2686 
2687 static int binder_fixup_parent(struct binder_transaction *t,
2688 			       struct binder_thread *thread,
2689 			       struct binder_buffer_object *bp,
2690 			       binder_size_t off_start_offset,
2691 			       binder_size_t num_valid,
2692 			       binder_size_t last_fixup_obj_off,
2693 			       binder_size_t last_fixup_min_off)
2694 {
2695 	struct binder_buffer_object *parent;
2696 	struct binder_buffer *b = t->buffer;
2697 	struct binder_proc *proc = thread->proc;
2698 	struct binder_proc *target_proc = t->to_proc;
2699 	struct binder_object object;
2700 	binder_size_t buffer_offset;
2701 	binder_size_t parent_offset;
2702 
2703 	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2704 		return 0;
2705 
2706 	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2707 				     off_start_offset, &parent_offset,
2708 				     num_valid);
2709 	if (!parent) {
2710 		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2711 				  proc->pid, thread->pid);
2712 		return -EINVAL;
2713 	}
2714 
2715 	if (!binder_validate_fixup(target_proc, b, off_start_offset,
2716 				   parent_offset, bp->parent_offset,
2717 				   last_fixup_obj_off,
2718 				   last_fixup_min_off)) {
2719 		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2720 				  proc->pid, thread->pid);
2721 		return -EINVAL;
2722 	}
2723 
2724 	if (parent->length < sizeof(binder_uintptr_t) ||
2725 	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2726 		/* No space for a pointer here! */
2727 		binder_user_error("%d:%d got transaction with invalid parent offset\n",
2728 				  proc->pid, thread->pid);
2729 		return -EINVAL;
2730 	}
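	/* write bp->buffer into the parent buffer at the requested fixup offset */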
2731 	buffer_offset = bp->parent_offset +
2732 			(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2733 	binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
2734 				    &bp->buffer, sizeof(bp->buffer));
2735 
2736 	return 0;
2737 }
2738 
2739 /**
2740  * binder_proc_transaction() - sends a transaction to a process and wakes it up
2741  * @t:		transaction to send
2742  * @proc:	process to send the transaction to
2743  * @thread:	thread in @proc to send the transaction to (may be NULL)
2744  *
2745  * This function queues a transaction to the specified process. It will try
2746  * to find a thread in the target process to handle the transaction and
2747  * wake it up. If no thread is found, the work is queued to the proc
2748  * waitqueue.
2749  *
2750  * If the @thread parameter is not NULL, the transaction is always queued
2751  * to the waitlist of that specific thread.
2752  *
2753  * Return:	true if the transactions was successfully queued
2754  *		false if the target process or thread is dead
2755  */
2756 static bool binder_proc_transaction(struct binder_transaction *t,
2757 				    struct binder_proc *proc,
2758 				    struct binder_thread *thread)
2759 {
2760 	struct binder_node *node = t->buffer->target_node;
2761 	bool oneway = !!(t->flags & TF_ONE_WAY);
2762 	bool pending_async = false;
2763 
2764 	BUG_ON(!node);
2765 	binder_node_lock(node);
2766 	if (oneway) {
2767 		BUG_ON(thread);
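		/*
		 * Only one async transaction per node is delivered at a
		 * time; later ones park on node->async_todo until the
		 * current one's buffer is freed.
		 */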
2768 		if (node->has_async_transaction) {
2769 			pending_async = true;
2770 		} else {
2771 			node->has_async_transaction = true;
2772 		}
2773 	}
2774 
2775 	binder_inner_proc_lock(proc);
2776 
2777 	if (proc->is_dead || (thread && thread->is_dead)) {
2778 		binder_inner_proc_unlock(proc);
2779 		binder_node_unlock(node);
2780 		return false;
2781 	}
2782 
2783 	if (!thread && !pending_async)
2784 		thread = binder_select_thread_ilocked(proc);
2785 
2786 	if (thread)
2787 		binder_enqueue_thread_work_ilocked(thread, &t->work);
2788 	else if (!pending_async)
2789 		binder_enqueue_work_ilocked(&t->work, &proc->todo);
2790 	else
2791 		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2792 
2793 	if (!pending_async)
2794 		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2795 
2796 	binder_inner_proc_unlock(proc);
2797 	binder_node_unlock(node);
2798 
2799 	return true;
2800 }
2801 
2802 /**
2803  * binder_get_node_refs_for_txn() - Get required refs on node for txn
2804  * @node:         struct binder_node for which to get refs
2805  * @procp:        returns @node->proc if valid
2806  * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
2807  *
2808  * User-space normally keeps the node alive when creating a transaction
2809  * since it has a reference to the target. The local strong ref keeps it
2810  * alive if the sending process dies before the target process processes
2811  * the transaction. If the source process is malicious or has a reference
2812  * counting bug, relying on the local strong ref can fail.
2813  *
2814  * Since user-space can cause the local strong ref to go away, we also take
2815  * a tmpref on the node to ensure it survives while we are constructing
2816  * the transaction. We also need a tmpref on the proc while we are
2817  * constructing the transaction, so we take that here as well.
2818  *
2819  * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2820  * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2821  * target proc has died, @error is set to BR_DEAD_REPLY.
2822  */
2823 static struct binder_node *binder_get_node_refs_for_txn(
2824 		struct binder_node *node,
2825 		struct binder_proc **procp,
2826 		uint32_t *error)
2827 {
2828 	struct binder_node *target_node = NULL;
2829 
2830 	binder_node_inner_lock(node);
2831 	if (node->proc) {
2832 		target_node = node;
2833 		binder_inc_node_nilocked(node, 1, 0, NULL);
2834 		binder_inc_node_tmpref_ilocked(node);
2835 		node->proc->tmp_ref++;
2836 		*procp = node->proc;
2837 	} else
2838 		*error = BR_DEAD_REPLY;
2839 	binder_node_inner_unlock(node);
2840 
2841 	return target_node;
2842 }
2843 
2844 static void binder_transaction(struct binder_proc *proc,
2845 			       struct binder_thread *thread,
2846 			       struct binder_transaction_data *tr, int reply,
2847 			       binder_size_t extra_buffers_size)
2848 {
2849 	int ret;
2850 	struct binder_transaction *t;
2851 	struct binder_work *w;
2852 	struct binder_work *tcomplete;
2853 	binder_size_t buffer_offset = 0;
2854 	binder_size_t off_start_offset, off_end_offset;
2855 	binder_size_t off_min;
2856 	binder_size_t sg_buf_offset, sg_buf_end_offset;
2857 	struct binder_proc *target_proc = NULL;
2858 	struct binder_thread *target_thread = NULL;
2859 	struct binder_node *target_node = NULL;
2860 	struct binder_transaction *in_reply_to = NULL;
2861 	struct binder_transaction_log_entry *e;
2862 	uint32_t return_error = 0;
2863 	uint32_t return_error_param = 0;
2864 	uint32_t return_error_line = 0;
2865 	binder_size_t last_fixup_obj_off = 0;
2866 	binder_size_t last_fixup_min_off = 0;
2867 	struct binder_context *context = proc->context;
2868 	int t_debug_id = atomic_inc_return(&binder_last_id);
2869 	char *secctx = NULL;
2870 	u32 secctx_sz = 0;
2871 
2872 	e = binder_transaction_log_add(&binder_transaction_log);
2873 	e->debug_id = t_debug_id;
2874 	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2875 	e->from_proc = proc->pid;
2876 	e->from_thread = thread->pid;
2877 	e->target_handle = tr->target.handle;
2878 	e->data_size = tr->data_size;
2879 	e->offsets_size = tr->offsets_size;
2880 	e->context_name = proc->context->name;
2881 
2882 	if (reply) {
2883 		binder_inner_proc_lock(proc);
2884 		in_reply_to = thread->transaction_stack;
2885 		if (in_reply_to == NULL) {
2886 			binder_inner_proc_unlock(proc);
2887 			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2888 					  proc->pid, thread->pid);
2889 			return_error = BR_FAILED_REPLY;
2890 			return_error_param = -EPROTO;
2891 			return_error_line = __LINE__;
2892 			goto err_empty_call_stack;
2893 		}
2894 		if (in_reply_to->to_thread != thread) {
2895 			spin_lock(&in_reply_to->lock);
2896 			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2897 				proc->pid, thread->pid, in_reply_to->debug_id,
2898 				in_reply_to->to_proc ?
2899 				in_reply_to->to_proc->pid : 0,
2900 				in_reply_to->to_thread ?
2901 				in_reply_to->to_thread->pid : 0);
2902 			spin_unlock(&in_reply_to->lock);
2903 			binder_inner_proc_unlock(proc);
2904 			return_error = BR_FAILED_REPLY;
2905 			return_error_param = -EPROTO;
2906 			return_error_line = __LINE__;
2907 			in_reply_to = NULL;
2908 			goto err_bad_call_stack;
2909 		}
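		/* pop the transaction being replied to off this thread's stack */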
2910 		thread->transaction_stack = in_reply_to->to_parent;
2911 		binder_inner_proc_unlock(proc);
2912 		binder_set_nice(in_reply_to->saved_priority);
2913 		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2914 		if (target_thread == NULL) {
2915 			/* annotation for sparse */
2916 			__release(&target_thread->proc->inner_lock);
2917 			return_error = BR_DEAD_REPLY;
2918 			return_error_line = __LINE__;
2919 			goto err_dead_binder;
2920 		}
2921 		if (target_thread->transaction_stack != in_reply_to) {
2922 			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2923 				proc->pid, thread->pid,
2924 				target_thread->transaction_stack ?
2925 				target_thread->transaction_stack->debug_id : 0,
2926 				in_reply_to->debug_id);
2927 			binder_inner_proc_unlock(target_thread->proc);
2928 			return_error = BR_FAILED_REPLY;
2929 			return_error_param = -EPROTO;
2930 			return_error_line = __LINE__;
2931 			in_reply_to = NULL;
2932 			target_thread = NULL;
2933 			goto err_dead_binder;
2934 		}
2935 		target_proc = target_thread->proc;
2936 		target_proc->tmp_ref++;
2937 		binder_inner_proc_unlock(target_thread->proc);
2938 	} else {
2939 		if (tr->target.handle) {
2940 			struct binder_ref *ref;
2941 
2942 			/*
2943 			 * There must already be a strong ref
2944 			 * on this node. If so, do a strong
2945 			 * increment on the node to ensure it
2946 			 * stays alive until the transaction is
2947 			 * done.
2948 			 */
2949 			binder_proc_lock(proc);
2950 			ref = binder_get_ref_olocked(proc, tr->target.handle,
2951 						     true);
2952 			if (ref) {
2953 				target_node = binder_get_node_refs_for_txn(
2954 						ref->node, &target_proc,
2955 						&return_error);
2956 			} else {
2957 				binder_user_error("%d:%d got transaction to invalid handle\n",
2958 						  proc->pid, thread->pid);
2959 				return_error = BR_FAILED_REPLY;
2960 			}
2961 			binder_proc_unlock(proc);
2962 		} else {
2963 			mutex_lock(&context->context_mgr_node_lock);
2964 			target_node = context->binder_context_mgr_node;
2965 			if (target_node)
2966 				target_node = binder_get_node_refs_for_txn(
2967 						target_node, &target_proc,
2968 						&return_error);
2969 			else
2970 				return_error = BR_DEAD_REPLY;
2971 			mutex_unlock(&context->context_mgr_node_lock);
2972 			if (target_node && target_proc == proc) {
2973 				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2974 						  proc->pid, thread->pid);
2975 				return_error = BR_FAILED_REPLY;
2976 				return_error_param = -EINVAL;
2977 				return_error_line = __LINE__;
2978 				goto err_invalid_target_handle;
2979 			}
2980 		}
2981 		if (!target_node) {
2982 			/*
2983 			 * return_error is set above
2984 			 */
2985 			return_error_param = -EINVAL;
2986 			return_error_line = __LINE__;
2987 			goto err_dead_binder;
2988 		}
2989 		e->to_node = target_node->debug_id;
2990 		if (security_binder_transaction(proc->tsk,
2991 						target_proc->tsk) < 0) {
2992 			return_error = BR_FAILED_REPLY;
2993 			return_error_param = -EPERM;
2994 			return_error_line = __LINE__;
2995 			goto err_invalid_target_handle;
2996 		}
2997 		binder_inner_proc_lock(proc);
2998 
2999 		w = list_first_entry_or_null(&thread->todo,
3000 					     struct binder_work, entry);
3001 		if (!(tr->flags & TF_ONE_WAY) && w &&
3002 		    w->type == BINDER_WORK_TRANSACTION) {
3003 			/*
3004 			 * Do not allow new outgoing transaction from a
3005 			 * thread that has a transaction at the head of
3006 			 * its todo list. Only need to check the head
3007 			 * because binder_select_thread_ilocked picks a
3008 			 * thread from proc->waiting_threads to enqueue
3009 			 * the transaction, and nothing is queued to the
3010 			 * todo list while the thread is on waiting_threads.
3011 			 */
3012 			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3013 					  proc->pid, thread->pid);
3014 			binder_inner_proc_unlock(proc);
3015 			return_error = BR_FAILED_REPLY;
3016 			return_error_param = -EPROTO;
3017 			return_error_line = __LINE__;
3018 			goto err_bad_todo_list;
3019 		}
3020 
3021 		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3022 			struct binder_transaction *tmp;
3023 
3024 			tmp = thread->transaction_stack;
3025 			if (tmp->to_thread != thread) {
3026 				spin_lock(&tmp->lock);
3027 				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3028 					proc->pid, thread->pid, tmp->debug_id,
3029 					tmp->to_proc ? tmp->to_proc->pid : 0,
3030 					tmp->to_thread ?
3031 					tmp->to_thread->pid : 0);
3032 				spin_unlock(&tmp->lock);
3033 				binder_inner_proc_unlock(proc);
3034 				return_error = BR_FAILED_REPLY;
3035 				return_error_param = -EPROTO;
3036 				return_error_line = __LINE__;
3037 				goto err_bad_call_stack;
3038 			}
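			/*
			 * If a thread in the target proc is already waiting
			 * on a transaction somewhere up this call stack,
			 * direct the transaction to that thread.
			 */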
3039 			while (tmp) {
3040 				struct binder_thread *from;
3041 
3042 				spin_lock(&tmp->lock);
3043 				from = tmp->from;
3044 				if (from && from->proc == target_proc) {
3045 					atomic_inc(&from->tmp_ref);
3046 					target_thread = from;
3047 					spin_unlock(&tmp->lock);
3048 					break;
3049 				}
3050 				spin_unlock(&tmp->lock);
3051 				tmp = tmp->from_parent;
3052 			}
3053 		}
3054 		binder_inner_proc_unlock(proc);
3055 	}
3056 	if (target_thread)
3057 		e->to_thread = target_thread->pid;
3058 	e->to_proc = target_proc->pid;
3059 
3060 	/* TODO: reuse incoming transaction for reply */
3061 	t = kzalloc(sizeof(*t), GFP_KERNEL);
3062 	if (t == NULL) {
3063 		return_error = BR_FAILED_REPLY;
3064 		return_error_param = -ENOMEM;
3065 		return_error_line = __LINE__;
3066 		goto err_alloc_t_failed;
3067 	}
3068 	INIT_LIST_HEAD(&t->fd_fixups);
3069 	binder_stats_created(BINDER_STAT_TRANSACTION);
3070 	spin_lock_init(&t->lock);
3071 
3072 	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3073 	if (tcomplete == NULL) {
3074 		return_error = BR_FAILED_REPLY;
3075 		return_error_param = -ENOMEM;
3076 		return_error_line = __LINE__;
3077 		goto err_alloc_tcomplete_failed;
3078 	}
3079 	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3080 
3081 	t->debug_id = t_debug_id;
3082 
3083 	if (reply)
3084 		binder_debug(BINDER_DEBUG_TRANSACTION,
3085 			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3086 			     proc->pid, thread->pid, t->debug_id,
3087 			     target_proc->pid, target_thread->pid,
3088 			     (u64)tr->data.ptr.buffer,
3089 			     (u64)tr->data.ptr.offsets,
3090 			     (u64)tr->data_size, (u64)tr->offsets_size,
3091 			     (u64)extra_buffers_size);
3092 	else
3093 		binder_debug(BINDER_DEBUG_TRANSACTION,
3094 			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3095 			     proc->pid, thread->pid, t->debug_id,
3096 			     target_proc->pid, target_node->debug_id,
3097 			     (u64)tr->data.ptr.buffer,
3098 			     (u64)tr->data.ptr.offsets,
3099 			     (u64)tr->data_size, (u64)tr->offsets_size,
3100 			     (u64)extra_buffers_size);
3101 
3102 	if (!reply && !(tr->flags & TF_ONE_WAY))
3103 		t->from = thread;
3104 	else
3105 		t->from = NULL;
3106 	t->sender_euid = task_euid(proc->tsk);
3107 	t->to_proc = target_proc;
3108 	t->to_thread = target_thread;
3109 	t->code = tr->code;
3110 	t->flags = tr->flags;
3111 	t->priority = task_nice(current);
3112 
3113 	if (target_node && target_node->txn_security_ctx) {
3114 		u32 secid;
3115 		size_t added_size;
3116 
3117 		security_task_getsecid(proc->tsk, &secid);
3118 		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3119 		if (ret) {
3120 			return_error = BR_FAILED_REPLY;
3121 			return_error_param = ret;
3122 			return_error_line = __LINE__;
3123 			goto err_get_secctx_failed;
3124 		}
3125 		added_size = ALIGN(secctx_sz, sizeof(u64));
3126 		extra_buffers_size += added_size;
3127 		if (extra_buffers_size < added_size) {
3128 			/* integer overflow of extra_buffers_size */
3129 			return_error = BR_FAILED_REPLY;
3130 			return_error_param = -EINVAL;
3131 			return_error_line = __LINE__;
3132 			goto err_bad_extra_size;
3133 		}
3134 	}
3135 
3136 	trace_binder_transaction(reply, t, target_node);
3137 
3138 	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3139 		tr->offsets_size, extra_buffers_size,
3140 		!reply && (t->flags & TF_ONE_WAY));
3141 	if (IS_ERR(t->buffer)) {
3142 		/*
3143 		 * -ESRCH indicates VMA cleared. The target is dying.
3144 		 */
3145 		return_error_param = PTR_ERR(t->buffer);
3146 		return_error = return_error_param == -ESRCH ?
3147 			BR_DEAD_REPLY : BR_FAILED_REPLY;
3148 		return_error_line = __LINE__;
3149 		t->buffer = NULL;
3150 		goto err_binder_alloc_buf_failed;
3151 	}
3152 	if (secctx) {
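		/*
		 * The security context was accounted for at the tail of the
		 * extra buffers area above; compute its offset there.
		 */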
3153 		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3154 				    ALIGN(tr->offsets_size, sizeof(void *)) +
3155 				    ALIGN(extra_buffers_size, sizeof(void *)) -
3156 				    ALIGN(secctx_sz, sizeof(u64));
3157 
3158 		t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3159 		binder_alloc_copy_to_buffer(&target_proc->alloc,
3160 					    t->buffer, buf_offset,
3161 					    secctx, secctx_sz);
3162 		security_release_secctx(secctx, secctx_sz);
3163 		secctx = NULL;
3164 	}
3165 	t->buffer->debug_id = t->debug_id;
3166 	t->buffer->transaction = t;
3167 	t->buffer->target_node = target_node;
3168 	trace_binder_transaction_alloc_buf(t->buffer);
3169 
3170 	if (binder_alloc_copy_user_to_buffer(
3171 				&target_proc->alloc,
3172 				t->buffer, 0,
3173 				(const void __user *)
3174 					(uintptr_t)tr->data.ptr.buffer,
3175 				tr->data_size)) {
3176 		binder_user_error("%d:%d got transaction with invalid data ptr\n",
3177 				proc->pid, thread->pid);
3178 		return_error = BR_FAILED_REPLY;
3179 		return_error_param = -EFAULT;
3180 		return_error_line = __LINE__;
3181 		goto err_copy_data_failed;
3182 	}
3183 	if (binder_alloc_copy_user_to_buffer(
3184 				&target_proc->alloc,
3185 				t->buffer,
3186 				ALIGN(tr->data_size, sizeof(void *)),
3187 				(const void __user *)
3188 					(uintptr_t)tr->data.ptr.offsets,
3189 				tr->offsets_size)) {
3190 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3191 				proc->pid, thread->pid);
3192 		return_error = BR_FAILED_REPLY;
3193 		return_error_param = -EFAULT;
3194 		return_error_line = __LINE__;
3195 		goto err_copy_data_failed;
3196 	}
3197 	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3198 		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3199 				proc->pid, thread->pid, (u64)tr->offsets_size);
3200 		return_error = BR_FAILED_REPLY;
3201 		return_error_param = -EINVAL;
3202 		return_error_line = __LINE__;
3203 		goto err_bad_offset;
3204 	}
3205 	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3206 		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3207 				  proc->pid, thread->pid,
3208 				  (u64)extra_buffers_size);
3209 		return_error = BR_FAILED_REPLY;
3210 		return_error_param = -EINVAL;
3211 		return_error_line = __LINE__;
3212 		goto err_bad_offset;
3213 	}
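	/*
	 * Buffer layout: raw data, then the pointer-aligned offsets
	 * array, then the extra buffers (scatter-gather) area.
	 */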
3214 	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3215 	buffer_offset = off_start_offset;
3216 	off_end_offset = off_start_offset + tr->offsets_size;
3217 	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3218 	sg_buf_end_offset = sg_buf_offset + extra_buffers_size;
3219 	off_min = 0;
3220 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3221 	     buffer_offset += sizeof(binder_size_t)) {
3222 		struct binder_object_header *hdr;
3223 		size_t object_size;
3224 		struct binder_object object;
3225 		binder_size_t object_offset;
3226 
3227 		binder_alloc_copy_from_buffer(&target_proc->alloc,
3228 					      &object_offset,
3229 					      t->buffer,
3230 					      buffer_offset,
3231 					      sizeof(object_offset));
3232 		object_size = binder_get_object(target_proc, t->buffer,
3233 						object_offset, &object);
3234 		if (object_size == 0 || object_offset < off_min) {
3235 			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3236 					  proc->pid, thread->pid,
3237 					  (u64)object_offset,
3238 					  (u64)off_min,
3239 					  (u64)t->buffer->data_size);
3240 			return_error = BR_FAILED_REPLY;
3241 			return_error_param = -EINVAL;
3242 			return_error_line = __LINE__;
3243 			goto err_bad_offset;
3244 		}
3245 
3246 		hdr = &object.hdr;
3247 		off_min = object_offset + object_size;
3248 		switch (hdr->type) {
3249 		case BINDER_TYPE_BINDER:
3250 		case BINDER_TYPE_WEAK_BINDER: {
3251 			struct flat_binder_object *fp;
3252 
3253 			fp = to_flat_binder_object(hdr);
3254 			ret = binder_translate_binder(fp, t, thread);
3255 			if (ret < 0) {
3256 				return_error = BR_FAILED_REPLY;
3257 				return_error_param = ret;
3258 				return_error_line = __LINE__;
3259 				goto err_translate_failed;
3260 			}
3261 			binder_alloc_copy_to_buffer(&target_proc->alloc,
3262 						    t->buffer, object_offset,
3263 						    fp, sizeof(*fp));
3264 		} break;
3265 		case BINDER_TYPE_HANDLE:
3266 		case BINDER_TYPE_WEAK_HANDLE: {
3267 			struct flat_binder_object *fp;
3268 
3269 			fp = to_flat_binder_object(hdr);
3270 			ret = binder_translate_handle(fp, t, thread);
3271 			if (ret < 0) {
3272 				return_error = BR_FAILED_REPLY;
3273 				return_error_param = ret;
3274 				return_error_line = __LINE__;
3275 				goto err_translate_failed;
3276 			}
3277 			binder_alloc_copy_to_buffer(&target_proc->alloc,
3278 						    t->buffer, object_offset,
3279 						    fp, sizeof(*fp));
3280 		} break;
3281 
3282 		case BINDER_TYPE_FD: {
3283 			struct binder_fd_object *fp = to_binder_fd_object(hdr);
3284 			binder_size_t fd_offset = object_offset +
3285 				(uintptr_t)&fp->fd - (uintptr_t)fp;
3286 			int ret = binder_translate_fd(fp->fd, fd_offset, t,
3287 						      thread, in_reply_to);
3288 
3289 			if (ret < 0) {
3290 				return_error = BR_FAILED_REPLY;
3291 				return_error_param = ret;
3292 				return_error_line = __LINE__;
3293 				goto err_translate_failed;
3294 			}
3295 			fp->pad_binder = 0;
3296 			binder_alloc_copy_to_buffer(&target_proc->alloc,
3297 						    t->buffer, object_offset,
3298 						    fp, sizeof(*fp));
3299 		} break;
3300 		case BINDER_TYPE_FDA: {
3301 			struct binder_object ptr_object;
3302 			binder_size_t parent_offset;
3303 			struct binder_fd_array_object *fda =
3304 				to_binder_fd_array_object(hdr);
3305 			size_t num_valid = (buffer_offset - off_start_offset) *
3306 						sizeof(binder_size_t);
3307 			struct binder_buffer_object *parent =
3308 				binder_validate_ptr(target_proc, t->buffer,
3309 						    &ptr_object, fda->parent,
3310 						    off_start_offset,
3311 						    &parent_offset,
3312 						    num_valid);
3313 			if (!parent) {
3314 				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3315 						  proc->pid, thread->pid);
3316 				return_error = BR_FAILED_REPLY;
3317 				return_error_param = -EINVAL;
3318 				return_error_line = __LINE__;
3319 				goto err_bad_parent;
3320 			}
3321 			if (!binder_validate_fixup(target_proc, t->buffer,
3322 						   off_start_offset,
3323 						   parent_offset,
3324 						   fda->parent_offset,
3325 						   last_fixup_obj_off,
3326 						   last_fixup_min_off)) {
3327 				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3328 						  proc->pid, thread->pid);
3329 				return_error = BR_FAILED_REPLY;
3330 				return_error_param = -EINVAL;
3331 				return_error_line = __LINE__;
3332 				goto err_bad_parent;
3333 			}
3334 			ret = binder_translate_fd_array(fda, parent, t, thread,
3335 							in_reply_to);
3336 			if (ret < 0) {
3337 				return_error = BR_FAILED_REPLY;
3338 				return_error_param = ret;
3339 				return_error_line = __LINE__;
3340 				goto err_translate_failed;
3341 			}
3342 			last_fixup_obj_off = parent_offset;
3343 			last_fixup_min_off =
3344 				fda->parent_offset + sizeof(u32) * fda->num_fds;
3345 		} break;
3346 		case BINDER_TYPE_PTR: {
3347 			struct binder_buffer_object *bp =
3348 				to_binder_buffer_object(hdr);
3349 			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3350 			size_t num_valid;
3351 
3352 			if (bp->length > buf_left) {
3353 				binder_user_error("%d:%d got transaction with too large buffer\n",
3354 						  proc->pid, thread->pid);
3355 				return_error = BR_FAILED_REPLY;
3356 				return_error_param = -EINVAL;
3357 				return_error_line = __LINE__;
3358 				goto err_bad_offset;
3359 			}
3360 			if (binder_alloc_copy_user_to_buffer(
3361 						&target_proc->alloc,
3362 						t->buffer,
3363 						sg_buf_offset,
3364 						(const void __user *)
3365 							(uintptr_t)bp->buffer,
3366 						bp->length)) {
3367 				binder_user_error("%d:%d got transaction with invalid sg buffer ptr\n",
3368 						  proc->pid, thread->pid);
3369 				return_error = BR_FAILED_REPLY;
3370 				return_error_param = -EFAULT;
3371 				return_error_line = __LINE__;
3372 				goto err_copy_data_failed;
3373 			}
3374 			/* Fixup buffer pointer to target proc address space */
3375 			bp->buffer = (uintptr_t)
3376 				t->buffer->user_data + sg_buf_offset;
3377 			sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3378 
3379 			num_valid = (buffer_offset - off_start_offset) *
3380 					sizeof(binder_size_t);
3381 			ret = binder_fixup_parent(t, thread, bp,
3382 						  off_start_offset,
3383 						  num_valid,
3384 						  last_fixup_obj_off,
3385 						  last_fixup_min_off);
3386 			if (ret < 0) {
3387 				return_error = BR_FAILED_REPLY;
3388 				return_error_param = ret;
3389 				return_error_line = __LINE__;
3390 				goto err_translate_failed;
3391 			}
3392 			binder_alloc_copy_to_buffer(&target_proc->alloc,
3393 						    t->buffer, object_offset,
3394 						    bp, sizeof(*bp));
3395 			last_fixup_obj_off = object_offset;
3396 			last_fixup_min_off = 0;
3397 		} break;
3398 		default:
3399 			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3400 				proc->pid, thread->pid, hdr->type);
3401 			return_error = BR_FAILED_REPLY;
3402 			return_error_param = -EINVAL;
3403 			return_error_line = __LINE__;
3404 			goto err_bad_object_type;
3405 		}
3406 	}
3407 	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3408 	t->work.type = BINDER_WORK_TRANSACTION;
3409 
3410 	if (reply) {
3411 		binder_enqueue_thread_work(thread, tcomplete);
3412 		binder_inner_proc_lock(target_proc);
3413 		if (target_thread->is_dead) {
3414 			binder_inner_proc_unlock(target_proc);
3415 			goto err_dead_proc_or_thread;
3416 		}
3417 		BUG_ON(t->buffer->async_transaction != 0);
3418 		binder_pop_transaction_ilocked(target_thread, in_reply_to);
3419 		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3420 		binder_inner_proc_unlock(target_proc);
3421 		wake_up_interruptible_sync(&target_thread->wait);
3422 		binder_free_transaction(in_reply_to);
3423 	} else if (!(t->flags & TF_ONE_WAY)) {
3424 		BUG_ON(t->buffer->async_transaction != 0);
3425 		binder_inner_proc_lock(proc);
3426 		/*
3427 		 * Defer the TRANSACTION_COMPLETE, so we don't return to
3428 		 * userspace immediately; this lets the target process start
3429 		 * processing this transaction right away, reducing latency.
3430 		 * We then return the TRANSACTION_COMPLETE once the target
3431 		 * replies (or an error occurs).
3432 		 */
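		/*
		 * Editorial sketch of the resulting ordering (illustrative,
		 * not kernel code): the sender is not woken for the deferred
		 * tcomplete, so it observes
		 *
		 *   sender: BC_TRANSACTION      target: BR_TRANSACTION
		 *   sender: (blocked in read)   target: works, sends BC_REPLY
		 *   sender: BR_TRANSACTION_COMPLETE followed by BR_REPLY
		 */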
3433 		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3434 		t->need_reply = 1;
3435 		t->from_parent = thread->transaction_stack;
3436 		thread->transaction_stack = t;
3437 		binder_inner_proc_unlock(proc);
3438 		if (!binder_proc_transaction(t, target_proc, target_thread)) {
3439 			binder_inner_proc_lock(proc);
3440 			binder_pop_transaction_ilocked(thread, t);
3441 			binder_inner_proc_unlock(proc);
3442 			goto err_dead_proc_or_thread;
3443 		}
3444 	} else {
3445 		BUG_ON(target_node == NULL);
3446 		BUG_ON(t->buffer->async_transaction != 1);
3447 		binder_enqueue_thread_work(thread, tcomplete);
3448 		if (!binder_proc_transaction(t, target_proc, NULL))
3449 			goto err_dead_proc_or_thread;
3450 	}
3451 	if (target_thread)
3452 		binder_thread_dec_tmpref(target_thread);
3453 	binder_proc_dec_tmpref(target_proc);
3454 	if (target_node)
3455 		binder_dec_node_tmpref(target_node);
3456 	/*
3457 	 * write barrier to synchronize with initialization
3458 	 * of log entry
3459 	 */
3460 	smp_wmb();
3461 	WRITE_ONCE(e->debug_id_done, t_debug_id);
3462 	return;
3463 
3464 err_dead_proc_or_thread:
3465 	return_error = BR_DEAD_REPLY;
3466 	return_error_line = __LINE__;
3467 	binder_dequeue_work(proc, tcomplete);
3468 err_translate_failed:
3469 err_bad_object_type:
3470 err_bad_offset:
3471 err_bad_parent:
3472 err_copy_data_failed:
3473 	binder_free_txn_fixups(t);
3474 	trace_binder_transaction_failed_buffer_release(t->buffer);
3475 	binder_transaction_buffer_release(target_proc, t->buffer,
3476 					  buffer_offset, true);
3477 	if (target_node)
3478 		binder_dec_node_tmpref(target_node);
3479 	target_node = NULL;
3480 	t->buffer->transaction = NULL;
3481 	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3482 err_binder_alloc_buf_failed:
3483 err_bad_extra_size:
3484 	if (secctx)
3485 		security_release_secctx(secctx, secctx_sz);
3486 err_get_secctx_failed:
3487 	kfree(tcomplete);
3488 	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3489 err_alloc_tcomplete_failed:
3490 	kfree(t);
3491 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
3492 err_alloc_t_failed:
3493 err_bad_todo_list:
3494 err_bad_call_stack:
3495 err_empty_call_stack:
3496 err_dead_binder:
3497 err_invalid_target_handle:
3498 	if (target_thread)
3499 		binder_thread_dec_tmpref(target_thread);
3500 	if (target_proc)
3501 		binder_proc_dec_tmpref(target_proc);
3502 	if (target_node) {
3503 		binder_dec_node(target_node, 1, 0);
3504 		binder_dec_node_tmpref(target_node);
3505 	}
3506 
3507 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3508 		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3509 		     proc->pid, thread->pid, return_error, return_error_param,
3510 		     (u64)tr->data_size, (u64)tr->offsets_size,
3511 		     return_error_line);
3512 
3513 	{
3514 		struct binder_transaction_log_entry *fe;
3515 
3516 		e->return_error = return_error;
3517 		e->return_error_param = return_error_param;
3518 		e->return_error_line = return_error_line;
3519 		fe = binder_transaction_log_add(&binder_transaction_log_failed);
3520 		*fe = *e;
3521 		/*
3522 		 * write barrier to synchronize with initialization
3523 		 * of log entry
3524 		 */
3525 		smp_wmb();
3526 		WRITE_ONCE(e->debug_id_done, t_debug_id);
3527 		WRITE_ONCE(fe->debug_id_done, t_debug_id);
3528 	}
3529 
3530 	BUG_ON(thread->return_error.cmd != BR_OK);
3531 	if (in_reply_to) {
3532 		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3533 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3534 		binder_send_failed_reply(in_reply_to, return_error);
3535 	} else {
3536 		thread->return_error.cmd = return_error;
3537 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3538 	}
3539 }
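
/*
 * Editorial note (illustrative, not part of the driver): on the error
 * paths above the sender does not see -errno from the ioctl. Instead a
 * return_error work item is queued on the thread and the failure shows
 * up as a BR_* code in the read buffer of a later BINDER_WRITE_READ:
 *
 *	uint32_t br;
 *	memcpy(&br, read_buf + off, sizeof(br));	// filled by binder_thread_read()
 *	if (br == BR_FAILED_REPLY || br == BR_DEAD_REPLY)
 *		handle_transaction_failure();		// hypothetical helper
 */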
3540 
3541 /**
3542  * binder_free_buf() - free the specified buffer
3543  * @proc:	binder proc that owns buffer
3544  * @buffer:	buffer to be freed
3545  *
3546  * If the buffer is for an async transaction, enqueue the next
3547  * async transaction from the node.
3548  *
3549  * Cleanup buffer and free it.
3550  */
3551 static void
3552 binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
3553 {
3554 	if (buffer->transaction) {
3555 		buffer->transaction->buffer = NULL;
3556 		buffer->transaction = NULL;
3557 	}
3558 	if (buffer->async_transaction && buffer->target_node) {
3559 		struct binder_node *buf_node;
3560 		struct binder_work *w;
3561 
3562 		buf_node = buffer->target_node;
3563 		binder_node_inner_lock(buf_node);
3564 		BUG_ON(!buf_node->has_async_transaction);
3565 		BUG_ON(buf_node->proc != proc);
3566 		w = binder_dequeue_work_head_ilocked(
3567 				&buf_node->async_todo);
3568 		if (!w) {
3569 			buf_node->has_async_transaction = false;
3570 		} else {
3571 			binder_enqueue_work_ilocked(
3572 					w, &proc->todo);
3573 			binder_wakeup_proc_ilocked(proc);
3574 		}
3575 		binder_node_inner_unlock(buf_node);
3576 	}
3577 	trace_binder_transaction_buffer_release(buffer);
3578 	binder_transaction_buffer_release(proc, buffer, 0, false);
3579 	binder_alloc_free_buf(&proc->alloc, buffer);
3580 }
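
/*
 * Illustrative userspace counterpart (editorial sketch, assuming only
 * the uapi types from <uapi/linux/android/binder.h>): a client that has
 * consumed a received transaction hands the buffer back with
 * BC_FREE_BUFFER, which lands in binder_free_buf() via the handler in
 * binder_thread_write() below:
 *
 *	struct {
 *		uint32_t cmd;
 *		binder_uintptr_t data_ptr;
 *	} __attribute__((packed)) fb = {
 *		.cmd = BC_FREE_BUFFER,
 *		.data_ptr = tr.data.ptr.buffer,	// from the BR_TRANSACTION
 *	};
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(fb),
 *		.write_buffer = (binder_uintptr_t)&fb,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */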
3581 
3582 static int binder_thread_write(struct binder_proc *proc,
3583 			struct binder_thread *thread,
3584 			binder_uintptr_t binder_buffer, size_t size,
3585 			binder_size_t *consumed)
3586 {
3587 	uint32_t cmd;
3588 	struct binder_context *context = proc->context;
3589 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3590 	void __user *ptr = buffer + *consumed;
3591 	void __user *end = buffer + size;
3592 
3593 	while (ptr < end && thread->return_error.cmd == BR_OK) {
3594 		int ret;
3595 
3596 		if (get_user(cmd, (uint32_t __user *)ptr))
3597 			return -EFAULT;
3598 		ptr += sizeof(uint32_t);
3599 		trace_binder_command(cmd);
3600 		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3601 			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3602 			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3603 			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3604 		}
3605 		switch (cmd) {
3606 		case BC_INCREFS:
3607 		case BC_ACQUIRE:
3608 		case BC_RELEASE:
3609 		case BC_DECREFS: {
3610 			uint32_t target;
3611 			const char *debug_string;
3612 			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3613 			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3614 			struct binder_ref_data rdata;
3615 
3616 			if (get_user(target, (uint32_t __user *)ptr))
3617 				return -EFAULT;
3618 
3619 			ptr += sizeof(uint32_t);
3620 			ret = -1;
3621 			if (increment && !target) {
3622 				struct binder_node *ctx_mgr_node;
3623 				mutex_lock(&context->context_mgr_node_lock);
3624 				ctx_mgr_node = context->binder_context_mgr_node;
3625 				if (ctx_mgr_node)
3626 					ret = binder_inc_ref_for_node(
3627 							proc, ctx_mgr_node,
3628 							strong, NULL, &rdata);
3629 				mutex_unlock(&context->context_mgr_node_lock);
3630 			}
3631 			if (ret)
3632 				ret = binder_update_ref_for_handle(
3633 						proc, target, increment, strong,
3634 						&rdata);
3635 			if (!ret && rdata.desc != target) {
3636 				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3637 					proc->pid, thread->pid,
3638 					target, rdata.desc);
3639 			}
3640 			switch (cmd) {
3641 			case BC_INCREFS:
3642 				debug_string = "IncRefs";
3643 				break;
3644 			case BC_ACQUIRE:
3645 				debug_string = "Acquire";
3646 				break;
3647 			case BC_RELEASE:
3648 				debug_string = "Release";
3649 				break;
3650 			case BC_DECREFS:
3651 			default:
3652 				debug_string = "DecRefs";
3653 				break;
3654 			}
3655 			if (ret) {
3656 				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3657 					proc->pid, thread->pid, debug_string,
3658 					strong, target, ret);
3659 				break;
3660 			}
3661 			binder_debug(BINDER_DEBUG_USER_REFS,
3662 				     "%d:%d %s ref %d desc %d s %d w %d\n",
3663 				     proc->pid, thread->pid, debug_string,
3664 				     rdata.debug_id, rdata.desc, rdata.strong,
3665 				     rdata.weak);
3666 			break;
3667 		}
3668 		case BC_INCREFS_DONE:
3669 		case BC_ACQUIRE_DONE: {
3670 			binder_uintptr_t node_ptr;
3671 			binder_uintptr_t cookie;
3672 			struct binder_node *node;
3673 			bool free_node;
3674 
3675 			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3676 				return -EFAULT;
3677 			ptr += sizeof(binder_uintptr_t);
3678 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3679 				return -EFAULT;
3680 			ptr += sizeof(binder_uintptr_t);
3681 			node = binder_get_node(proc, node_ptr);
3682 			if (node == NULL) {
3683 				binder_user_error("%d:%d %s u%016llx no match\n",
3684 					proc->pid, thread->pid,
3685 					cmd == BC_INCREFS_DONE ?
3686 					"BC_INCREFS_DONE" :
3687 					"BC_ACQUIRE_DONE",
3688 					(u64)node_ptr);
3689 				break;
3690 			}
3691 			if (cookie != node->cookie) {
3692 				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3693 					proc->pid, thread->pid,
3694 					cmd == BC_INCREFS_DONE ?
3695 					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3696 					(u64)node_ptr, node->debug_id,
3697 					(u64)cookie, (u64)node->cookie);
3698 				binder_put_node(node);
3699 				break;
3700 			}
3701 			binder_node_inner_lock(node);
3702 			if (cmd == BC_ACQUIRE_DONE) {
3703 				if (node->pending_strong_ref == 0) {
3704 					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3705 						proc->pid, thread->pid,
3706 						node->debug_id);
3707 					binder_node_inner_unlock(node);
3708 					binder_put_node(node);
3709 					break;
3710 				}
3711 				node->pending_strong_ref = 0;
3712 			} else {
3713 				if (node->pending_weak_ref == 0) {
3714 					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3715 						proc->pid, thread->pid,
3716 						node->debug_id);
3717 					binder_node_inner_unlock(node);
3718 					binder_put_node(node);
3719 					break;
3720 				}
3721 				node->pending_weak_ref = 0;
3722 			}
3723 			free_node = binder_dec_node_nilocked(node,
3724 					cmd == BC_ACQUIRE_DONE, 0);
3725 			WARN_ON(free_node);
3726 			binder_debug(BINDER_DEBUG_USER_REFS,
3727 				     "%d:%d %s node %d ls %d lw %d tr %d\n",
3728 				     proc->pid, thread->pid,
3729 				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3730 				     node->debug_id, node->local_strong_refs,
3731 				     node->local_weak_refs, node->tmp_refs);
3732 			binder_node_inner_unlock(node);
3733 			binder_put_node(node);
3734 			break;
3735 		}
3736 		case BC_ATTEMPT_ACQUIRE:
3737 			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3738 			return -EINVAL;
3739 		case BC_ACQUIRE_RESULT:
3740 			pr_err("BC_ACQUIRE_RESULT not supported\n");
3741 			return -EINVAL;
3742 
3743 		case BC_FREE_BUFFER: {
3744 			binder_uintptr_t data_ptr;
3745 			struct binder_buffer *buffer;
3746 
3747 			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3748 				return -EFAULT;
3749 			ptr += sizeof(binder_uintptr_t);
3750 
3751 			buffer = binder_alloc_prepare_to_free(&proc->alloc,
3752 							      data_ptr);
3753 			if (IS_ERR_OR_NULL(buffer)) {
3754 				if (PTR_ERR(buffer) == -EPERM) {
3755 					binder_user_error(
3756 						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3757 						proc->pid, thread->pid,
3758 						(u64)data_ptr);
3759 				} else {
3760 					binder_user_error(
3761 						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
3762 						proc->pid, thread->pid,
3763 						(u64)data_ptr);
3764 				}
3765 				break;
3766 			}
3767 			binder_debug(BINDER_DEBUG_FREE_BUFFER,
3768 				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3769 				     proc->pid, thread->pid, (u64)data_ptr,
3770 				     buffer->debug_id,
3771 				     buffer->transaction ? "active" : "finished");
3772 			binder_free_buf(proc, buffer);
3773 			break;
3774 		}
3775 
3776 		case BC_TRANSACTION_SG:
3777 		case BC_REPLY_SG: {
3778 			struct binder_transaction_data_sg tr;
3779 
3780 			if (copy_from_user(&tr, ptr, sizeof(tr)))
3781 				return -EFAULT;
3782 			ptr += sizeof(tr);
3783 			binder_transaction(proc, thread, &tr.transaction_data,
3784 					   cmd == BC_REPLY_SG, tr.buffers_size);
3785 			break;
3786 		}
3787 		case BC_TRANSACTION:
3788 		case BC_REPLY: {
3789 			struct binder_transaction_data tr;
3790 
3791 			if (copy_from_user(&tr, ptr, sizeof(tr)))
3792 				return -EFAULT;
3793 			ptr += sizeof(tr);
3794 			binder_transaction(proc, thread, &tr,
3795 					   cmd == BC_REPLY, 0);
3796 			break;
3797 		}
3798 
3799 		case BC_REGISTER_LOOPER:
3800 			binder_debug(BINDER_DEBUG_THREADS,
3801 				     "%d:%d BC_REGISTER_LOOPER\n",
3802 				     proc->pid, thread->pid);
3803 			binder_inner_proc_lock(proc);
3804 			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3805 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3806 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3807 					proc->pid, thread->pid);
3808 			} else if (proc->requested_threads == 0) {
3809 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3810 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3811 					proc->pid, thread->pid);
3812 			} else {
3813 				proc->requested_threads--;
3814 				proc->requested_threads_started++;
3815 			}
3816 			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3817 			binder_inner_proc_unlock(proc);
3818 			break;
3819 		case BC_ENTER_LOOPER:
3820 			binder_debug(BINDER_DEBUG_THREADS,
3821 				     "%d:%d BC_ENTER_LOOPER\n",
3822 				     proc->pid, thread->pid);
3823 			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3824 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3825 				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3826 					proc->pid, thread->pid);
3827 			}
3828 			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3829 			break;
3830 		case BC_EXIT_LOOPER:
3831 			binder_debug(BINDER_DEBUG_THREADS,
3832 				     "%d:%d BC_EXIT_LOOPER\n",
3833 				     proc->pid, thread->pid);
3834 			thread->looper |= BINDER_LOOPER_STATE_EXITED;
3835 			break;
3836 
3837 		case BC_REQUEST_DEATH_NOTIFICATION:
3838 		case BC_CLEAR_DEATH_NOTIFICATION: {
3839 			uint32_t target;
3840 			binder_uintptr_t cookie;
3841 			struct binder_ref *ref;
3842 			struct binder_ref_death *death = NULL;
3843 
3844 			if (get_user(target, (uint32_t __user *)ptr))
3845 				return -EFAULT;
3846 			ptr += sizeof(uint32_t);
3847 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3848 				return -EFAULT;
3849 			ptr += sizeof(binder_uintptr_t);
3850 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3851 				/*
3852 				 * Allocate memory for death notification
3853 				 * before taking lock
3854 				 */
3855 				death = kzalloc(sizeof(*death), GFP_KERNEL);
3856 				if (death == NULL) {
3857 					WARN_ON(thread->return_error.cmd !=
3858 						BR_OK);
3859 					thread->return_error.cmd = BR_ERROR;
3860 					binder_enqueue_thread_work(
3861 						thread,
3862 						&thread->return_error.work);
3863 					binder_debug(
3864 						BINDER_DEBUG_FAILED_TRANSACTION,
3865 						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3866 						proc->pid, thread->pid);
3867 					break;
3868 				}
3869 			}
3870 			binder_proc_lock(proc);
3871 			ref = binder_get_ref_olocked(proc, target, false);
3872 			if (ref == NULL) {
3873 				binder_user_error("%d:%d %s invalid ref %d\n",
3874 					proc->pid, thread->pid,
3875 					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3876 					"BC_REQUEST_DEATH_NOTIFICATION" :
3877 					"BC_CLEAR_DEATH_NOTIFICATION",
3878 					target);
3879 				binder_proc_unlock(proc);
3880 				kfree(death);
3881 				break;
3882 			}
3883 
3884 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3885 				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3886 				     proc->pid, thread->pid,
3887 				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3888 				     "BC_REQUEST_DEATH_NOTIFICATION" :
3889 				     "BC_CLEAR_DEATH_NOTIFICATION",
3890 				     (u64)cookie, ref->data.debug_id,
3891 				     ref->data.desc, ref->data.strong,
3892 				     ref->data.weak, ref->node->debug_id);
3893 
3894 			binder_node_lock(ref->node);
3895 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3896 				if (ref->death) {
3897 					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3898 						proc->pid, thread->pid);
3899 					binder_node_unlock(ref->node);
3900 					binder_proc_unlock(proc);
3901 					kfree(death);
3902 					break;
3903 				}
3904 				binder_stats_created(BINDER_STAT_DEATH);
3905 				INIT_LIST_HEAD(&death->work.entry);
3906 				death->cookie = cookie;
3907 				ref->death = death;
3908 				if (ref->node->proc == NULL) {
3909 					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3910 
3911 					binder_inner_proc_lock(proc);
3912 					binder_enqueue_work_ilocked(
3913 						&ref->death->work, &proc->todo);
3914 					binder_wakeup_proc_ilocked(proc);
3915 					binder_inner_proc_unlock(proc);
3916 				}
3917 			} else {
3918 				if (ref->death == NULL) {
3919 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3920 						proc->pid, thread->pid);
3921 					binder_node_unlock(ref->node);
3922 					binder_proc_unlock(proc);
3923 					break;
3924 				}
3925 				death = ref->death;
3926 				if (death->cookie != cookie) {
3927 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3928 						proc->pid, thread->pid,
3929 						(u64)death->cookie,
3930 						(u64)cookie);
3931 					binder_node_unlock(ref->node);
3932 					binder_proc_unlock(proc);
3933 					break;
3934 				}
3935 				ref->death = NULL;
3936 				binder_inner_proc_lock(proc);
3937 				if (list_empty(&death->work.entry)) {
3938 					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3939 					if (thread->looper &
3940 					    (BINDER_LOOPER_STATE_REGISTERED |
3941 					     BINDER_LOOPER_STATE_ENTERED))
3942 						binder_enqueue_thread_work_ilocked(
3943 								thread,
3944 								&death->work);
3945 					else {
3946 						binder_enqueue_work_ilocked(
3947 								&death->work,
3948 								&proc->todo);
3949 						binder_wakeup_proc_ilocked(
3950 								proc);
3951 					}
3952 				} else {
3953 					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3954 					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3955 				}
3956 				binder_inner_proc_unlock(proc);
3957 			}
3958 			binder_node_unlock(ref->node);
3959 			binder_proc_unlock(proc);
3960 		} break;
3961 		case BC_DEAD_BINDER_DONE: {
3962 			struct binder_work *w;
3963 			binder_uintptr_t cookie;
3964 			struct binder_ref_death *death = NULL;
3965 
3966 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3967 				return -EFAULT;
3968 
3969 			ptr += sizeof(cookie);
3970 			binder_inner_proc_lock(proc);
3971 			list_for_each_entry(w, &proc->delivered_death,
3972 					    entry) {
3973 				struct binder_ref_death *tmp_death =
3974 					container_of(w,
3975 						     struct binder_ref_death,
3976 						     work);
3977 
3978 				if (tmp_death->cookie == cookie) {
3979 					death = tmp_death;
3980 					break;
3981 				}
3982 			}
3983 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
3984 				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3985 				     proc->pid, thread->pid, (u64)cookie,
3986 				     death);
3987 			if (death == NULL) {
3988 				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3989 					proc->pid, thread->pid, (u64)cookie);
3990 				binder_inner_proc_unlock(proc);
3991 				break;
3992 			}
3993 			binder_dequeue_work_ilocked(&death->work);
3994 			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3995 				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3996 				if (thread->looper &
3997 					(BINDER_LOOPER_STATE_REGISTERED |
3998 					 BINDER_LOOPER_STATE_ENTERED))
3999 					binder_enqueue_thread_work_ilocked(
4000 						thread, &death->work);
4001 				else {
4002 					binder_enqueue_work_ilocked(
4003 							&death->work,
4004 							&proc->todo);
4005 					binder_wakeup_proc_ilocked(proc);
4006 				}
4007 			}
4008 			binder_inner_proc_unlock(proc);
4009 		} break;
4010 
4011 		default:
4012 			pr_err("%d:%d unknown command %d\n",
4013 			       proc->pid, thread->pid, cmd);
4014 			return -EINVAL;
4015 		}
4016 		*consumed = ptr - buffer;
4017 	}
4018 	return 0;
4019 }
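
/*
 * Editorial sketch of the stream this parser consumes (illustrative;
 * target_handle is a hypothetical value): commands are packed back to
 * back, each a 32-bit BC_* code followed by that command's payload:
 *
 *	uint8_t buf[64];
 *	size_t off = 0;
 *	uint32_t cmd = BC_ENTER_LOOPER;		// no payload
 *	uint32_t handle = target_handle;	// payload of BC_INCREFS
 *	memcpy(buf + off, &cmd, sizeof(cmd));
 *	off += sizeof(cmd);
 *	cmd = BC_INCREFS;
 *	memcpy(buf + off, &cmd, sizeof(cmd));
 *	off += sizeof(cmd);
 *	memcpy(buf + off, &handle, sizeof(handle));
 *	off += sizeof(handle);
 *	// buf[0..off) becomes bwr.write_buffer / bwr.write_size
 */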
4020 
4021 static void binder_stat_br(struct binder_proc *proc,
4022 			   struct binder_thread *thread, uint32_t cmd)
4023 {
4024 	trace_binder_return(cmd);
4025 	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4026 		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4027 		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4028 		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4029 	}
4030 }
4031 
4032 static int binder_put_node_cmd(struct binder_proc *proc,
4033 			       struct binder_thread *thread,
4034 			       void __user **ptrp,
4035 			       binder_uintptr_t node_ptr,
4036 			       binder_uintptr_t node_cookie,
4037 			       int node_debug_id,
4038 			       uint32_t cmd, const char *cmd_name)
4039 {
4040 	void __user *ptr = *ptrp;
4041 
4042 	if (put_user(cmd, (uint32_t __user *)ptr))
4043 		return -EFAULT;
4044 	ptr += sizeof(uint32_t);
4045 
4046 	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4047 		return -EFAULT;
4048 	ptr += sizeof(binder_uintptr_t);
4049 
4050 	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4051 		return -EFAULT;
4052 	ptr += sizeof(binder_uintptr_t);
4053 
4054 	binder_stat_br(proc, thread, cmd);
4055 	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4056 		     proc->pid, thread->pid, cmd_name, node_debug_id,
4057 		     (u64)node_ptr, (u64)node_cookie);
4058 
4059 	*ptrp = ptr;
4060 	return 0;
4061 }
4062 
4063 static int binder_wait_for_work(struct binder_thread *thread,
4064 				bool do_proc_work)
4065 {
4066 	DEFINE_WAIT(wait);
4067 	struct binder_proc *proc = thread->proc;
4068 	int ret = 0;
4069 
4070 	freezer_do_not_count();
4071 	binder_inner_proc_lock(proc);
4072 	for (;;) {
4073 		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4074 		if (binder_has_work_ilocked(thread, do_proc_work))
4075 			break;
4076 		if (do_proc_work)
4077 			list_add(&thread->waiting_thread_node,
4078 				 &proc->waiting_threads);
4079 		binder_inner_proc_unlock(proc);
4080 		schedule();
4081 		binder_inner_proc_lock(proc);
4082 		list_del_init(&thread->waiting_thread_node);
4083 		if (signal_pending(current)) {
4084 			ret = -ERESTARTSYS;
4085 			break;
4086 		}
4087 	}
4088 	finish_wait(&thread->wait, &wait);
4089 	binder_inner_proc_unlock(proc);
4090 	freezer_count();
4091 
4092 	return ret;
4093 }
4094 
4095 /**
4096  * binder_apply_fd_fixups() - finish fd translation
4097  * @proc:         binder_proc associated with @t->buffer
4098  * @t:            binder transaction with list of fd fixups
4099  *
4100  * Now that we are in the context of the transaction target
4101  * process, we can allocate and install fds. Process the
4102  * list of fds to translate and fixup the buffer with the
4103  * new fds.
4104  *
4105  * If we fail to allocate an fd, then free the resources by
4106  * fput'ing files that have not been processed and ksys_close'ing
4107  * any fds that have already been allocated.
4108  */
4109 static int binder_apply_fd_fixups(struct binder_proc *proc,
4110 				  struct binder_transaction *t)
4111 {
4112 	struct binder_txn_fd_fixup *fixup, *tmp;
4113 	int ret = 0;
4114 
4115 	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4116 		int fd = get_unused_fd_flags(O_CLOEXEC);
4117 
4118 		if (fd < 0) {
4119 			binder_debug(BINDER_DEBUG_TRANSACTION,
4120 				     "failed fd fixup txn %d fd %d\n",
4121 				     t->debug_id, fd);
4122 			ret = -ENOMEM;
4123 			break;
4124 		}
4125 		binder_debug(BINDER_DEBUG_TRANSACTION,
4126 			     "fd fixup txn %d fd %d\n",
4127 			     t->debug_id, fd);
4128 		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4129 		fd_install(fd, fixup->file);
4130 		fixup->file = NULL;
4131 		binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4132 					    fixup->offset, &fd,
4133 					    sizeof(u32));
4134 	}
4135 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4136 		if (fixup->file) {
4137 			fput(fixup->file);
4138 		} else if (ret) {
4139 			u32 fd;
4140 
4141 			binder_alloc_copy_from_buffer(&proc->alloc, &fd,
4142 						      t->buffer, fixup->offset,
4143 						      sizeof(fd));
4144 			binder_deferred_fd_close(fd);
4145 		}
4146 		list_del(&fixup->fixup_entry);
4147 		kfree(fixup);
4148 	}
4149 
4150 	return ret;
4151 }
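
/*
 * Design note (editorial): the two list walks above are deliberate. The
 * first pass stops at the first fd allocation failure; the second pass
 * always runs, so unprocessed struct files are fput() and any fds that
 * were already installed are closed via binder_deferred_fd_close(),
 * leaving no half-translated state in the target buffer.
 */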
4152 
4153 static int binder_thread_read(struct binder_proc *proc,
4154 			      struct binder_thread *thread,
4155 			      binder_uintptr_t binder_buffer, size_t size,
4156 			      binder_size_t *consumed, int non_block)
4157 {
4158 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4159 	void __user *ptr = buffer + *consumed;
4160 	void __user *end = buffer + size;
4161 
4162 	int ret = 0;
4163 	int wait_for_proc_work;
4164 
4165 	if (*consumed == 0) {
4166 		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4167 			return -EFAULT;
4168 		ptr += sizeof(uint32_t);
4169 	}
4170 
4171 retry:
4172 	binder_inner_proc_lock(proc);
4173 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4174 	binder_inner_proc_unlock(proc);
4175 
4176 	thread->looper |= BINDER_LOOPER_STATE_WAITING;
4177 
4178 	trace_binder_wait_for_work(wait_for_proc_work,
4179 				   !!thread->transaction_stack,
4180 				   !binder_worklist_empty(proc, &thread->todo));
4181 	if (wait_for_proc_work) {
4182 		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4183 					BINDER_LOOPER_STATE_ENTERED))) {
4184 			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4185 				proc->pid, thread->pid, thread->looper);
4186 			wait_event_interruptible(binder_user_error_wait,
4187 						 binder_stop_on_user_error < 2);
4188 		}
4189 		binder_set_nice(proc->default_priority);
4190 	}
4191 
4192 	if (non_block) {
4193 		if (!binder_has_work(thread, wait_for_proc_work))
4194 			ret = -EAGAIN;
4195 	} else {
4196 		ret = binder_wait_for_work(thread, wait_for_proc_work);
4197 	}
4198 
4199 	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4200 
4201 	if (ret)
4202 		return ret;
4203 
4204 	while (1) {
4205 		uint32_t cmd;
4206 		struct binder_transaction_data_secctx tr;
4207 		struct binder_transaction_data *trd = &tr.transaction_data;
4208 		struct binder_work *w = NULL;
4209 		struct list_head *list = NULL;
4210 		struct binder_transaction *t = NULL;
4211 		struct binder_thread *t_from;
4212 		size_t trsize = sizeof(*trd);
4213 
4214 		binder_inner_proc_lock(proc);
4215 		if (!binder_worklist_empty_ilocked(&thread->todo))
4216 			list = &thread->todo;
4217 		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4218 			   wait_for_proc_work)
4219 			list = &proc->todo;
4220 		else {
4221 			binder_inner_proc_unlock(proc);
4222 
4223 			/* no data added */
4224 			/* no data added beyond the initial BR_NOOP */
4225 				goto retry;
4226 			break;
4227 		}
4228 
4229 		if (end - ptr < sizeof(tr) + 4) {
4230 			binder_inner_proc_unlock(proc);
4231 			break;
4232 		}
4233 		w = binder_dequeue_work_head_ilocked(list);
4234 		if (binder_worklist_empty_ilocked(&thread->todo))
4235 			thread->process_todo = false;
4236 
4237 		switch (w->type) {
4238 		case BINDER_WORK_TRANSACTION: {
4239 			binder_inner_proc_unlock(proc);
4240 			t = container_of(w, struct binder_transaction, work);
4241 		} break;
4242 		case BINDER_WORK_RETURN_ERROR: {
4243 			struct binder_error *e = container_of(
4244 					w, struct binder_error, work);
4245 
4246 			WARN_ON(e->cmd == BR_OK);
4247 			binder_inner_proc_unlock(proc);
4248 			if (put_user(e->cmd, (uint32_t __user *)ptr))
4249 				return -EFAULT;
4250 			cmd = e->cmd;
4251 			e->cmd = BR_OK;
4252 			ptr += sizeof(uint32_t);
4253 
4254 			binder_stat_br(proc, thread, cmd);
4255 		} break;
4256 		case BINDER_WORK_TRANSACTION_COMPLETE: {
4257 			binder_inner_proc_unlock(proc);
4258 			cmd = BR_TRANSACTION_COMPLETE;
4259 			if (put_user(cmd, (uint32_t __user *)ptr))
4260 				return -EFAULT;
4261 			ptr += sizeof(uint32_t);
4262 
4263 			binder_stat_br(proc, thread, cmd);
4264 			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4265 				     "%d:%d BR_TRANSACTION_COMPLETE\n",
4266 				     proc->pid, thread->pid);
4267 			kfree(w);
4268 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4269 		} break;
4270 		case BINDER_WORK_NODE: {
4271 			struct binder_node *node = container_of(w, struct binder_node, work);
4272 			int strong, weak;
4273 			binder_uintptr_t node_ptr = node->ptr;
4274 			binder_uintptr_t node_cookie = node->cookie;
4275 			int node_debug_id = node->debug_id;
4276 			int has_weak_ref;
4277 			int has_strong_ref;
4278 			void __user *orig_ptr = ptr;
4279 
4280 			BUG_ON(proc != node->proc);
4281 			strong = node->internal_strong_refs ||
4282 					node->local_strong_refs;
4283 			weak = !hlist_empty(&node->refs) ||
4284 					node->local_weak_refs ||
4285 					node->tmp_refs || strong;
4286 			has_strong_ref = node->has_strong_ref;
4287 			has_weak_ref = node->has_weak_ref;
4288 
4289 			if (weak && !has_weak_ref) {
4290 				node->has_weak_ref = 1;
4291 				node->pending_weak_ref = 1;
4292 				node->local_weak_refs++;
4293 			}
4294 			if (strong && !has_strong_ref) {
4295 				node->has_strong_ref = 1;
4296 				node->pending_strong_ref = 1;
4297 				node->local_strong_refs++;
4298 			}
4299 			if (!strong && has_strong_ref)
4300 				node->has_strong_ref = 0;
4301 			if (!weak && has_weak_ref)
4302 				node->has_weak_ref = 0;
4303 			if (!weak && !strong) {
4304 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4305 					     "%d:%d node %d u%016llx c%016llx deleted\n",
4306 					     proc->pid, thread->pid,
4307 					     node_debug_id,
4308 					     (u64)node_ptr,
4309 					     (u64)node_cookie);
4310 				rb_erase(&node->rb_node, &proc->nodes);
4311 				binder_inner_proc_unlock(proc);
4312 				binder_node_lock(node);
4313 				/*
4314 				 * Acquire the node lock before freeing the
4315 				 * node to serialize with other threads that
4316 				 * may have been holding the node lock while
4317 				 * decrementing this node (avoids race where
4318 				 * this thread frees while the other thread
4319 				 * is unlocking the node after the final
4320 				 * decrement)
4321 				 */
4322 				binder_node_unlock(node);
4323 				binder_free_node(node);
4324 			} else
4325 				binder_inner_proc_unlock(proc);
4326 
4327 			if (weak && !has_weak_ref)
4328 				ret = binder_put_node_cmd(
4329 						proc, thread, &ptr, node_ptr,
4330 						node_cookie, node_debug_id,
4331 						BR_INCREFS, "BR_INCREFS");
4332 			if (!ret && strong && !has_strong_ref)
4333 				ret = binder_put_node_cmd(
4334 						proc, thread, &ptr, node_ptr,
4335 						node_cookie, node_debug_id,
4336 						BR_ACQUIRE, "BR_ACQUIRE");
4337 			if (!ret && !strong && has_strong_ref)
4338 				ret = binder_put_node_cmd(
4339 						proc, thread, &ptr, node_ptr,
4340 						node_cookie, node_debug_id,
4341 						BR_RELEASE, "BR_RELEASE");
4342 			if (!ret && !weak && has_weak_ref)
4343 				ret = binder_put_node_cmd(
4344 						proc, thread, &ptr, node_ptr,
4345 						node_cookie, node_debug_id,
4346 						BR_DECREFS, "BR_DECREFS");
4347 			if (orig_ptr == ptr)
4348 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4349 					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
4350 					     proc->pid, thread->pid,
4351 					     node_debug_id,
4352 					     (u64)node_ptr,
4353 					     (u64)node_cookie);
4354 			if (ret)
4355 				return ret;
4356 		} break;
4357 		case BINDER_WORK_DEAD_BINDER:
4358 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4359 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4360 			struct binder_ref_death *death;
4361 			uint32_t cmd;
4362 			binder_uintptr_t cookie;
4363 
4364 			death = container_of(w, struct binder_ref_death, work);
4365 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4366 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4367 			else
4368 				cmd = BR_DEAD_BINDER;
4369 			cookie = death->cookie;
4370 
4371 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4372 				     "%d:%d %s %016llx\n",
4373 				      proc->pid, thread->pid,
4374 				      cmd == BR_DEAD_BINDER ?
4375 				      "BR_DEAD_BINDER" :
4376 				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4377 				      (u64)cookie);
4378 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4379 				binder_inner_proc_unlock(proc);
4380 				kfree(death);
4381 				binder_stats_deleted(BINDER_STAT_DEATH);
4382 			} else {
4383 				binder_enqueue_work_ilocked(
4384 						w, &proc->delivered_death);
4385 				binder_inner_proc_unlock(proc);
4386 			}
4387 			if (put_user(cmd, (uint32_t __user *)ptr))
4388 				return -EFAULT;
4389 			ptr += sizeof(uint32_t);
4390 			if (put_user(cookie,
4391 				     (binder_uintptr_t __user *)ptr))
4392 				return -EFAULT;
4393 			ptr += sizeof(binder_uintptr_t);
4394 			binder_stat_br(proc, thread, cmd);
4395 			if (cmd == BR_DEAD_BINDER)
4396 				goto done; /* DEAD_BINDER notifications can cause transactions */
4397 		} break;
4398 		default:
4399 			binder_inner_proc_unlock(proc);
4400 			pr_err("%d:%d: bad work type %d\n",
4401 			       proc->pid, thread->pid, w->type);
4402 			break;
4403 		}
4404 
4405 		if (!t)
4406 			continue;
4407 
4408 		BUG_ON(t->buffer == NULL);
4409 		if (t->buffer->target_node) {
4410 			struct binder_node *target_node = t->buffer->target_node;
4411 
4412 			trd->target.ptr = target_node->ptr;
4413 			trd->cookie =  target_node->cookie;
4414 			t->saved_priority = task_nice(current);
4415 			if (t->priority < target_node->min_priority &&
4416 			    !(t->flags & TF_ONE_WAY))
4417 				binder_set_nice(t->priority);
4418 			else if (!(t->flags & TF_ONE_WAY) ||
4419 				 t->saved_priority > target_node->min_priority)
4420 				binder_set_nice(target_node->min_priority);
4421 			cmd = BR_TRANSACTION;
4422 		} else {
4423 			trd->target.ptr = 0;
4424 			trd->cookie = 0;
4425 			cmd = BR_REPLY;
4426 		}
4427 		trd->code = t->code;
4428 		trd->flags = t->flags;
4429 		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4430 
4431 		t_from = binder_get_txn_from(t);
4432 		if (t_from) {
4433 			struct task_struct *sender = t_from->proc->tsk;
4434 
4435 			trd->sender_pid =
4436 				task_tgid_nr_ns(sender,
4437 						task_active_pid_ns(current));
4438 		} else {
4439 			trd->sender_pid = 0;
4440 		}
4441 
4442 		ret = binder_apply_fd_fixups(proc, t);
4443 		if (ret) {
4444 			struct binder_buffer *buffer = t->buffer;
4445 			bool oneway = !!(t->flags & TF_ONE_WAY);
4446 			int tid = t->debug_id;
4447 
4448 			if (t_from)
4449 				binder_thread_dec_tmpref(t_from);
4450 			buffer->transaction = NULL;
4451 			binder_cleanup_transaction(t, "fd fixups failed",
4452 						   BR_FAILED_REPLY);
4453 			binder_free_buf(proc, buffer);
4454 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4455 				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4456 				     proc->pid, thread->pid,
4457 				     oneway ? "async " :
4458 					(cmd == BR_REPLY ? "reply " : ""),
4459 				     tid, BR_FAILED_REPLY, ret, __LINE__);
4460 			if (cmd == BR_REPLY) {
4461 				cmd = BR_FAILED_REPLY;
4462 				if (put_user(cmd, (uint32_t __user *)ptr))
4463 					return -EFAULT;
4464 				ptr += sizeof(uint32_t);
4465 				binder_stat_br(proc, thread, cmd);
4466 				break;
4467 			}
4468 			continue;
4469 		}
4470 		trd->data_size = t->buffer->data_size;
4471 		trd->offsets_size = t->buffer->offsets_size;
4472 		trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4473 		trd->data.ptr.offsets = trd->data.ptr.buffer +
4474 					ALIGN(t->buffer->data_size,
4475 					    sizeof(void *));
4476 
4477 		tr.secctx = t->security_ctx;
4478 		if (t->security_ctx) {
4479 			cmd = BR_TRANSACTION_SEC_CTX;
4480 			trsize = sizeof(tr);
4481 		}
4482 		if (put_user(cmd, (uint32_t __user *)ptr)) {
4483 			if (t_from)
4484 				binder_thread_dec_tmpref(t_from);
4485 
4486 			binder_cleanup_transaction(t, "put_user failed",
4487 						   BR_FAILED_REPLY);
4488 
4489 			return -EFAULT;
4490 		}
4491 		ptr += sizeof(uint32_t);
4492 		if (copy_to_user(ptr, &tr, trsize)) {
4493 			if (t_from)
4494 				binder_thread_dec_tmpref(t_from);
4495 
4496 			binder_cleanup_transaction(t, "copy_to_user failed",
4497 						   BR_FAILED_REPLY);
4498 
4499 			return -EFAULT;
4500 		}
4501 		ptr += trsize;
4502 
4503 		trace_binder_transaction_received(t);
4504 		binder_stat_br(proc, thread, cmd);
4505 		binder_debug(BINDER_DEBUG_TRANSACTION,
4506 			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4507 			     proc->pid, thread->pid,
4508 			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4509 				(cmd == BR_TRANSACTION_SEC_CTX) ?
4510 				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4511 			     t->debug_id, t_from ? t_from->proc->pid : 0,
4512 			     t_from ? t_from->pid : 0, cmd,
4513 			     t->buffer->data_size, t->buffer->offsets_size,
4514 			     (u64)trd->data.ptr.buffer,
4515 			     (u64)trd->data.ptr.offsets);
4516 
4517 		if (t_from)
4518 			binder_thread_dec_tmpref(t_from);
4519 		t->buffer->allow_user_free = 1;
4520 		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4521 			binder_inner_proc_lock(thread->proc);
4522 			t->to_parent = thread->transaction_stack;
4523 			t->to_thread = thread;
4524 			thread->transaction_stack = t;
4525 			binder_inner_proc_unlock(thread->proc);
4526 		} else {
4527 			binder_free_transaction(t);
4528 		}
4529 		break;
4530 	}
4531 
4532 done:
4533 
4534 	*consumed = ptr - buffer;
4535 	binder_inner_proc_lock(proc);
4536 	if (proc->requested_threads == 0 &&
4537 	    list_empty(&thread->proc->waiting_threads) &&
4538 	    proc->requested_threads_started < proc->max_threads &&
4539 	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4540 	     BINDER_LOOPER_STATE_ENTERED))
4541 	     /* the user-space code fails to spawn a new thread if we leave this out */) {
4542 		proc->requested_threads++;
4543 		binder_inner_proc_unlock(proc);
4544 		binder_debug(BINDER_DEBUG_THREADS,
4545 			     "%d:%d BR_SPAWN_LOOPER\n",
4546 			     proc->pid, thread->pid);
4547 		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4548 			return -EFAULT;
4549 		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4550 	} else
4551 		binder_inner_proc_unlock(proc);
4552 	return 0;
4553 }
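
/*
 * Illustrative consumer loop (editorial sketch): userspace drains the
 * buffer filled above, skipping the leading BR_NOOP and switching on
 * each 32-bit BR_* code:
 *
 *	uint8_t *p = read_buf;
 *	uint8_t *end = read_buf + bwr.read_consumed;
 *	while (p < end) {
 *		uint32_t br;
 *		memcpy(&br, p, sizeof(br));
 *		p += sizeof(br);
 *		switch (br) {
 *		case BR_NOOP:
 *			break;
 *		case BR_TRANSACTION:
 *		case BR_REPLY: {
 *			struct binder_transaction_data tr;
 *			memcpy(&tr, p, sizeof(tr));
 *			p += sizeof(tr);
 *			// ... dispatch tr, later BC_FREE_BUFFER ...
 *			break;
 *		}
 *		// BR_INCREFS/BR_ACQUIRE/... carry two binder_uintptr_t
 *		}
 *	}
 */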
4554 
4555 static void binder_release_work(struct binder_proc *proc,
4556 				struct list_head *list)
4557 {
4558 	struct binder_work *w;
4559 
4560 	while (1) {
4561 		w = binder_dequeue_work_head(proc, list);
4562 		if (!w)
4563 			return;
4564 
4565 		switch (w->type) {
4566 		case BINDER_WORK_TRANSACTION: {
4567 			struct binder_transaction *t;
4568 
4569 			t = container_of(w, struct binder_transaction, work);
4570 
4571 			binder_cleanup_transaction(t, "process died.",
4572 						   BR_DEAD_REPLY);
4573 		} break;
4574 		case BINDER_WORK_RETURN_ERROR: {
4575 			struct binder_error *e = container_of(
4576 					w, struct binder_error, work);
4577 
4578 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4579 				"undelivered TRANSACTION_ERROR: %u\n",
4580 				e->cmd);
4581 		} break;
4582 		case BINDER_WORK_TRANSACTION_COMPLETE: {
4583 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4584 				"undelivered TRANSACTION_COMPLETE\n");
4585 			kfree(w);
4586 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4587 		} break;
4588 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4589 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4590 			struct binder_ref_death *death;
4591 
4592 			death = container_of(w, struct binder_ref_death, work);
4593 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4594 				"undelivered death notification, %016llx\n",
4595 				(u64)death->cookie);
4596 			kfree(death);
4597 			binder_stats_deleted(BINDER_STAT_DEATH);
4598 		} break;
4599 		default:
4600 			pr_err("unexpected work type, %d, not freed\n",
4601 			       w->type);
4602 			break;
4603 		}
4604 	}
4605 
4606 }
4607 
4608 static struct binder_thread *binder_get_thread_ilocked(
4609 		struct binder_proc *proc, struct binder_thread *new_thread)
4610 {
4611 	struct binder_thread *thread = NULL;
4612 	struct rb_node *parent = NULL;
4613 	struct rb_node **p = &proc->threads.rb_node;
4614 
4615 	while (*p) {
4616 		parent = *p;
4617 		thread = rb_entry(parent, struct binder_thread, rb_node);
4618 
4619 		if (current->pid < thread->pid)
4620 			p = &(*p)->rb_left;
4621 		else if (current->pid > thread->pid)
4622 			p = &(*p)->rb_right;
4623 		else
4624 			return thread;
4625 	}
4626 	if (!new_thread)
4627 		return NULL;
4628 	thread = new_thread;
4629 	binder_stats_created(BINDER_STAT_THREAD);
4630 	thread->proc = proc;
4631 	thread->pid = current->pid;
4632 	atomic_set(&thread->tmp_ref, 0);
4633 	init_waitqueue_head(&thread->wait);
4634 	INIT_LIST_HEAD(&thread->todo);
4635 	rb_link_node(&thread->rb_node, parent, p);
4636 	rb_insert_color(&thread->rb_node, &proc->threads);
4637 	thread->looper_need_return = true;
4638 	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4639 	thread->return_error.cmd = BR_OK;
4640 	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4641 	thread->reply_error.cmd = BR_OK;
4642 	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4643 	return thread;
4644 }
4645 
4646 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4647 {
4648 	struct binder_thread *thread;
4649 	struct binder_thread *new_thread;
4650 
4651 	binder_inner_proc_lock(proc);
4652 	thread = binder_get_thread_ilocked(proc, NULL);
4653 	binder_inner_proc_unlock(proc);
4654 	if (!thread) {
4655 		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4656 		if (new_thread == NULL)
4657 			return NULL;
4658 		binder_inner_proc_lock(proc);
4659 		thread = binder_get_thread_ilocked(proc, new_thread);
4660 		binder_inner_proc_unlock(proc);
4661 		if (thread != new_thread)
4662 			kfree(new_thread);
4663 	}
4664 	return thread;
4665 }
4666 
4667 static void binder_free_proc(struct binder_proc *proc)
4668 {
4669 	BUG_ON(!list_empty(&proc->todo));
4670 	BUG_ON(!list_empty(&proc->delivered_death));
4671 	binder_alloc_deferred_release(&proc->alloc);
4672 	put_task_struct(proc->tsk);
4673 	binder_stats_deleted(BINDER_STAT_PROC);
4674 	kfree(proc);
4675 }
4676 
4677 static void binder_free_thread(struct binder_thread *thread)
4678 {
4679 	BUG_ON(!list_empty(&thread->todo));
4680 	binder_stats_deleted(BINDER_STAT_THREAD);
4681 	binder_proc_dec_tmpref(thread->proc);
4682 	kfree(thread);
4683 }
4684 
4685 static int binder_thread_release(struct binder_proc *proc,
4686 				 struct binder_thread *thread)
4687 {
4688 	struct binder_transaction *t;
4689 	struct binder_transaction *send_reply = NULL;
4690 	int active_transactions = 0;
4691 	struct binder_transaction *last_t = NULL;
4692 
4693 	binder_inner_proc_lock(thread->proc);
4694 	/*
4695 	 * take a ref on the proc so it survives
4696 	 * after we remove this thread from proc->threads.
4697 	 * The corresponding decrement happens when we
4698 	 * actually free the thread in binder_free_thread().
4699 	 */
4700 	proc->tmp_ref++;
4701 	/*
4702 	 * take a ref on this thread to ensure it
4703 	 * survives while we are releasing it
4704 	 */
4705 	atomic_inc(&thread->tmp_ref);
4706 	rb_erase(&thread->rb_node, &proc->threads);
4707 	t = thread->transaction_stack;
4708 	if (t) {
4709 		spin_lock(&t->lock);
4710 		if (t->to_thread == thread)
4711 			send_reply = t;
4712 	} else {
4713 		__acquire(&t->lock);
4714 	}
4715 	thread->is_dead = true;
4716 
4717 	while (t) {
4718 		last_t = t;
4719 		active_transactions++;
4720 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4721 			     "release %d:%d transaction %d %s, still active\n",
4722 			      proc->pid, thread->pid,
4723 			     t->debug_id,
4724 			     (t->to_thread == thread) ? "in" : "out");
4725 
4726 		if (t->to_thread == thread) {
4727 			t->to_proc = NULL;
4728 			t->to_thread = NULL;
4729 			if (t->buffer) {
4730 				t->buffer->transaction = NULL;
4731 				t->buffer = NULL;
4732 			}
4733 			t = t->to_parent;
4734 		} else if (t->from == thread) {
4735 			t->from = NULL;
4736 			t = t->from_parent;
4737 		} else
4738 			BUG();
4739 		spin_unlock(&last_t->lock);
4740 		if (t)
4741 			spin_lock(&t->lock);
4742 		else
4743 			__acquire(&t->lock);
4744 	}
4745 	/* annotation for sparse, lock not acquired in last iteration above */
4746 	__release(&t->lock);
4747 
4748 	/*
4749 	 * If this thread used poll, make sure we remove the waitqueue
4750 	 * from any epoll data structures holding it with POLLFREE.
4751 	 * waitqueue_active() is safe to use here because we're holding
4752 	 * the inner lock.
4753 	 */
4754 	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4755 	    waitqueue_active(&thread->wait)) {
4756 		wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
4757 	}
4758 
4759 	binder_inner_proc_unlock(thread->proc);
4760 
4761 	/*
4762 	 * This is needed to avoid races between wake_up_poll() above and
4763 	 * ep_remove_waitqueue() called for other reasons (e.g. the epoll file
4764 	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
4765 	 * lock, so we can be sure it's done after calling synchronize_rcu().
4766 	 */
4767 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
4768 		synchronize_rcu();
4769 
4770 	if (send_reply)
4771 		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4772 	binder_release_work(proc, &thread->todo);
4773 	binder_thread_dec_tmpref(thread);
4774 	return active_transactions;
4775 }
4776 
4777 static __poll_t binder_poll(struct file *filp,
4778 				struct poll_table_struct *wait)
4779 {
4780 	struct binder_proc *proc = filp->private_data;
4781 	struct binder_thread *thread = NULL;
4782 	bool wait_for_proc_work;
4783 
4784 	thread = binder_get_thread(proc);
4785 	if (!thread)
4786 		return POLLERR;
4787 
4788 	binder_inner_proc_lock(thread->proc);
4789 	thread->looper |= BINDER_LOOPER_STATE_POLL;
4790 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4791 
4792 	binder_inner_proc_unlock(thread->proc);
4793 
4794 	poll_wait(filp, &thread->wait, wait);
4795 
4796 	if (binder_has_work(thread, wait_for_proc_work))
4797 		return EPOLLIN;
4798 
4799 	return 0;
4800 }
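
/*
 * Illustrative use (editorial sketch): a looper thread may multiplex
 * the binder fd with epoll; EPOLLIN from the poll handler above means
 * binder_thread_read() would find work without blocking:
 *
 *	struct epoll_event ev = { .events = EPOLLIN };
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, binder_fd, &ev);
 *	if (epoll_wait(epfd, &ev, 1, -1) == 1)
 *		drain_binder(binder_fd);	// hypothetical helper
 */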
4801 
4802 static int binder_ioctl_write_read(struct file *filp,
4803 				unsigned int cmd, unsigned long arg,
4804 				struct binder_thread *thread)
4805 {
4806 	int ret = 0;
4807 	struct binder_proc *proc = filp->private_data;
4808 	unsigned int size = _IOC_SIZE(cmd);
4809 	void __user *ubuf = (void __user *)arg;
4810 	struct binder_write_read bwr;
4811 
4812 	if (size != sizeof(struct binder_write_read)) {
4813 		ret = -EINVAL;
4814 		goto out;
4815 	}
4816 	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4817 		ret = -EFAULT;
4818 		goto out;
4819 	}
4820 	binder_debug(BINDER_DEBUG_READ_WRITE,
4821 		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4822 		     proc->pid, thread->pid,
4823 		     (u64)bwr.write_size, (u64)bwr.write_buffer,
4824 		     (u64)bwr.read_size, (u64)bwr.read_buffer);
4825 
4826 	if (bwr.write_size > 0) {
4827 		ret = binder_thread_write(proc, thread,
4828 					  bwr.write_buffer,
4829 					  bwr.write_size,
4830 					  &bwr.write_consumed);
4831 		trace_binder_write_done(ret);
4832 		if (ret < 0) {
4833 			bwr.read_consumed = 0;
4834 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4835 				ret = -EFAULT;
4836 			goto out;
4837 		}
4838 	}
4839 	if (bwr.read_size > 0) {
4840 		ret = binder_thread_read(proc, thread, bwr.read_buffer,
4841 					 bwr.read_size,
4842 					 &bwr.read_consumed,
4843 					 filp->f_flags & O_NONBLOCK);
4844 		trace_binder_read_done(ret);
4845 		binder_inner_proc_lock(proc);
4846 		if (!binder_worklist_empty_ilocked(&proc->todo))
4847 			binder_wakeup_proc_ilocked(proc);
4848 		binder_inner_proc_unlock(proc);
4849 		if (ret < 0) {
4850 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4851 				ret = -EFAULT;
4852 			goto out;
4853 		}
4854 	}
4855 	binder_debug(BINDER_DEBUG_READ_WRITE,
4856 		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4857 		     proc->pid, thread->pid,
4858 		     (u64)bwr.write_consumed, (u64)bwr.write_size,
4859 		     (u64)bwr.read_consumed, (u64)bwr.read_size);
4860 	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4861 		ret = -EFAULT;
4862 		goto out;
4863 	}
4864 out:
4865 	return ret;
4866 }
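
/*
 * Illustrative caller view (editorial sketch): both directions can be
 * active in one BINDER_WRITE_READ, and the *_consumed fields report
 * progress back to userspace even when one half fails:
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)cmds,
 *		.write_size = cmds_len,
 *		.read_buffer = (binder_uintptr_t)rbuf,
 *		.read_size = sizeof(rbuf),
 *	};
 *	if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) == 0) {
 *		// bwr.write_consumed: bytes of cmds parsed
 *		// bwr.read_consumed: bytes of rbuf filled with BR_* data
 *	}
 */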
4867 
4868 static int binder_ioctl_set_ctx_mgr(struct file *filp,
4869 				    struct flat_binder_object *fbo)
4870 {
4871 	int ret = 0;
4872 	struct binder_proc *proc = filp->private_data;
4873 	struct binder_context *context = proc->context;
4874 	struct binder_node *new_node;
4875 	kuid_t curr_euid = current_euid();
4876 
4877 	mutex_lock(&context->context_mgr_node_lock);
4878 	if (context->binder_context_mgr_node) {
4879 		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4880 		ret = -EBUSY;
4881 		goto out;
4882 	}
4883 	ret = security_binder_set_context_mgr(proc->tsk);
4884 	if (ret < 0)
4885 		goto out;
4886 	if (uid_valid(context->binder_context_mgr_uid)) {
4887 		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4888 			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4889 			       from_kuid(&init_user_ns, curr_euid),
4890 			       from_kuid(&init_user_ns,
4891 					 context->binder_context_mgr_uid));
4892 			ret = -EPERM;
4893 			goto out;
4894 		}
4895 	} else {
4896 		context->binder_context_mgr_uid = curr_euid;
4897 	}
4898 	new_node = binder_new_node(proc, fbo);
4899 	if (!new_node) {
4900 		ret = -ENOMEM;
4901 		goto out;
4902 	}
4903 	binder_node_lock(new_node);
4904 	new_node->local_weak_refs++;
4905 	new_node->local_strong_refs++;
4906 	new_node->has_strong_ref = 1;
4907 	new_node->has_weak_ref = 1;
4908 	context->binder_context_mgr_node = new_node;
4909 	binder_node_unlock(new_node);
4910 	binder_put_node(new_node);
4911 out:
4912 	mutex_unlock(&context->context_mgr_node_lock);
4913 	return ret;
4914 }
4915 
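/*
 * binder_ioctl_get_node_info_for_ref() - report ref counts for a handle
 *
 * Restricted to the context manager. All fields of @info other than
 * @handle must be zero on entry; on success the strong and weak
 * counts of the node behind @handle are returned through @info.
 */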
4916 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4917 		struct binder_node_info_for_ref *info)
4918 {
4919 	struct binder_node *node;
4920 	struct binder_context *context = proc->context;
4921 	__u32 handle = info->handle;
4922 
4923 	if (info->strong_count || info->weak_count || info->reserved1 ||
4924 	    info->reserved2 || info->reserved3) {
4925 		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.\n",
4926 				  proc->pid);
4927 		return -EINVAL;
4928 	}
4929 
4930 	/* This ioctl may only be used by the context manager */
4931 	mutex_lock(&context->context_mgr_node_lock);
4932 	if (!context->binder_context_mgr_node ||
4933 		context->binder_context_mgr_node->proc != proc) {
4934 		mutex_unlock(&context->context_mgr_node_lock);
4935 		return -EPERM;
4936 	}
4937 	mutex_unlock(&context->context_mgr_node_lock);
4938 
4939 	node = binder_get_node_from_ref(proc, handle, true, NULL);
4940 	if (!node)
4941 		return -EINVAL;
4942 
4943 	info->strong_count = node->local_strong_refs +
4944 		node->internal_strong_refs;
4945 	info->weak_count = node->local_weak_refs;
4946 
4947 	binder_put_node(node);
4948 
4949 	return 0;
4950 }
4951 
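/*
 * binder_ioctl_get_node_debug_info() - find the next node by ptr
 *
 * Fills @info with the first node of @proc whose ptr is strictly
 * greater than the ptr passed in, or leaves @info all-zero when the
 * walk is complete. Userspace can therefore enumerate every node
 * roughly like this (hypothetical sketch, error handling omitted):
 *
 *	struct binder_node_debug_info info = {0};
 *
 *	do {
 *		if (ioctl(fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
 *			break;
 *		if (info.ptr)
 *			printf("node u%016llx\n",
 *			       (unsigned long long)info.ptr);
 *	} while (info.ptr);
 */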
4952 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4953 				struct binder_node_debug_info *info)
4954 {
4955 	struct rb_node *n;
4956 	binder_uintptr_t ptr = info->ptr;
4957 
4958 	memset(info, 0, sizeof(*info));
4959 
4960 	binder_inner_proc_lock(proc);
4961 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4962 		struct binder_node *node = rb_entry(n, struct binder_node,
4963 						    rb_node);
4964 		if (node->ptr > ptr) {
4965 			info->ptr = node->ptr;
4966 			info->cookie = node->cookie;
4967 			info->has_strong_ref = node->has_strong_ref;
4968 			info->has_weak_ref = node->has_weak_ref;
4969 			break;
4970 		}
4971 	}
4972 	binder_inner_proc_unlock(proc);
4973 
4974 	return 0;
4975 }
4976 
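/*
 * binder_ioctl() - top-level ioctl dispatcher
 *
 * Every command runs on behalf of a binder_thread, created on first
 * use by binder_get_thread(). Clients typically start by verifying
 * the protocol version (illustrative userspace sketch, assuming an
 * fd opened on /dev/binder):
 *
 *	struct binder_version vers;
 *
 *	if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
 *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		fprintf(stderr, "binder version mismatch\n");
 */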
4977 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4978 {
4979 	int ret;
4980 	struct binder_proc *proc = filp->private_data;
4981 	struct binder_thread *thread;
4982 	unsigned int size = _IOC_SIZE(cmd);
4983 	void __user *ubuf = (void __user *)arg;
4984 
4985 	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
4986 			proc->pid, current->pid, cmd, arg);*/
4987 
4988 	binder_selftest_alloc(&proc->alloc);
4989 
4990 	trace_binder_ioctl(cmd, arg);
4991 
4992 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4993 	if (ret)
4994 		goto err_unlocked;
4995 
4996 	thread = binder_get_thread(proc);
4997 	if (thread == NULL) {
4998 		ret = -ENOMEM;
4999 		goto err;
5000 	}
5001 
5002 	switch (cmd) {
5003 	case BINDER_WRITE_READ:
5004 		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
5005 		if (ret)
5006 			goto err;
5007 		break;
5008 	case BINDER_SET_MAX_THREADS: {
5009 		int max_threads;
5010 
5011 		if (copy_from_user(&max_threads, ubuf,
5012 				   sizeof(max_threads))) {
5013 			ret = -EINVAL;
5014 			goto err;
5015 		}
5016 		binder_inner_proc_lock(proc);
5017 		proc->max_threads = max_threads;
5018 		binder_inner_proc_unlock(proc);
5019 		break;
5020 	}
5021 	case BINDER_SET_CONTEXT_MGR_EXT: {
5022 		struct flat_binder_object fbo;
5023 
5024 		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5025 			ret = -EINVAL;
5026 			goto err;
5027 		}
5028 		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5029 		if (ret)
5030 			goto err;
5031 		break;
5032 	}
5033 	case BINDER_SET_CONTEXT_MGR:
5034 		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5035 		if (ret)
5036 			goto err;
5037 		break;
5038 	case BINDER_THREAD_EXIT:
5039 		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5040 			     proc->pid, thread->pid);
5041 		binder_thread_release(proc, thread);
5042 		thread = NULL;
5043 		break;
5044 	case BINDER_VERSION: {
5045 		struct binder_version __user *ver = ubuf;
5046 
5047 		if (size != sizeof(struct binder_version)) {
5048 			ret = -EINVAL;
5049 			goto err;
5050 		}
5051 		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5052 			     &ver->protocol_version)) {
5053 			ret = -EINVAL;
5054 			goto err;
5055 		}
5056 		break;
5057 	}
5058 	case BINDER_GET_NODE_INFO_FOR_REF: {
5059 		struct binder_node_info_for_ref info;
5060 
5061 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5062 			ret = -EFAULT;
5063 			goto err;
5064 		}
5065 
5066 		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5067 		if (ret < 0)
5068 			goto err;
5069 
5070 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5071 			ret = -EFAULT;
5072 			goto err;
5073 		}
5074 
5075 		break;
5076 	}
5077 	case BINDER_GET_NODE_DEBUG_INFO: {
5078 		struct binder_node_debug_info info;
5079 
5080 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5081 			ret = -EFAULT;
5082 			goto err;
5083 		}
5084 
5085 		ret = binder_ioctl_get_node_debug_info(proc, &info);
5086 		if (ret < 0)
5087 			goto err;
5088 
5089 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5090 			ret = -EFAULT;
5091 			goto err;
5092 		}
5093 		break;
5094 	}
5095 	default:
5096 		ret = -EINVAL;
5097 		goto err;
5098 	}
5099 	ret = 0;
5100 err:
5101 	if (thread)
5102 		thread->looper_need_return = false;
5103 	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5104 	if (ret && ret != -ERESTARTSYS)
5105 		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5106 err_unlocked:
5107 	trace_binder_ioctl_done(ret);
5108 	return ret;
5109 }
5110 
5111 static void binder_vma_open(struct vm_area_struct *vma)
5112 {
5113 	struct binder_proc *proc = vma->vm_private_data;
5114 
5115 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5116 		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5117 		     proc->pid, vma->vm_start, vma->vm_end,
5118 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5119 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5120 }
5121 
5122 static void binder_vma_close(struct vm_area_struct *vma)
5123 {
5124 	struct binder_proc *proc = vma->vm_private_data;
5125 
5126 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5127 		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5128 		     proc->pid, vma->vm_start, vma->vm_end,
5129 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5130 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5131 	binder_alloc_vma_close(&proc->alloc);
5132 }
5133 
5134 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5135 {
5136 	return VM_FAULT_SIGBUS;
5137 }
5138 
5139 static const struct vm_operations_struct binder_vm_ops = {
5140 	.open = binder_vma_open,
5141 	.close = binder_vma_close,
5142 	.fault = binder_vm_fault,
5143 };
5144 
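/*
 * binder_mmap() - map the binder buffer space into the caller
 *
 * Only the process that opened the device may map it, the mapping is
 * capped at 4MB (larger requests are silently truncated), and it is
 * read-only for userspace: FORBIDDEN_MMAP_FLAGS rejects VM_WRITE and
 * VM_MAYWRITE is cleared, since only the kernel copies transaction
 * data into the buffers. A typical (illustrative) userspace call:
 *
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ,
 *			 MAP_PRIVATE | MAP_NORESERVE, fd, 0);
 */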
5145 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5146 {
5147 	int ret;
5148 	struct binder_proc *proc = filp->private_data;
5149 	const char *failure_string;
5150 
5151 	if (proc->tsk != current->group_leader)
5152 		return -EINVAL;
5153 
5154 	if ((vma->vm_end - vma->vm_start) > SZ_4M)
5155 		vma->vm_end = vma->vm_start + SZ_4M;
5156 
5157 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5158 		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5159 		     __func__, proc->pid, vma->vm_start, vma->vm_end,
5160 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5161 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5162 
5163 	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5164 		ret = -EPERM;
5165 		failure_string = "bad vm_flags";
5166 		goto err_bad_arg;
5167 	}
5168 	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5169 	vma->vm_flags &= ~VM_MAYWRITE;
5170 
5171 	vma->vm_ops = &binder_vm_ops;
5172 	vma->vm_private_data = proc;
5173 
5174 	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
5175 	if (ret)
5176 		return ret;
5177 	return 0;
5178 
5179 err_bad_arg:
5180 	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5181 	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
5182 	return ret;
5183 }
5184 
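/*
 * binder_open() - set up a binder_proc for this open of the device
 *
 * Each open file gets its own binder_proc, tied to the opening
 * process's group leader. The proc records its context (taken from
 * the misc device, or from the binderfs inode's i_private), gets its
 * allocator initialized, and is added to the global binder_procs
 * list and to debugfs under its PID.
 */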
5185 static int binder_open(struct inode *nodp, struct file *filp)
5186 {
5187 	struct binder_proc *proc;
5188 	struct binder_device *binder_dev;
5189 
5190 	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5191 		     current->group_leader->pid, current->pid);
5192 
5193 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5194 	if (proc == NULL)
5195 		return -ENOMEM;
5196 	spin_lock_init(&proc->inner_lock);
5197 	spin_lock_init(&proc->outer_lock);
5198 	get_task_struct(current->group_leader);
5199 	proc->tsk = current->group_leader;
5200 	INIT_LIST_HEAD(&proc->todo);
5201 	proc->default_priority = task_nice(current);
5202 	/* binderfs stashes devices in i_private */
5203 	if (is_binderfs_device(nodp))
5204 		binder_dev = nodp->i_private;
5205 	else
5206 		binder_dev = container_of(filp->private_data,
5207 					  struct binder_device, miscdev);
5208 	proc->context = &binder_dev->context;
5209 	binder_alloc_init(&proc->alloc);
5210 
5211 	binder_stats_created(BINDER_STAT_PROC);
5212 	proc->pid = current->group_leader->pid;
5213 	INIT_LIST_HEAD(&proc->delivered_death);
5214 	INIT_LIST_HEAD(&proc->waiting_threads);
5215 	filp->private_data = proc;
5216 
5217 	mutex_lock(&binder_procs_lock);
5218 	hlist_add_head(&proc->proc_node, &binder_procs);
5219 	mutex_unlock(&binder_procs_lock);
5220 
5221 	if (binder_debugfs_dir_entry_proc) {
5222 		char strbuf[11];
5223 
5224 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5225 		/*
5226 		 * proc debug entries are shared between contexts, so
5227 		 * this will fail if the process tries to open the driver
5228 		 * again with a different context. The printing code will
5229 		 * print all contexts that a given PID has anyway, so this
5230 		 * is not a problem.
5231 		 */
5232 		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5233 			binder_debugfs_dir_entry_proc,
5234 			(void *)(unsigned long)proc->pid,
5235 			&proc_fops);
5236 	}
5237 
5238 	return 0;
5239 }
5240 
5241 static int binder_flush(struct file *filp, fl_owner_t id)
5242 {
5243 	struct binder_proc *proc = filp->private_data;
5244 
5245 	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5246 
5247 	return 0;
5248 }
5249 
5250 static void binder_deferred_flush(struct binder_proc *proc)
5251 {
5252 	struct rb_node *n;
5253 	int wake_count = 0;
5254 
5255 	binder_inner_proc_lock(proc);
5256 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5257 		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5258 
5259 		thread->looper_need_return = true;
5260 		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5261 			wake_up_interruptible(&thread->wait);
5262 			wake_count++;
5263 		}
5264 	}
5265 	binder_inner_proc_unlock(proc);
5266 
5267 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5268 		     "binder_flush: %d woke %d threads\n", proc->pid,
5269 		     wake_count);
5270 }
5271 
5272 static int binder_release(struct inode *nodp, struct file *filp)
5273 {
5274 	struct binder_proc *proc = filp->private_data;
5275 
5276 	debugfs_remove(proc->debugfs_entry);
5277 	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5278 
5279 	return 0;
5280 }
5281 
5282 static int binder_node_release(struct binder_node *node, int refs)
5283 {
5284 	struct binder_ref *ref;
5285 	int death = 0;
5286 	struct binder_proc *proc = node->proc;
5287 
5288 	binder_release_work(proc, &node->async_todo);
5289 
5290 	binder_node_lock(node);
5291 	binder_inner_proc_lock(proc);
5292 	binder_dequeue_work_ilocked(&node->work);
5293 	/*
5294 	 * The caller must have taken a temporary ref on the node.
5295 	 */
5296 	BUG_ON(!node->tmp_refs);
5297 	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5298 		binder_inner_proc_unlock(proc);
5299 		binder_node_unlock(node);
5300 		binder_free_node(node);
5301 
5302 		return refs;
5303 	}
5304 
5305 	node->proc = NULL;
5306 	node->local_strong_refs = 0;
5307 	node->local_weak_refs = 0;
5308 	binder_inner_proc_unlock(proc);
5309 
5310 	spin_lock(&binder_dead_nodes_lock);
5311 	hlist_add_head(&node->dead_node, &binder_dead_nodes);
5312 	spin_unlock(&binder_dead_nodes_lock);
5313 
5314 	hlist_for_each_entry(ref, &node->refs, node_entry) {
5315 		refs++;
5316 		/*
5317 		 * Need the node lock to synchronize
5318 		 * with new notification requests and the
5319 		 * inner lock to synchronize with queued
5320 		 * death notifications.
5321 		 */
5322 		binder_inner_proc_lock(ref->proc);
5323 		if (!ref->death) {
5324 			binder_inner_proc_unlock(ref->proc);
5325 			continue;
5326 		}
5327 
5328 		death++;
5329 
5330 		BUG_ON(!list_empty(&ref->death->work.entry));
5331 		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5332 		binder_enqueue_work_ilocked(&ref->death->work,
5333 					    &ref->proc->todo);
5334 		binder_wakeup_proc_ilocked(ref->proc);
5335 		binder_inner_proc_unlock(ref->proc);
5336 	}
5337 
5338 	binder_debug(BINDER_DEBUG_DEAD_BINDER,
5339 		     "node %d now dead, refs %d, death %d\n",
5340 		     node->debug_id, refs, death);
5341 	binder_node_unlock(node);
5342 	binder_put_node(node);
5343 
5344 	return refs;
5345 }
5346 
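/*
 * binder_deferred_release() - tear down a dying binder_proc
 *
 * Unhooks the proc from the global lists (including the context
 * manager slot if it owned it), marks it dead, then releases, in
 * order: threads, nodes (which become dead nodes if still
 * referenced), outgoing refs, and any pending work, before dropping
 * the temporary proc reference that keeps the struct alive through
 * the walk.
 */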
5347 static void binder_deferred_release(struct binder_proc *proc)
5348 {
5349 	struct binder_context *context = proc->context;
5350 	struct rb_node *n;
5351 	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5352 
5353 	mutex_lock(&binder_procs_lock);
5354 	hlist_del(&proc->proc_node);
5355 	mutex_unlock(&binder_procs_lock);
5356 
5357 	mutex_lock(&context->context_mgr_node_lock);
5358 	if (context->binder_context_mgr_node &&
5359 	    context->binder_context_mgr_node->proc == proc) {
5360 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
5361 			     "%s: %d context_mgr_node gone\n",
5362 			     __func__, proc->pid);
5363 		context->binder_context_mgr_node = NULL;
5364 	}
5365 	mutex_unlock(&context->context_mgr_node_lock);
5366 	binder_inner_proc_lock(proc);
5367 	/*
5368 	 * Make sure proc stays alive after we
5369 	 * remove all the threads
5370 	 */
5371 	proc->tmp_ref++;
5372 
5373 	proc->is_dead = true;
5374 	threads = 0;
5375 	active_transactions = 0;
5376 	while ((n = rb_first(&proc->threads))) {
5377 		struct binder_thread *thread;
5378 
5379 		thread = rb_entry(n, struct binder_thread, rb_node);
5380 		binder_inner_proc_unlock(proc);
5381 		threads++;
5382 		active_transactions += binder_thread_release(proc, thread);
5383 		binder_inner_proc_lock(proc);
5384 	}
5385 
5386 	nodes = 0;
5387 	incoming_refs = 0;
5388 	while ((n = rb_first(&proc->nodes))) {
5389 		struct binder_node *node;
5390 
5391 		node = rb_entry(n, struct binder_node, rb_node);
5392 		nodes++;
5393 		/*
5394 		 * take a temporary ref on the node before
5395 		 * calling binder_node_release() which will either
5396 		 * kfree() the node or call binder_put_node()
5397 		 */
5398 		binder_inc_node_tmpref_ilocked(node);
5399 		rb_erase(&node->rb_node, &proc->nodes);
5400 		binder_inner_proc_unlock(proc);
5401 		incoming_refs = binder_node_release(node, incoming_refs);
5402 		binder_inner_proc_lock(proc);
5403 	}
5404 	binder_inner_proc_unlock(proc);
5405 
5406 	outgoing_refs = 0;
5407 	binder_proc_lock(proc);
5408 	while ((n = rb_first(&proc->refs_by_desc))) {
5409 		struct binder_ref *ref;
5410 
5411 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
5412 		outgoing_refs++;
5413 		binder_cleanup_ref_olocked(ref);
5414 		binder_proc_unlock(proc);
5415 		binder_free_ref(ref);
5416 		binder_proc_lock(proc);
5417 	}
5418 	binder_proc_unlock(proc);
5419 
5420 	binder_release_work(proc, &proc->todo);
5421 	binder_release_work(proc, &proc->delivered_death);
5422 
5423 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5424 		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5425 		     __func__, proc->pid, threads, nodes, incoming_refs,
5426 		     outgoing_refs, active_transactions);
5427 
5428 	binder_proc_dec_tmpref(proc);
5429 }
5430 
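/*
 * binder_deferred_func() - workqueue handler for deferred flush/release
 *
 * binder_flush() and binder_release() defer their heavier work:
 * binder_defer_work() queues the proc on binder_deferred_list and
 * schedules this worker, which drains the list one proc at a time
 * until it is empty.
 */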
5431 static void binder_deferred_func(struct work_struct *work)
5432 {
5433 	struct binder_proc *proc;
5434 
5435 	int defer;
5436 
5437 	do {
5438 		mutex_lock(&binder_deferred_lock);
5439 		if (!hlist_empty(&binder_deferred_list)) {
5440 			proc = hlist_entry(binder_deferred_list.first,
5441 					struct binder_proc, deferred_work_node);
5442 			hlist_del_init(&proc->deferred_work_node);
5443 			defer = proc->deferred_work;
5444 			proc->deferred_work = 0;
5445 		} else {
5446 			proc = NULL;
5447 			defer = 0;
5448 		}
5449 		mutex_unlock(&binder_deferred_lock);
5450 
5451 		if (defer & BINDER_DEFERRED_FLUSH)
5452 			binder_deferred_flush(proc);
5453 
5454 		if (defer & BINDER_DEFERRED_RELEASE)
5455 			binder_deferred_release(proc); /* frees proc */
5456 	} while (proc);
5457 }

5458 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5459 
5460 static void
5461 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5462 {
5463 	mutex_lock(&binder_deferred_lock);
5464 	proc->deferred_work |= defer;
5465 	if (hlist_unhashed(&proc->deferred_work_node)) {
5466 		hlist_add_head(&proc->deferred_work_node,
5467 				&binder_deferred_list);
5468 		schedule_work(&binder_deferred_work);
5469 	}
5470 	mutex_unlock(&binder_deferred_lock);
5471 }
5472 
5473 static void print_binder_transaction_ilocked(struct seq_file *m,
5474 					     struct binder_proc *proc,
5475 					     const char *prefix,
5476 					     struct binder_transaction *t)
5477 {
5478 	struct binder_proc *to_proc;
5479 	struct binder_buffer *buffer = t->buffer;
5480 
5481 	spin_lock(&t->lock);
5482 	to_proc = t->to_proc;
5483 	seq_printf(m,
5484 		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5485 		   prefix, t->debug_id, t,
5486 		   t->from ? t->from->proc->pid : 0,
5487 		   t->from ? t->from->pid : 0,
5488 		   to_proc ? to_proc->pid : 0,
5489 		   t->to_thread ? t->to_thread->pid : 0,
5490 		   t->code, t->flags, t->priority, t->need_reply);
5491 	spin_unlock(&t->lock);
5492 
5493 	if (proc != to_proc) {
5494 		/*
5495 		 * Can only safely deref buffer if we are holding the
5496 		 * correct proc inner lock for this node
5497 		 */
5498 		seq_puts(m, "\n");
5499 		return;
5500 	}
5501 
5502 	if (buffer == NULL) {
5503 		seq_puts(m, " buffer free\n");
5504 		return;
5505 	}
5506 	if (buffer->target_node)
5507 		seq_printf(m, " node %d", buffer->target_node->debug_id);
5508 	seq_printf(m, " size %zd:%zd data %pK\n",
5509 		   buffer->data_size, buffer->offsets_size,
5510 		   buffer->user_data);
5511 }
5512 
5513 static void print_binder_work_ilocked(struct seq_file *m,
5514 				     struct binder_proc *proc,
5515 				     const char *prefix,
5516 				     const char *transaction_prefix,
5517 				     struct binder_work *w)
5518 {
5519 	struct binder_node *node;
5520 	struct binder_transaction *t;
5521 
5522 	switch (w->type) {
5523 	case BINDER_WORK_TRANSACTION:
5524 		t = container_of(w, struct binder_transaction, work);
5525 		print_binder_transaction_ilocked(
5526 				m, proc, transaction_prefix, t);
5527 		break;
5528 	case BINDER_WORK_RETURN_ERROR: {
5529 		struct binder_error *e = container_of(
5530 				w, struct binder_error, work);
5531 
5532 		seq_printf(m, "%stransaction error: %u\n",
5533 			   prefix, e->cmd);
5534 	} break;
5535 	case BINDER_WORK_TRANSACTION_COMPLETE:
5536 		seq_printf(m, "%stransaction complete\n", prefix);
5537 		break;
5538 	case BINDER_WORK_NODE:
5539 		node = container_of(w, struct binder_node, work);
5540 		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5541 			   prefix, node->debug_id,
5542 			   (u64)node->ptr, (u64)node->cookie);
5543 		break;
5544 	case BINDER_WORK_DEAD_BINDER:
5545 		seq_printf(m, "%shas dead binder\n", prefix);
5546 		break;
5547 	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5548 		seq_printf(m, "%shas cleared dead binder\n", prefix);
5549 		break;
5550 	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5551 		seq_printf(m, "%shas cleared death notification\n", prefix);
5552 		break;
5553 	default:
5554 		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5555 		break;
5556 	}
5557 }
5558 
5559 static void print_binder_thread_ilocked(struct seq_file *m,
5560 					struct binder_thread *thread,
5561 					int print_always)
5562 {
5563 	struct binder_transaction *t;
5564 	struct binder_work *w;
5565 	size_t start_pos = m->count;
5566 	size_t header_pos;
5567 
5568 	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
5569 			thread->pid, thread->looper,
5570 			thread->looper_need_return,
5571 			atomic_read(&thread->tmp_ref));
5572 	header_pos = m->count;
5573 	t = thread->transaction_stack;
5574 	while (t) {
5575 		if (t->from == thread) {
5576 			print_binder_transaction_ilocked(m, thread->proc,
5577 					"    outgoing transaction", t);
5578 			t = t->from_parent;
5579 		} else if (t->to_thread == thread) {
5580 			print_binder_transaction_ilocked(m, thread->proc,
5581 						 "    incoming transaction", t);
5582 			t = t->to_parent;
5583 		} else {
5584 			print_binder_transaction_ilocked(m, thread->proc,
5585 					"    bad transaction", t);
5586 			t = NULL;
5587 		}
5588 	}
5589 	list_for_each_entry(w, &thread->todo, entry) {
5590 		print_binder_work_ilocked(m, thread->proc, "    ",
5591 					  "    pending transaction", w);
5592 	}
5593 	if (!print_always && m->count == header_pos)
5594 		m->count = start_pos;
5595 }
5596 
5597 static void print_binder_node_nilocked(struct seq_file *m,
5598 				       struct binder_node *node)
5599 {
5600 	struct binder_ref *ref;
5601 	struct binder_work *w;
5602 	int count;
5603 
5604 	count = 0;
5605 	hlist_for_each_entry(ref, &node->refs, node_entry)
5606 		count++;
5607 
5608 	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5609 		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
5610 		   node->has_strong_ref, node->has_weak_ref,
5611 		   node->local_strong_refs, node->local_weak_refs,
5612 		   node->internal_strong_refs, count, node->tmp_refs);
5613 	if (count) {
5614 		seq_puts(m, " proc");
5615 		hlist_for_each_entry(ref, &node->refs, node_entry)
5616 			seq_printf(m, " %d", ref->proc->pid);
5617 	}
5618 	seq_puts(m, "\n");
5619 	if (node->proc) {
5620 		list_for_each_entry(w, &node->async_todo, entry)
5621 			print_binder_work_ilocked(m, node->proc, "    ",
5622 					  "    pending async transaction", w);
5623 	}
5624 }
5625 
5626 static void print_binder_ref_olocked(struct seq_file *m,
5627 				     struct binder_ref *ref)
5628 {
5629 	binder_node_lock(ref->node);
5630 	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
5631 		   ref->data.debug_id, ref->data.desc,
5632 		   ref->node->proc ? "" : "dead ",
5633 		   ref->node->debug_id, ref->data.strong,
5634 		   ref->data.weak, ref->death);
5635 	binder_node_unlock(ref->node);
5636 }
5637 
5638 static void print_binder_proc(struct seq_file *m,
5639 			      struct binder_proc *proc, int print_all)
5640 {
5641 	struct binder_work *w;
5642 	struct rb_node *n;
5643 	size_t start_pos = m->count;
5644 	size_t header_pos;
5645 	struct binder_node *last_node = NULL;
5646 
5647 	seq_printf(m, "proc %d\n", proc->pid);
5648 	seq_printf(m, "context %s\n", proc->context->name);
5649 	header_pos = m->count;
5650 
5651 	binder_inner_proc_lock(proc);
5652 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5653 		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5654 						rb_node), print_all);
5655 
5656 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5657 		struct binder_node *node = rb_entry(n, struct binder_node,
5658 						    rb_node);
5659 		if (!print_all && !node->has_async_transaction)
5660 			continue;
5661 
5662 		/*
5663 		 * take a temporary reference on the node so it
5664 		 * survives and isn't removed from the tree
5665 		 * while we print it.
5666 		 */
5667 		binder_inc_node_tmpref_ilocked(node);
5668 		/* Need to drop inner lock to take node lock */
5669 		binder_inner_proc_unlock(proc);
5670 		if (last_node)
5671 			binder_put_node(last_node);
5672 		binder_node_inner_lock(node);
5673 		print_binder_node_nilocked(m, node);
5674 		binder_node_inner_unlock(node);
5675 		last_node = node;
5676 		binder_inner_proc_lock(proc);
5677 	}
5678 	binder_inner_proc_unlock(proc);
5679 	if (last_node)
5680 		binder_put_node(last_node);
5681 
5682 	if (print_all) {
5683 		binder_proc_lock(proc);
5684 		for (n = rb_first(&proc->refs_by_desc);
5685 		     n != NULL;
5686 		     n = rb_next(n))
5687 			print_binder_ref_olocked(m, rb_entry(n,
5688 							    struct binder_ref,
5689 							    rb_node_desc));
5690 		binder_proc_unlock(proc);
5691 	}
5692 	binder_alloc_print_allocated(m, &proc->alloc);
5693 	binder_inner_proc_lock(proc);
5694 	list_for_each_entry(w, &proc->todo, entry)
5695 		print_binder_work_ilocked(m, proc, "  ",
5696 					  "  pending transaction", w);
5697 	list_for_each_entry(w, &proc->delivered_death, entry) {
5698 		seq_puts(m, "  has delivered dead binder\n");
5699 		break;
5700 	}
5701 	binder_inner_proc_unlock(proc);
5702 	if (!print_all && m->count == header_pos)
5703 		m->count = start_pos;
5704 }
5705 
5706 static const char * const binder_return_strings[] = {
5707 	"BR_ERROR",
5708 	"BR_OK",
5709 	"BR_TRANSACTION",
5710 	"BR_REPLY",
5711 	"BR_ACQUIRE_RESULT",
5712 	"BR_DEAD_REPLY",
5713 	"BR_TRANSACTION_COMPLETE",
5714 	"BR_INCREFS",
5715 	"BR_ACQUIRE",
5716 	"BR_RELEASE",
5717 	"BR_DECREFS",
5718 	"BR_ATTEMPT_ACQUIRE",
5719 	"BR_NOOP",
5720 	"BR_SPAWN_LOOPER",
5721 	"BR_FINISHED",
5722 	"BR_DEAD_BINDER",
5723 	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
5724 	"BR_FAILED_REPLY"
5725 };
5726 
5727 static const char * const binder_command_strings[] = {
5728 	"BC_TRANSACTION",
5729 	"BC_REPLY",
5730 	"BC_ACQUIRE_RESULT",
5731 	"BC_FREE_BUFFER",
5732 	"BC_INCREFS",
5733 	"BC_ACQUIRE",
5734 	"BC_RELEASE",
5735 	"BC_DECREFS",
5736 	"BC_INCREFS_DONE",
5737 	"BC_ACQUIRE_DONE",
5738 	"BC_ATTEMPT_ACQUIRE",
5739 	"BC_REGISTER_LOOPER",
5740 	"BC_ENTER_LOOPER",
5741 	"BC_EXIT_LOOPER",
5742 	"BC_REQUEST_DEATH_NOTIFICATION",
5743 	"BC_CLEAR_DEATH_NOTIFICATION",
5744 	"BC_DEAD_BINDER_DONE",
5745 	"BC_TRANSACTION_SG",
5746 	"BC_REPLY_SG",
5747 };
5748 
5749 static const char * const binder_objstat_strings[] = {
5750 	"proc",
5751 	"thread",
5752 	"node",
5753 	"ref",
5754 	"death",
5755 	"transaction",
5756 	"transaction_complete"
5757 };
5758 
5759 static void print_binder_stats(struct seq_file *m, const char *prefix,
5760 			       struct binder_stats *stats)
5761 {
5762 	int i;
5763 
5764 	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5765 		     ARRAY_SIZE(binder_command_strings));
5766 	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5767 		int temp = atomic_read(&stats->bc[i]);
5768 
5769 		if (temp)
5770 			seq_printf(m, "%s%s: %d\n", prefix,
5771 				   binder_command_strings[i], temp);
5772 	}
5773 
5774 	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5775 		     ARRAY_SIZE(binder_return_strings));
5776 	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5777 		int temp = atomic_read(&stats->br[i]);
5778 
5779 		if (temp)
5780 			seq_printf(m, "%s%s: %d\n", prefix,
5781 				   binder_return_strings[i], temp);
5782 	}
5783 
5784 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5785 		     ARRAY_SIZE(binder_objstat_strings));
5786 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5787 		     ARRAY_SIZE(stats->obj_deleted));
5788 	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5789 		int created = atomic_read(&stats->obj_created[i]);
5790 		int deleted = atomic_read(&stats->obj_deleted[i]);
5791 
5792 		if (created || deleted)
5793 			seq_printf(m, "%s%s: active %d total %d\n",
5794 				prefix,
5795 				binder_objstat_strings[i],
5796 				created - deleted,
5797 				created);
5798 	}
5799 }
5800 
5801 static void print_binder_proc_stats(struct seq_file *m,
5802 				    struct binder_proc *proc)
5803 {
5804 	struct binder_work *w;
5805 	struct binder_thread *thread;
5806 	struct rb_node *n;
5807 	int count, strong, weak, ready_threads;
5808 	size_t free_async_space =
5809 		binder_alloc_get_free_async_space(&proc->alloc);
5810 
5811 	seq_printf(m, "proc %d\n", proc->pid);
5812 	seq_printf(m, "context %s\n", proc->context->name);
5813 	count = 0;
5814 	ready_threads = 0;
5815 	binder_inner_proc_lock(proc);
5816 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5817 		count++;
5818 
5819 	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5820 		ready_threads++;
5821 
5822 	seq_printf(m, "  threads: %d\n", count);
5823 	seq_printf(m, "  requested threads: %d+%d/%d\n"
5824 			"  ready threads %d\n"
5825 			"  free async space %zd\n", proc->requested_threads,
5826 			proc->requested_threads_started, proc->max_threads,
5827 			ready_threads,
5828 			free_async_space);
5829 	count = 0;
5830 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5831 		count++;
5832 	binder_inner_proc_unlock(proc);
5833 	seq_printf(m, "  nodes: %d\n", count);
5834 	count = 0;
5835 	strong = 0;
5836 	weak = 0;
5837 	binder_proc_lock(proc);
5838 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5839 		struct binder_ref *ref = rb_entry(n, struct binder_ref,
5840 						  rb_node_desc);
5841 		count++;
5842 		strong += ref->data.strong;
5843 		weak += ref->data.weak;
5844 	}
5845 	binder_proc_unlock(proc);
5846 	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
5847 
5848 	count = binder_alloc_get_allocated_count(&proc->alloc);
5849 	seq_printf(m, "  buffers: %d\n", count);
5850 
5851 	binder_alloc_print_pages(m, &proc->alloc);
5852 
5853 	count = 0;
5854 	binder_inner_proc_lock(proc);
5855 	list_for_each_entry(w, &proc->todo, entry) {
5856 		if (w->type == BINDER_WORK_TRANSACTION)
5857 			count++;
5858 	}
5859 	binder_inner_proc_unlock(proc);
5860 	seq_printf(m, "  pending transactions: %d\n", count);
5861 
5862 	print_binder_stats(m, "  ", &proc->stats);
5863 }
5864 
5866 static int state_show(struct seq_file *m, void *unused)
5867 {
5868 	struct binder_proc *proc;
5869 	struct binder_node *node;
5870 	struct binder_node *last_node = NULL;
5871 
5872 	seq_puts(m, "binder state:\n");
5873 
5874 	spin_lock(&binder_dead_nodes_lock);
5875 	if (!hlist_empty(&binder_dead_nodes))
5876 		seq_puts(m, "dead nodes:\n");
5877 	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5878 		/*
5879 		 * take a temporary reference on the node so it
5880 		 * survives and isn't removed from the list
5881 		 * while we print it.
5882 		 */
5883 		node->tmp_refs++;
5884 		spin_unlock(&binder_dead_nodes_lock);
5885 		if (last_node)
5886 			binder_put_node(last_node);
5887 		binder_node_lock(node);
5888 		print_binder_node_nilocked(m, node);
5889 		binder_node_unlock(node);
5890 		last_node = node;
5891 		spin_lock(&binder_dead_nodes_lock);
5892 	}
5893 	spin_unlock(&binder_dead_nodes_lock);
5894 	if (last_node)
5895 		binder_put_node(last_node);
5896 
5897 	mutex_lock(&binder_procs_lock);
5898 	hlist_for_each_entry(proc, &binder_procs, proc_node)
5899 		print_binder_proc(m, proc, 1);
5900 	mutex_unlock(&binder_procs_lock);
5901 
5902 	return 0;
5903 }
5904 
5905 static int stats_show(struct seq_file *m, void *unused)
5906 {
5907 	struct binder_proc *proc;
5908 
5909 	seq_puts(m, "binder stats:\n");
5910 
5911 	print_binder_stats(m, "", &binder_stats);
5912 
5913 	mutex_lock(&binder_procs_lock);
5914 	hlist_for_each_entry(proc, &binder_procs, proc_node)
5915 		print_binder_proc_stats(m, proc);
5916 	mutex_unlock(&binder_procs_lock);
5917 
5918 	return 0;
5919 }
5920 
5921 static int transactions_show(struct seq_file *m, void *unused)
5922 {
5923 	struct binder_proc *proc;
5924 
5925 	seq_puts(m, "binder transactions:\n");
5926 	mutex_lock(&binder_procs_lock);
5927 	hlist_for_each_entry(proc, &binder_procs, proc_node)
5928 		print_binder_proc(m, proc, 0);
5929 	mutex_unlock(&binder_procs_lock);
5930 
5931 	return 0;
5932 }
5933 
5934 static int proc_show(struct seq_file *m, void *unused)
5935 {
5936 	struct binder_proc *itr;
5937 	int pid = (unsigned long)m->private;
5938 
5939 	mutex_lock(&binder_procs_lock);
5940 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
5941 		if (itr->pid == pid) {
5942 			seq_puts(m, "binder proc state:\n");
5943 			print_binder_proc(m, itr, 1);
5944 		}
5945 	}
5946 	mutex_unlock(&binder_procs_lock);
5947 
5948 	return 0;
5949 }
5950 
5951 static void print_binder_transaction_log_entry(struct seq_file *m,
5952 					struct binder_transaction_log_entry *e)
5953 {
5954 	int debug_id = READ_ONCE(e->debug_id_done);
5955 	/*
5956 	 * read barrier to guarantee debug_id_done read before
5957 	 * we print the log values
5958 	 */
5959 	smp_rmb();
5960 	seq_printf(m,
5961 		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5962 		   e->debug_id, (e->call_type == 2) ? "reply" :
5963 		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
5964 		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
5965 		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
5966 		   e->return_error, e->return_error_param,
5967 		   e->return_error_line);
5968 	/*
5969 	 * read-barrier to guarantee read of debug_id_done after
5970 	 * done printing the fields of the entry
5971 	 */
5972 	smp_rmb();
5973 	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5974 			"\n" : " (incomplete)\n");
5975 }
5976 
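/*
 * transaction_log_show() - dump the transaction log ring buffer
 *
 * log->cur is a free-running index into a fixed-size entry array.
 * Once the log has wrapped (log->full), printing starts at the
 * oldest slot (cur + 1) so entries come out in chronological order.
 * Entries whose debug_id_done changes while being printed (or is
 * still zero) are flagged as incomplete, since a writer may be
 * filling them in concurrently.
 */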
5977 static int transaction_log_show(struct seq_file *m, void *unused)
5978 {
5979 	struct binder_transaction_log *log = m->private;
5980 	unsigned int log_cur = atomic_read(&log->cur);
5981 	unsigned int count;
5982 	unsigned int cur;
5983 	int i;
5984 
5985 	count = log_cur + 1;
5986 	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5987 		0 : count % ARRAY_SIZE(log->entry);
5988 	if (count > ARRAY_SIZE(log->entry) || log->full)
5989 		count = ARRAY_SIZE(log->entry);
5990 	for (i = 0; i < count; i++) {
5991 		unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5992 
5993 		print_binder_transaction_log_entry(m, &log->entry[index]);
5994 	}
5995 	return 0;
5996 }
5997 
5998 const struct file_operations binder_fops = {
5999 	.owner = THIS_MODULE,
6000 	.poll = binder_poll,
6001 	.unlocked_ioctl = binder_ioctl,
6002 	.compat_ioctl = binder_ioctl,
6003 	.mmap = binder_mmap,
6004 	.open = binder_open,
6005 	.flush = binder_flush,
6006 	.release = binder_release,
6007 };
6008 
6009 DEFINE_SHOW_ATTRIBUTE(state);
6010 DEFINE_SHOW_ATTRIBUTE(stats);
6011 DEFINE_SHOW_ATTRIBUTE(transactions);
6012 DEFINE_SHOW_ATTRIBUTE(transaction_log);
6013 
6014 static int __init init_binder_device(const char *name)
6015 {
6016 	int ret;
6017 	struct binder_device *binder_device;
6018 
6019 	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6020 	if (!binder_device)
6021 		return -ENOMEM;
6022 
6023 	binder_device->miscdev.fops = &binder_fops;
6024 	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6025 	binder_device->miscdev.name = name;
6026 
6027 	binder_device->context.binder_context_mgr_uid = INVALID_UID;
6028 	binder_device->context.name = name;
6029 	mutex_init(&binder_device->context.context_mgr_node_lock);
6030 
6031 	ret = misc_register(&binder_device->miscdev);
6032 	if (ret < 0) {
6033 		kfree(binder_device);
6034 		return ret;
6035 	}
6036 
6037 	hlist_add_head(&binder_device->hlist, &binder_devices);
6038 
6039 	return ret;
6040 }
6041 
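/*
 * binder_init() - module init
 *
 * Sets up debugfs, registers one misc char device per name listed in
 * the binder_devices_param string (comma-separated, e.g.
 * "binder,hwbinder,vndbinder" in a default Android config), and
 * finally initializes binderfs. On any failure, all devices
 * registered so far are torn down again.
 */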
6042 static int __init binder_init(void)
6043 {
6044 	int ret;
6045 	char *device_name, *device_tmp;
6046 	struct binder_device *device;
6047 	struct hlist_node *tmp;
6048 	char *device_names = NULL;
6049 
6050 	ret = binder_alloc_shrinker_init();
6051 	if (ret)
6052 		return ret;
6053 
6054 	atomic_set(&binder_transaction_log.cur, ~0U);
6055 	atomic_set(&binder_transaction_log_failed.cur, ~0U);
6056 
6057 	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6058 	if (binder_debugfs_dir_entry_root) {
6059 		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6060 						 binder_debugfs_dir_entry_root);
6061 
6063 		debugfs_create_file("state",
6064 				    0444,
6065 				    binder_debugfs_dir_entry_root,
6066 				    NULL,
6067 				    &state_fops);
6068 		debugfs_create_file("stats",
6069 				    0444,
6070 				    binder_debugfs_dir_entry_root,
6071 				    NULL,
6072 				    &stats_fops);
6073 		debugfs_create_file("transactions",
6074 				    0444,
6075 				    binder_debugfs_dir_entry_root,
6076 				    NULL,
6077 				    &transactions_fops);
6078 		debugfs_create_file("transaction_log",
6079 				    0444,
6080 				    binder_debugfs_dir_entry_root,
6081 				    &binder_transaction_log,
6082 				    &transaction_log_fops);
6083 		debugfs_create_file("failed_transaction_log",
6084 				    0444,
6085 				    binder_debugfs_dir_entry_root,
6086 				    &binder_transaction_log_failed,
6087 				    &transaction_log_fops);
6088 	}
6089 
6090 	if (strcmp(binder_devices_param, "") != 0) {
6091 		/*
6092 		 * Copy the module_parameter string, because we don't want to
6093 		 * tokenize it in-place.
6094 		 */
6095 		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6096 		if (!device_names) {
6097 			ret = -ENOMEM;
6098 			goto err_alloc_device_names_failed;
6099 		}
6100 
6101 		device_tmp = device_names;
6102 		while ((device_name = strsep(&device_tmp, ","))) {
6103 			ret = init_binder_device(device_name);
6104 			if (ret)
6105 				goto err_init_binder_device_failed;
6106 		}
6107 	}
6108 
6109 	ret = init_binderfs();
6110 	if (ret)
6111 		goto err_init_binder_device_failed;
6112 
6113 	return ret;
6114 
6115 err_init_binder_device_failed:
6116 	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6117 		misc_deregister(&device->miscdev);
6118 		hlist_del(&device->hlist);
6119 		kfree(device);
6120 	}
6121 
6122 	kfree(device_names);
6123 
6124 err_alloc_device_names_failed:
6125 	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6126 
6127 	return ret;
6128 }
6129 
6130 device_initcall(binder_init);
6131 
6132 #define CREATE_TRACE_POINTS
6133 #include "binder_trace.h"
6134 
6135 MODULE_LICENSE("GPL v2");
6136