xref: /linux/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c (revision fcc8487d477a3452a1d0ccbdd4c5e0e1e3cb8bed)
1 /**
2  * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
3  * Copyright (c) 2010-2012 Broadcom. All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions, and the following disclaimer,
10  *    without modification.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. The names of the above-listed copyright holders may not be used
15  *    to endorse or promote products derived from this software without
16  *    specific prior written permission.
17  *
18  * ALTERNATIVELY, this software may be distributed under the terms of the
19  * GNU General Public License ("GPL") version 2, as published by the Free
20  * Software Foundation.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
23  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
26  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
27  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
29  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
30  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
31  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/sched/signal.h>
38 #include <linux/types.h>
39 #include <linux/errno.h>
40 #include <linux/cdev.h>
41 #include <linux/fs.h>
42 #include <linux/device.h>
43 #include <linux/mm.h>
44 #include <linux/highmem.h>
45 #include <linux/pagemap.h>
46 #include <linux/bug.h>
47 #include <linux/semaphore.h>
48 #include <linux/list.h>
49 #include <linux/of.h>
50 #include <linux/platform_device.h>
51 #include <linux/compat.h>
52 #include <soc/bcm2835/raspberrypi-firmware.h>
53 
54 #include "vchiq_core.h"
55 #include "vchiq_ioctl.h"
56 #include "vchiq_arm.h"
57 #include "vchiq_debugfs.h"
58 #include "vchiq_killable.h"
59 
60 #define DEVICE_NAME "vchiq"
61 
62 /* Override the default prefix, which would be vchiq_arm (from the filename) */
63 #undef MODULE_PARAM_PREFIX
64 #define MODULE_PARAM_PREFIX DEVICE_NAME "."
65 
66 #define VCHIQ_MINOR 0
67 
68 /* Some per-instance constants */
69 #define MAX_COMPLETIONS 128
70 #define MAX_SERVICES 64
71 #define MAX_ELEMENTS 8
72 #define MSG_QUEUE_SIZE 128
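
/* N.B. MAX_COMPLETIONS and MSG_QUEUE_SIZE must be powers of two - the
** producer/consumer indices are free-running counters that are reduced
** with a bitwise AND of (size - 1) rather than a modulo. */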
73 
74 #define KEEPALIVE_VER 1
75 #define KEEPALIVE_VER_MIN KEEPALIVE_VER
76 
77 /* Run time control of log level, based on KERN_XXX level. */
78 int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
79 int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
80 
81 #define SUSPEND_TIMER_TIMEOUT_MS 100
82 #define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000
83 
84 #define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
85 static const char *const suspend_state_names[] = {
86 	"VC_SUSPEND_FORCE_CANCELED",
87 	"VC_SUSPEND_REJECTED",
88 	"VC_SUSPEND_FAILED",
89 	"VC_SUSPEND_IDLE",
90 	"VC_SUSPEND_REQUESTED",
91 	"VC_SUSPEND_IN_PROGRESS",
92 	"VC_SUSPEND_SUSPENDED"
93 };
94 #define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
95 static const char *const resume_state_names[] = {
96 	"VC_RESUME_FAILED",
97 	"VC_RESUME_IDLE",
98 	"VC_RESUME_REQUESTED",
99 	"VC_RESUME_IN_PROGRESS",
100 	"VC_RESUME_RESUMED"
101 };
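/* A state's name is looked up as, e.g.,
** suspend_state_names[state + VC_SUSPEND_NUM_OFFSET]; the offsets account
** for the negative (failure) states that precede the idle state. */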
102 /* The number of times we allow force suspend to timeout before actually
103 ** _forcing_ suspend.  This is to cater for SW which fails to release vchiq
104 ** correctly - we don't want to prevent ARM suspend indefinitely in this case.
105 */
106 #define FORCE_SUSPEND_FAIL_MAX 8
107 
108 /* The time in ms allowed for videocore to go idle when force suspend has been
109  * requested */
110 #define FORCE_SUSPEND_TIMEOUT_MS 200
111 
112 
113 static void suspend_timer_callback(unsigned long context);
114 
115 
116 typedef struct user_service_struct {
117 	VCHIQ_SERVICE_T *service;
118 	void *userdata;
119 	VCHIQ_INSTANCE_T instance;
120 	char is_vchi;
121 	char dequeue_pending;
122 	char close_pending;
123 	int message_available_pos;
124 	int msg_insert;
125 	int msg_remove;
126 	struct semaphore insert_event;
127 	struct semaphore remove_event;
128 	struct semaphore close_event;
129 	VCHIQ_HEADER_T *msg_queue[MSG_QUEUE_SIZE];
130 } USER_SERVICE_T;
131 
132 struct bulk_waiter_node {
133 	struct bulk_waiter bulk_waiter;
134 	int pid;
135 	struct list_head list;
136 };
137 
138 struct vchiq_instance_struct {
139 	VCHIQ_STATE_T *state;
140 	VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS];
141 	int completion_insert;
142 	int completion_remove;
143 	struct semaphore insert_event;
144 	struct semaphore remove_event;
145 	struct mutex completion_mutex;
146 
147 	int connected;
148 	int closing;
149 	int pid;
150 	int mark;
151 	int use_close_delivered;
152 	int trace;
153 
154 	struct list_head bulk_waiter_list;
155 	struct mutex bulk_waiter_list_mutex;
156 
157 	VCHIQ_DEBUGFS_NODE_T debugfs_node;
158 };
159 
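/* Context for dumping state to a user buffer: 'actual' counts the bytes
** written so far, 'space' is the size of the buffer, and 'offset' is the
** number of bytes still to be skipped before copying starts. */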
160 typedef struct dump_context_struct {
161 	char __user *buf;
162 	size_t actual;
163 	size_t space;
164 	loff_t offset;
165 } DUMP_CONTEXT_T;
166 
167 static struct cdev    vchiq_cdev;
168 static dev_t          vchiq_devid;
169 static VCHIQ_STATE_T g_state;
170 static struct class  *vchiq_class;
171 static struct device *vchiq_dev;
172 static DEFINE_SPINLOCK(msg_queue_spinlock);
173 
174 static const char *const ioctl_names[] = {
175 	"CONNECT",
176 	"SHUTDOWN",
177 	"CREATE_SERVICE",
178 	"REMOVE_SERVICE",
179 	"QUEUE_MESSAGE",
180 	"QUEUE_BULK_TRANSMIT",
181 	"QUEUE_BULK_RECEIVE",
182 	"AWAIT_COMPLETION",
183 	"DEQUEUE_MESSAGE",
184 	"GET_CLIENT_ID",
185 	"GET_CONFIG",
186 	"CLOSE_SERVICE",
187 	"USE_SERVICE",
188 	"RELEASE_SERVICE",
189 	"SET_SERVICE_OPTION",
190 	"DUMP_PHYS_MEM",
191 	"LIB_VERSION",
192 	"CLOSE_DELIVERED"
193 };
194 
195 vchiq_static_assert(ARRAY_SIZE(ioctl_names) ==
196 		    (VCHIQ_IOC_MAX + 1));
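
/* Illustrative userspace usage of this character device (a sketch only -
** the real client is the userspace vchiq library, and the argument types
** and the VCHIQ_VERSION constant come from vchiq_ioctl.h):
**
**	int fd = open("/dev/vchiq", O_RDWR);
**	ioctl(fd, VCHIQ_IOC_LIB_VERSION, VCHIQ_VERSION);
**	ioctl(fd, VCHIQ_IOC_CONNECT, 0);
**	... VCHIQ_IOC_CREATE_SERVICE, then loop on VCHIQ_IOC_AWAIT_COMPLETION
*/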
197 
198 #if defined(CONFIG_BCM2835_VCHIQ_SUPPORT_MEMDUMP)
199 static void
200 dump_phys_mem(void *virt_addr, u32 num_bytes);
201 #endif
202 
203 /****************************************************************************
204 *
205 *   add_completion
206 *
207 ***************************************************************************/
208 
209 static VCHIQ_STATUS_T
210 add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
211 	VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service,
212 	void *bulk_userdata)
213 {
214 	VCHIQ_COMPLETION_DATA_T *completion;
215 	int insert;
216 
217 	DEBUG_INITIALISE(g_state.local)
218 
219 	insert = instance->completion_insert;
220 	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
221 		/* Out of space - wait for the client */
222 		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
223 		vchiq_log_trace(vchiq_arm_log_level,
224 			"add_completion - completion queue full");
225 		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
226 		if (down_interruptible(&instance->remove_event) != 0) {
227 			vchiq_log_info(vchiq_arm_log_level,
228 				"service_callback interrupted");
229 			return VCHIQ_RETRY;
230 		} else if (instance->closing) {
231 			vchiq_log_info(vchiq_arm_log_level,
232 				"service_callback closing");
233 			return VCHIQ_SUCCESS;
234 		}
235 		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
236 	}
237 
238 	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];
239 
240 	completion->header = header;
241 	completion->reason = reason;
242 	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
243 	completion->service_userdata = user_service->service;
244 	completion->bulk_userdata = bulk_userdata;
245 
246 	if (reason == VCHIQ_SERVICE_CLOSED) {
247 		/* Take an extra reference, to be held until
248 		   this CLOSED notification is delivered. */
249 		lock_service(user_service->service);
250 		if (instance->use_close_delivered)
251 			user_service->close_pending = 1;
252 	}
253 
254 	/* A write barrier is needed here to ensure that the entire completion
255 		record is written out before the insert point; it pairs with the
		rmb() performed by AWAIT_COMPLETION before reading the record. */
256 	wmb();
257 
258 	if (reason == VCHIQ_MESSAGE_AVAILABLE)
259 		user_service->message_available_pos = insert;
260 
261 	insert++;
262 	instance->completion_insert = insert;
263 
264 	up(&instance->insert_event);
265 
266 	return VCHIQ_SUCCESS;
267 }
268 
269 /****************************************************************************
270 *
271 *   service_callback
272 *
273 ***************************************************************************/
274 
275 static VCHIQ_STATUS_T
276 service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
277 	VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
278 {
279 	/* How do we ensure the callback goes to the right client?
280 	** The service userdata points to a USER_SERVICE_T record containing
281 	** the original callback and the user state structure, which contains a
282 	** circular buffer for completion records.
283 	*/
284 	USER_SERVICE_T *user_service;
285 	VCHIQ_SERVICE_T *service;
286 	VCHIQ_INSTANCE_T instance;
287 	bool skip_completion = false;
288 
289 	DEBUG_INITIALISE(g_state.local)
290 
291 	DEBUG_TRACE(SERVICE_CALLBACK_LINE);
292 
293 	service = handle_to_service(handle);
294 	BUG_ON(!service);
295 	user_service = (USER_SERVICE_T *)service->base.userdata;
296 	instance = user_service->instance;
297 
298 	if (!instance || instance->closing)
299 		return VCHIQ_SUCCESS;
300 
301 	vchiq_log_trace(vchiq_arm_log_level,
302 		"service_callback - service %lx(%d,%p), reason %d, header %lx, "
303 		"instance %lx, bulk_userdata %lx",
304 		(unsigned long)user_service,
305 		service->localport, user_service->userdata,
306 		reason, (unsigned long)header,
307 		(unsigned long)instance, (unsigned long)bulk_userdata);
308 
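	/* For VCHI-style services, park the message header in the
	** per-service msg_queue for retrieval via DEQUEUE_MESSAGE; for
	** plain VCHIQ services the header travels in the completion
	** record itself. */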
309 	if (header && user_service->is_vchi) {
310 		spin_lock(&msg_queue_spinlock);
311 		while (user_service->msg_insert ==
312 			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
313 			spin_unlock(&msg_queue_spinlock);
314 			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
315 			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
316 			vchiq_log_trace(vchiq_arm_log_level,
317 				"service_callback - msg queue full");
318 			/* If there is no MESSAGE_AVAILABLE in the completion
319 			** queue, add one
320 			*/
321 			if ((user_service->message_available_pos -
322 				instance->completion_remove) < 0) {
323 				VCHIQ_STATUS_T status;
324 
325 				vchiq_log_info(vchiq_arm_log_level,
326 					"Inserting extra MESSAGE_AVAILABLE");
327 				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
328 				status = add_completion(instance, reason,
329 					NULL, user_service, bulk_userdata);
330 				if (status != VCHIQ_SUCCESS) {
331 					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
332 					return status;
333 				}
334 			}
335 
336 			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
337 			if (down_interruptible(&user_service->remove_event)
338 				!= 0) {
339 				vchiq_log_info(vchiq_arm_log_level,
340 					"service_callback interrupted");
341 				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
342 				return VCHIQ_RETRY;
343 			} else if (instance->closing) {
344 				vchiq_log_info(vchiq_arm_log_level,
345 					"service_callback closing");
346 				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
347 				return VCHIQ_ERROR;
348 			}
349 			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
350 			spin_lock(&msg_queue_spinlock);
351 		}
352 
353 		user_service->msg_queue[user_service->msg_insert &
354 			(MSG_QUEUE_SIZE - 1)] = header;
355 		user_service->msg_insert++;
356 
357 		/* If there is a thread waiting in DEQUEUE_MESSAGE, or if
358 		** there is a MESSAGE_AVAILABLE in the completion queue then
359 		** bypass the completion queue.
360 		*/
361 		if (((user_service->message_available_pos -
362 			instance->completion_remove) >= 0) ||
363 			user_service->dequeue_pending) {
364 			user_service->dequeue_pending = 0;
365 			skip_completion = true;
366 		}
367 
368 		spin_unlock(&msg_queue_spinlock);
369 		up(&user_service->insert_event);
370 
371 		header = NULL;
372 	}
373 	DEBUG_TRACE(SERVICE_CALLBACK_LINE);
374 
375 	if (skip_completion)
376 		return VCHIQ_SUCCESS;
377 
378 	return add_completion(instance, reason, header, user_service,
379 		bulk_userdata);
380 }
381 
382 /****************************************************************************
383 *
384 *   user_service_free
385 *
386 ***************************************************************************/
387 static void
388 user_service_free(void *userdata)
389 {
390 	kfree(userdata);
391 }
392 
393 /****************************************************************************
394 *
395 *   close_delivered
396 *
397 ***************************************************************************/
398 static void close_delivered(USER_SERVICE_T *user_service)
399 {
400 	vchiq_log_info(vchiq_arm_log_level,
401 		"close_delivered(handle=%x)",
402 		user_service->service->handle);
403 
404 	if (user_service->close_pending) {
405 		/* Allow the underlying service to be culled */
406 		unlock_service(user_service->service);
407 
408 		/* Wake the user-thread blocked in close_ or remove_service */
409 		up(&user_service->close_event);
410 
411 		user_service->close_pending = 0;
412 	}
413 }
414 
415 struct vchiq_io_copy_callback_context {
416 	struct vchiq_element *current_element;
417 	size_t current_element_offset;
418 	unsigned long elements_to_go;
419 	size_t current_offset;
420 };
421 
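/* Copy callback passed to vchiq_queue_message(). It is called with a
** running 'offset' into the message and up to 'maxsize' bytes of space
** at 'dest', and returns the number of bytes copied from the caller's
** element array, 0 once all elements are consumed, or -EFAULT if a
** user copy faults. */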
422 static ssize_t
423 vchiq_ioc_copy_element_data(
424 	void *context,
425 	void *dest,
426 	size_t offset,
427 	size_t maxsize)
428 {
429 	long res;
430 	size_t bytes_this_round;
431 	struct vchiq_io_copy_callback_context *copy_context =
432 		(struct vchiq_io_copy_callback_context *)context;
433 
434 	if (offset != copy_context->current_offset)
435 		return 0;
436 
437 	if (!copy_context->elements_to_go)
438 		return 0;
439 
440 	/*
441 	 * Complex logic here to handle the case of 0 size elements
442 	 * in the middle of the array of elements.
443 	 *
444 	 * Need to skip over these 0 size elements.
445 	 */
446 	while (1) {
447 		bytes_this_round = min(copy_context->current_element->size -
448 				       copy_context->current_element_offset,
449 				       maxsize);
450 
451 		if (bytes_this_round)
452 			break;
453 
454 		copy_context->elements_to_go--;
455 		copy_context->current_element++;
456 		copy_context->current_element_offset = 0;
457 
458 		if (!copy_context->elements_to_go)
459 			return 0;
460 	}
461 
462 	res = copy_from_user(dest,
463 			     copy_context->current_element->data +
464 			     copy_context->current_element_offset,
465 			     bytes_this_round);
466 
467 	if (res != 0)
468 		return -EFAULT;
469 
470 	copy_context->current_element_offset += bytes_this_round;
471 	copy_context->current_offset += bytes_this_round;
472 
473 	/*
474 	 * Check if done with current element, and if so advance to the next.
475 	 */
476 	if (copy_context->current_element_offset ==
477 	    copy_context->current_element->size) {
478 		copy_context->elements_to_go--;
479 		copy_context->current_element++;
480 		copy_context->current_element_offset = 0;
481 	}
482 
483 	return bytes_this_round;
484 }
485 
486 /**************************************************************************
487  *
488  *   vchiq_ioc_queue_message
489  *
490  **************************************************************************/
491 static VCHIQ_STATUS_T
492 vchiq_ioc_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
493 			struct vchiq_element *elements,
494 			unsigned long count)
495 {
496 	struct vchiq_io_copy_callback_context context;
497 	unsigned long i;
498 	size_t total_size = 0;
499 
500 	context.current_element = elements;
501 	context.current_element_offset = 0;
502 	context.elements_to_go = count;
503 	context.current_offset = 0;
504 
505 	for (i = 0; i < count; i++) {
		/* NULL data is invalid unless the element size is zero;
		** report VCHIQ_ERROR because the caller interprets the
		** result as a VCHIQ_STATUS_T, not an errno. */
506 		if (!elements[i].data && elements[i].size != 0)
507 			return VCHIQ_ERROR;
508 
509 		total_size += elements[i].size;
510 	}
511 
512 	return vchiq_queue_message(handle, vchiq_ioc_copy_element_data,
513 				   &context, total_size);
514 }
515 
516 /****************************************************************************
517 *
518 *   vchiq_ioctl
519 *
520 ***************************************************************************/
521 static long
522 vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
523 {
524 	VCHIQ_INSTANCE_T instance = file->private_data;
525 	VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
526 	VCHIQ_SERVICE_T *service = NULL;
527 	long ret = 0;
528 	int i, rc;
529 
530 	DEBUG_INITIALISE(g_state.local)
531 
532 	vchiq_log_trace(vchiq_arm_log_level,
533 		"vchiq_ioctl - instance %pK, cmd %s, arg %lx",
534 		instance,
535 		((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
536 		(_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
537 		ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
538 
539 	switch (cmd) {
540 	case VCHIQ_IOC_SHUTDOWN:
541 		if (!instance->connected)
542 			break;
543 
544 		/* Remove all services */
545 		i = 0;
546 		while ((service = next_service_by_instance(instance->state,
547 			instance, &i)) != NULL) {
548 			status = vchiq_remove_service(service->handle);
549 			unlock_service(service);
550 			if (status != VCHIQ_SUCCESS)
551 				break;
552 		}
553 		service = NULL;
554 
555 		if (status == VCHIQ_SUCCESS) {
556 			/* Wake the completion thread and ask it to exit */
557 			instance->closing = 1;
558 			up(&instance->insert_event);
559 		}
560 
561 		break;
562 
563 	case VCHIQ_IOC_CONNECT:
564 		if (instance->connected) {
565 			ret = -EINVAL;
566 			break;
567 		}
568 		rc = mutex_lock_killable(&instance->state->mutex);
569 		if (rc != 0) {
570 			vchiq_log_error(vchiq_arm_log_level,
571 				"vchiq: connect: could not lock mutex for "
572 				"state %d: %d",
573 				instance->state->id, rc);
574 			ret = -EINTR;
575 			break;
576 		}
577 		status = vchiq_connect_internal(instance->state, instance);
578 		mutex_unlock(&instance->state->mutex);
579 
580 		if (status == VCHIQ_SUCCESS)
581 			instance->connected = 1;
582 		else
583 			vchiq_log_error(vchiq_arm_log_level,
584 				"vchiq: could not connect: %d", status);
585 		break;
586 
587 	case VCHIQ_IOC_CREATE_SERVICE: {
588 		VCHIQ_CREATE_SERVICE_T args;
589 		USER_SERVICE_T *user_service = NULL;
590 		void *userdata;
591 		int srvstate;
592 
593 		if (copy_from_user
594 			 (&args, (const void __user *)arg,
595 			  sizeof(args)) != 0) {
596 			ret = -EFAULT;
597 			break;
598 		}
599 
600 		user_service = kmalloc(sizeof(USER_SERVICE_T), GFP_KERNEL);
601 		if (!user_service) {
602 			ret = -ENOMEM;
603 			break;
604 		}
605 
606 		if (args.is_open) {
607 			if (!instance->connected) {
608 				ret = -ENOTCONN;
609 				kfree(user_service);
610 				break;
611 			}
612 			srvstate = VCHIQ_SRVSTATE_OPENING;
613 		} else {
614 			srvstate =
615 				 instance->connected ?
616 				 VCHIQ_SRVSTATE_LISTENING :
617 				 VCHIQ_SRVSTATE_HIDDEN;
618 		}
619 
620 		userdata = args.params.userdata;
621 		args.params.callback = service_callback;
622 		args.params.userdata = user_service;
623 		service = vchiq_add_service_internal(
624 				instance->state,
625 				&args.params, srvstate,
626 				instance, user_service_free);
627 
628 		if (service != NULL) {
629 			user_service->service = service;
630 			user_service->userdata = userdata;
631 			user_service->instance = instance;
632 			user_service->is_vchi = (args.is_vchi != 0);
633 			user_service->dequeue_pending = 0;
634 			user_service->close_pending = 0;
635 			user_service->message_available_pos =
636 				instance->completion_remove - 1;
637 			user_service->msg_insert = 0;
638 			user_service->msg_remove = 0;
639 			sema_init(&user_service->insert_event, 0);
640 			sema_init(&user_service->remove_event, 0);
641 			sema_init(&user_service->close_event, 0);
642 
643 			if (args.is_open) {
644 				status = vchiq_open_service_internal
645 					(service, instance->pid);
646 				if (status != VCHIQ_SUCCESS) {
647 					vchiq_remove_service(service->handle);
648 					service = NULL;
649 					ret = (status == VCHIQ_RETRY) ?
650 						-EINTR : -EIO;
651 					break;
652 				}
653 			}
654 
655 			if (copy_to_user((void __user *)
656 				&(((VCHIQ_CREATE_SERVICE_T __user *)
657 					arg)->handle),
658 				(const void *)&service->handle,
659 				sizeof(service->handle)) != 0) {
660 				ret = -EFAULT;
661 				vchiq_remove_service(service->handle);
662 			}
663 
664 			service = NULL;
665 		} else {
666 			ret = -EEXIST;
667 			kfree(user_service);
668 		}
669 	} break;
670 
671 	case VCHIQ_IOC_CLOSE_SERVICE: {
672 		VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
673 
674 		service = find_service_for_instance(instance, handle);
675 		if (service != NULL) {
676 			USER_SERVICE_T *user_service =
677 				(USER_SERVICE_T *)service->base.userdata;
678 			/* close_pending is false on first entry, and when the
679 			   wait in vchiq_close_service has been interrupted. */
680 			if (!user_service->close_pending) {
681 				status = vchiq_close_service(service->handle);
682 				if (status != VCHIQ_SUCCESS)
683 					break;
684 			}
685 
686 			/* close_pending is true once the underlying service
687 			   has been closed until the client library calls the
688 			   CLOSE_DELIVERED ioctl, signalling close_event. */
689 			if (user_service->close_pending &&
690 				down_interruptible(&user_service->close_event))
691 				status = VCHIQ_RETRY;
692 		} else
693 			ret = -EINVAL;
695 	} break;
696 
697 	case VCHIQ_IOC_REMOVE_SERVICE: {
698 		VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
699 
700 		service = find_service_for_instance(instance, handle);
701 		if (service != NULL) {
702 			USER_SERVICE_T *user_service =
703 				(USER_SERVICE_T *)service->base.userdata;
704 			/* close_pending is false on first entry, and when the
705 			   wait in vchiq_close_service has been interrupted. */
706 			if (!user_service->close_pending) {
707 				status = vchiq_remove_service(service->handle);
708 				if (status != VCHIQ_SUCCESS)
709 					break;
710 			}
711 
712 			/* close_pending is true once the underlying service
713 			   has been closed until the client library calls the
714 			   CLOSE_DELIVERED ioctl, signalling close_event. */
715 			if (user_service->close_pending &&
716 				down_interruptible(&user_service->close_event))
717 				status = VCHIQ_RETRY;
718 		} else
719 			ret = -EINVAL;
721 	} break;
722 
723 	case VCHIQ_IOC_USE_SERVICE:
724 	case VCHIQ_IOC_RELEASE_SERVICE:	{
725 		VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
726 
727 		service = find_service_for_instance(instance, handle);
728 		if (service != NULL) {
729 			status = (cmd == VCHIQ_IOC_USE_SERVICE)	?
730 				vchiq_use_service_internal(service) :
731 				vchiq_release_service_internal(service);
732 			if (status != VCHIQ_SUCCESS) {
733 				vchiq_log_error(vchiq_susp_log_level,
734 					"%s: cmd %s returned error %d for "
735 					"service %c%c%c%c:%03d",
736 					__func__,
737 					(cmd == VCHIQ_IOC_USE_SERVICE) ?
738 						"VCHIQ_IOC_USE_SERVICE" :
739 						"VCHIQ_IOC_RELEASE_SERVICE",
740 					status,
741 					VCHIQ_FOURCC_AS_4CHARS(
742 						service->base.fourcc),
743 					service->client_id);
744 				ret = -EINVAL;
745 			}
746 		} else
747 			ret = -EINVAL;
748 	} break;
749 
750 	case VCHIQ_IOC_QUEUE_MESSAGE: {
751 		VCHIQ_QUEUE_MESSAGE_T args;
752 
753 		if (copy_from_user
754 			 (&args, (const void __user *)arg,
755 			  sizeof(args)) != 0) {
756 			ret = -EFAULT;
757 			break;
758 		}
759 
760 		service = find_service_for_instance(instance, args.handle);
761 
762 		if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
763 			/* Copy elements into kernel space */
764 			struct vchiq_element elements[MAX_ELEMENTS];
765 
766 			if (copy_from_user(elements, args.elements,
767 				args.count * sizeof(struct vchiq_element)) == 0)
768 				status = vchiq_ioc_queue_message
769 					(args.handle,
770 					elements, args.count);
771 			else
772 				ret = -EFAULT;
773 		} else {
774 			ret = -EINVAL;
775 		}
776 	} break;
777 
778 	case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
779 	case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
780 		VCHIQ_QUEUE_BULK_TRANSFER_T args;
781 		struct bulk_waiter_node *waiter = NULL;
782 
783 		VCHIQ_BULK_DIR_T dir =
784 			(cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
785 			VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
786 
787 		if (copy_from_user
788 			(&args, (const void __user *)arg,
789 			sizeof(args)) != 0) {
790 			ret = -EFAULT;
791 			break;
792 		}
793 
794 		service = find_service_for_instance(instance, args.handle);
795 		if (!service) {
796 			ret = -EINVAL;
797 			break;
798 		}
799 
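		/* A BLOCKING transfer waits for completion inside this ioctl,
		** using a freshly allocated bulk_waiter. A WAITING transfer
		** reclaims the waiter that an earlier, interrupted BLOCKING
		** call parked on bulk_waiter_list under this pid - see the
		** VCHIQ_RETRY handling below. */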
800 		if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
801 			waiter = kzalloc(sizeof(struct bulk_waiter_node),
802 				GFP_KERNEL);
803 			if (!waiter) {
804 				ret = -ENOMEM;
805 				break;
806 			}
807 			args.userdata = &waiter->bulk_waiter;
808 		} else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
809 			struct list_head *pos;
810 
811 			mutex_lock(&instance->bulk_waiter_list_mutex);
812 			list_for_each(pos, &instance->bulk_waiter_list) {
813 				if (list_entry(pos, struct bulk_waiter_node,
814 					list)->pid == current->pid) {
815 					waiter = list_entry(pos,
816 						struct bulk_waiter_node,
817 						list);
818 					list_del(pos);
819 					break;
820 				}
821 
822 			}
823 			mutex_unlock(&instance->bulk_waiter_list_mutex);
824 			if (!waiter) {
825 				vchiq_log_error(vchiq_arm_log_level,
826 					"no bulk_waiter found for pid %d",
827 					current->pid);
828 				ret = -ESRCH;
829 				break;
830 			}
831 			vchiq_log_info(vchiq_arm_log_level,
832 				"found bulk_waiter %pK for pid %d", waiter,
833 				current->pid);
834 			args.userdata = &waiter->bulk_waiter;
835 		}
836 		status = vchiq_bulk_transfer
837 			(args.handle,
838 			 VCHI_MEM_HANDLE_INVALID,
839 			 args.data, args.size,
840 			 args.userdata, args.mode,
841 			 dir);
842 		if (!waiter)
843 			break;
844 		if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
845 			!waiter->bulk_waiter.bulk) {
846 			if (waiter->bulk_waiter.bulk) {
847 				/* Cancel the signal when the transfer
848 				** completes. */
849 				spin_lock(&bulk_waiter_spinlock);
850 				waiter->bulk_waiter.bulk->userdata = NULL;
851 				spin_unlock(&bulk_waiter_spinlock);
852 			}
853 			kfree(waiter);
854 		} else {
855 			const VCHIQ_BULK_MODE_T mode_waiting =
856 				VCHIQ_BULK_MODE_WAITING;
857 			waiter->pid = current->pid;
858 			mutex_lock(&instance->bulk_waiter_list_mutex);
859 			list_add(&waiter->list, &instance->bulk_waiter_list);
860 			mutex_unlock(&instance->bulk_waiter_list_mutex);
861 			vchiq_log_info(vchiq_arm_log_level,
862 				"saved bulk_waiter %pK for pid %d",
863 				waiter, current->pid);
864 
865 			if (copy_to_user((void __user *)
866 				&(((VCHIQ_QUEUE_BULK_TRANSFER_T __user *)
867 					arg)->mode),
868 				(const void *)&mode_waiting,
869 				sizeof(mode_waiting)) != 0)
870 				ret = -EFAULT;
871 		}
872 	} break;
873 
874 	case VCHIQ_IOC_AWAIT_COMPLETION: {
875 		VCHIQ_AWAIT_COMPLETION_T args;
876 
877 		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
878 		if (!instance->connected) {
879 			ret = -ENOTCONN;
880 			break;
881 		}
882 
883 		if (copy_from_user(&args, (const void __user *)arg,
884 			sizeof(args)) != 0) {
885 			ret = -EFAULT;
886 			break;
887 		}
888 
889 		mutex_lock(&instance->completion_mutex);
890 
891 		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
892 		while ((instance->completion_remove ==
893 			instance->completion_insert)
894 			&& !instance->closing) {
895 			int rc;
896 
897 			DEBUG_TRACE(AWAIT_COMPLETION_LINE);
898 			mutex_unlock(&instance->completion_mutex);
899 			rc = down_interruptible(&instance->insert_event);
900 			mutex_lock(&instance->completion_mutex);
901 			if (rc != 0) {
902 				DEBUG_TRACE(AWAIT_COMPLETION_LINE);
903 				vchiq_log_info(vchiq_arm_log_level,
904 					"AWAIT_COMPLETION interrupted");
905 				ret = -EINTR;
906 				break;
907 			}
908 		}
909 		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
910 
911 		if (ret == 0) {
912 			int msgbufcount = args.msgbufcount;
913 			int remove = instance->completion_remove;
914 
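			/* From here on, 'ret' counts the completions copied
			** back to the caller. */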
915 			for (ret = 0; ret < args.count; ret++) {
916 				VCHIQ_COMPLETION_DATA_T *completion;
917 				VCHIQ_SERVICE_T *service;
918 				USER_SERVICE_T *user_service;
919 				VCHIQ_HEADER_T *header;
920 
921 				if (remove == instance->completion_insert)
922 					break;
923 
924 				completion = &instance->completions[
925 					remove & (MAX_COMPLETIONS - 1)];
926 
927 				/*
928 				 * A read memory barrier is needed to stop
929 				 * prefetch of a stale completion record; it
930 				 * pairs with the wmb() in add_completion(). */
931 				rmb();
932 
933 				service = completion->service_userdata;
934 				user_service = service->base.userdata;
935 				completion->service_userdata =
936 					user_service->userdata;
937 
938 				header = completion->header;
939 				if (header) {
940 					void __user *msgbuf;
941 					int msglen;
942 
943 					msglen = header->size +
944 						sizeof(VCHIQ_HEADER_T);
945 					/* This must be a VCHIQ-style service */
946 					if (args.msgbufsize < msglen) {
947 						vchiq_log_error(
948 							vchiq_arm_log_level,
949 							"header %pK: msgbufsize %x < msglen %x",
950 							header, args.msgbufsize,
951 							msglen);
952 						WARN(1, "invalid message "
953 							"size\n");
954 						if (ret == 0)
955 							ret = -EMSGSIZE;
956 						break;
957 					}
958 					if (msgbufcount <= 0)
959 						/* Stall here for lack of a
960 						** buffer for the message. */
961 						break;
962 					/* Get the pointer from user space */
963 					msgbufcount--;
964 					if (copy_from_user(&msgbuf,
965 						(const void __user *)
966 						&args.msgbufs[msgbufcount],
967 						sizeof(msgbuf)) != 0) {
968 						if (ret == 0)
969 							ret = -EFAULT;
970 						break;
971 					}
972 
973 					/* Copy the message to user space */
974 					if (copy_to_user(msgbuf, header,
975 						msglen) != 0) {
976 						if (ret == 0)
977 							ret = -EFAULT;
978 						break;
979 					}
980 
981 					/* Now it has been copied, the message
982 					** can be released. */
983 					vchiq_release_message(service->handle,
984 						header);
985 
986 					/* The completion must point to the
987 					** msgbuf. */
988 					completion->header = msgbuf;
989 				}
990 
991 				if ((completion->reason ==
992 					VCHIQ_SERVICE_CLOSED) &&
993 					!instance->use_close_delivered)
994 					unlock_service(service);
995 
996 				if (copy_to_user((void __user *)(
997 					(size_t)args.buf +
998 					ret * sizeof(VCHIQ_COMPLETION_DATA_T)),
999 					completion,
1000 					sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) {
1001 					if (ret == 0)
1002 						ret = -EFAULT;
1003 					break;
1004 				}
1005 
1006 				/*
1007 				 * Ensure that the above copy has completed
1008 				 * before advancing the remove pointer.
1009 				 */
1010 				mb();
1011 				remove++;
1012 				instance->completion_remove = remove;
1013 			}
1014 
1015 			if (msgbufcount != args.msgbufcount) {
1016 				if (copy_to_user((void __user *)
1017 					&((VCHIQ_AWAIT_COMPLETION_T *)arg)->
1018 						msgbufcount,
1019 					&msgbufcount,
1020 					sizeof(msgbufcount)) != 0) {
1021 					ret = -EFAULT;
1022 				}
1023 			}
1024 		}
1025 
1026 		if (ret != 0)
1027 			up(&instance->remove_event);
1028 		mutex_unlock(&instance->completion_mutex);
1029 		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
1030 	} break;
1031 
1032 	case VCHIQ_IOC_DEQUEUE_MESSAGE: {
1033 		VCHIQ_DEQUEUE_MESSAGE_T args;
1034 		USER_SERVICE_T *user_service;
1035 		VCHIQ_HEADER_T *header;
1036 
1037 		DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
1038 		if (copy_from_user
1039 			 (&args, (const void __user *)arg,
1040 			  sizeof(args)) != 0) {
1041 			ret = -EFAULT;
1042 			break;
1043 		}
1044 		service = find_service_for_instance(instance, args.handle);
1045 		if (!service) {
1046 			ret = -EINVAL;
1047 			break;
1048 		}
1049 		user_service = (USER_SERVICE_T *)service->base.userdata;
1050 		if (user_service->is_vchi == 0) {
1051 			ret = -EINVAL;
1052 			break;
1053 		}
1054 
1055 		spin_lock(&msg_queue_spinlock);
1056 		if (user_service->msg_remove == user_service->msg_insert) {
1057 			if (!args.blocking) {
1058 				spin_unlock(&msg_queue_spinlock);
1059 				DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
1060 				ret = -EWOULDBLOCK;
1061 				break;
1062 			}
1063 			user_service->dequeue_pending = 1;
1064 			do {
1065 				spin_unlock(&msg_queue_spinlock);
1066 				DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
1067 				if (down_interruptible(
1068 					&user_service->insert_event) != 0) {
1069 					vchiq_log_info(vchiq_arm_log_level,
1070 						"DEQUEUE_MESSAGE interrupted");
1071 					ret = -EINTR;
1072 					break;
1073 				}
1074 				spin_lock(&msg_queue_spinlock);
1075 			} while (user_service->msg_remove ==
1076 				user_service->msg_insert);
1077 
1078 			if (ret)
1079 				break;
1080 		}
1081 
1082 		BUG_ON((int)(user_service->msg_insert -
1083 			user_service->msg_remove) < 0);
1084 
1085 		header = user_service->msg_queue[user_service->msg_remove &
1086 			(MSG_QUEUE_SIZE - 1)];
1087 		user_service->msg_remove++;
1088 		spin_unlock(&msg_queue_spinlock);
1089 
1090 		up(&user_service->remove_event);
1091 		if (header == NULL)
1092 			ret = -ENOTCONN;
1093 		else if (header->size <= args.bufsize) {
1094 			/* Copy to user space if msgbuf is not NULL */
1095 			if ((args.buf == NULL) ||
1096 				(copy_to_user((void __user *)args.buf,
1097 				header->data,
1098 				header->size) == 0)) {
1099 				ret = header->size;
1100 				vchiq_release_message(
1101 					service->handle,
1102 					header);
1103 			} else
1104 				ret = -EFAULT;
1105 		} else {
1106 			vchiq_log_error(vchiq_arm_log_level,
1107 				"header %pK: bufsize %x < size %x",
1108 				header, args.bufsize, header->size);
1109 			WARN(1, "invalid size\n");
1110 			ret = -EMSGSIZE;
1111 		}
1112 		DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
1113 	} break;
1114 
1115 	case VCHIQ_IOC_GET_CLIENT_ID: {
1116 		VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
1117 
1118 		ret = vchiq_get_client_id(handle);
1119 	} break;
1120 
1121 	case VCHIQ_IOC_GET_CONFIG: {
1122 		VCHIQ_GET_CONFIG_T args;
1123 		VCHIQ_CONFIG_T config;
1124 
1125 		if (copy_from_user(&args, (const void __user *)arg,
1126 			sizeof(args)) != 0) {
1127 			ret = -EFAULT;
1128 			break;
1129 		}
1130 		if (args.config_size > sizeof(config)) {
1131 			ret = -EINVAL;
1132 			break;
1133 		}
1134 		status = vchiq_get_config(instance, args.config_size, &config);
1135 		if (status == VCHIQ_SUCCESS) {
1136 			if (copy_to_user((void __user *)args.pconfig,
1137 				    &config, args.config_size) != 0) {
1138 				ret = -EFAULT;
1139 				break;
1140 			}
1141 		}
1142 	} break;
1143 
1144 	case VCHIQ_IOC_SET_SERVICE_OPTION: {
1145 		VCHIQ_SET_SERVICE_OPTION_T args;
1146 
1147 		if (copy_from_user(
1148 			&args, (const void __user *)arg,
1149 			sizeof(args)) != 0) {
1150 			ret = -EFAULT;
1151 			break;
1152 		}
1153 
1154 		service = find_service_for_instance(instance, args.handle);
1155 		if (!service) {
1156 			ret = -EINVAL;
1157 			break;
1158 		}
1159 
1160 		status = vchiq_set_service_option(
1161 				args.handle, args.option, args.value);
1162 	} break;
1163 
1164 #if defined(CONFIG_BCM2835_VCHIQ_SUPPORT_MEMDUMP)
1165 	case VCHIQ_IOC_DUMP_PHYS_MEM: {
1166 		VCHIQ_DUMP_MEM_T  args;
1167 
1168 		if (copy_from_user
1169 			 (&args, (const void __user *)arg,
1170 			  sizeof(args)) != 0) {
1171 			ret = -EFAULT;
1172 			break;
1173 		}
1174 		dump_phys_mem(args.virt_addr, args.num_bytes);
1175 	} break;
1176 #endif
1177 
1178 	case VCHIQ_IOC_LIB_VERSION: {
1179 		unsigned int lib_version = (unsigned int)arg;
1180 
1181 		if (lib_version < VCHIQ_VERSION_MIN)
1182 			ret = -EINVAL;
1183 		else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
1184 			instance->use_close_delivered = 1;
1185 	} break;
1186 
1187 	case VCHIQ_IOC_CLOSE_DELIVERED: {
1188 		VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
1189 
1190 		service = find_closed_service_for_instance(instance, handle);
1191 		if (service != NULL) {
1192 			USER_SERVICE_T *user_service =
1193 				(USER_SERVICE_T *)service->base.userdata;
1194 			close_delivered(user_service);
1195 		} else
1196 			ret = -EINVAL;
1198 	} break;
1199 
1200 	default:
1201 		ret = -ENOTTY;
1202 		break;
1203 	}
1204 
1205 	if (service)
1206 		unlock_service(service);
1207 
1208 	if (ret == 0) {
1209 		if (status == VCHIQ_ERROR)
1210 			ret = -EIO;
1211 		else if (status == VCHIQ_RETRY)
1212 			ret = -EINTR;
1213 	}
1214 
1215 	if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
1216 		(ret != -EWOULDBLOCK))
1217 		vchiq_log_info(vchiq_arm_log_level,
1218 			"  ioctl instance %lx, cmd %s -> status %d, %ld",
1219 			(unsigned long)instance,
1220 			(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
1221 				ioctl_names[_IOC_NR(cmd)] :
1222 				"<invalid>",
1223 			status, ret);
1224 	else
1225 		vchiq_log_trace(vchiq_arm_log_level,
1226 			"  ioctl instance %lx, cmd %s -> status %d, %ld",
1227 			(unsigned long)instance,
1228 			(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
1229 				ioctl_names[_IOC_NR(cmd)] :
1230 				"<invalid>",
1231 			status, ret);
1232 
1233 	return ret;
1234 }
1235 
1236 #if defined(CONFIG_COMPAT)
1237 
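/* 32-bit compatibility handlers. Each one copies the 32-bit argument
** layout in, rebuilds the native structure in user-addressable memory
** obtained from compat_alloc_user_space(), and then forwards to the
** native vchiq_ioctl(). */
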
1238 struct vchiq_service_params32 {
1239 	int fourcc;
1240 	compat_uptr_t callback;
1241 	compat_uptr_t userdata;
1242 	short version; /* Increment for non-trivial changes */
1243 	short version_min; /* Update for incompatible changes */
1244 };
1245 
1246 struct vchiq_create_service32 {
1247 	struct vchiq_service_params32 params;
1248 	int is_open;
1249 	int is_vchi;
1250 	unsigned int handle; /* OUT */
1251 };
1252 
1253 #define VCHIQ_IOC_CREATE_SERVICE32 \
1254 	_IOWR(VCHIQ_IOC_MAGIC, 2, struct vchiq_create_service32)
1255 
1256 static long
1257 vchiq_compat_ioctl_create_service(
1258 	struct file *file,
1259 	unsigned int cmd,
1260 	unsigned long arg)
1261 {
1262 	VCHIQ_CREATE_SERVICE_T __user *args;
1263 	struct vchiq_create_service32 __user *ptrargs32 =
1264 		(struct vchiq_create_service32 __user *)arg;
1265 	struct vchiq_create_service32 args32;
1266 	long ret;
1267 
1268 	args = compat_alloc_user_space(sizeof(*args));
1269 	if (!args)
1270 		return -EFAULT;
1271 
1272 	if (copy_from_user(&args32,
1273 			   (struct vchiq_create_service32 __user *)arg,
1274 			   sizeof(args32)))
1275 		return -EFAULT;
1276 
1277 	if (put_user(args32.params.fourcc, &args->params.fourcc) ||
1278 	    put_user(compat_ptr(args32.params.callback),
1279 		     &args->params.callback) ||
1280 	    put_user(compat_ptr(args32.params.userdata),
1281 		     &args->params.userdata) ||
1282 	    put_user(args32.params.version, &args->params.version) ||
1283 	    put_user(args32.params.version_min,
1284 		     &args->params.version_min) ||
1285 	    put_user(args32.is_open, &args->is_open) ||
1286 	    put_user(args32.is_vchi, &args->is_vchi) ||
1287 	    put_user(args32.handle, &args->handle))
1288 		return -EFAULT;
1289 
1290 	ret = vchiq_ioctl(file, VCHIQ_IOC_CREATE_SERVICE, (unsigned long)args);
1291 
1292 	if (ret < 0)
1293 		return ret;
1294 
1295 	if (get_user(args32.handle, &args->handle))
1296 		return -EFAULT;
1297 
1298 	if (copy_to_user(&ptrargs32->handle,
1299 			 &args32.handle,
1300 			 sizeof(args32.handle)))
1301 		return -EFAULT;
1302 
1303 	return 0;
1304 }
1305 
1306 struct vchiq_element32 {
1307 	compat_uptr_t data;
1308 	unsigned int size;
1309 };
1310 
1311 struct vchiq_queue_message32 {
1312 	unsigned int handle;
1313 	unsigned int count;
1314 	compat_uptr_t elements;
1315 };
1316 
1317 #define VCHIQ_IOC_QUEUE_MESSAGE32 \
1318 	_IOW(VCHIQ_IOC_MAGIC,  4, struct vchiq_queue_message32)
1319 
1320 static long
1321 vchiq_compat_ioctl_queue_message(struct file *file,
1322 				 unsigned int cmd,
1323 				 unsigned long arg)
1324 {
1325 	VCHIQ_QUEUE_MESSAGE_T *args;
1326 	struct vchiq_element *elements;
1327 	struct vchiq_queue_message32 args32;
1328 	unsigned int count;
1329 
1330 	if (copy_from_user(&args32,
1331 			   (struct vchiq_queue_message32 __user *)arg,
1332 			   sizeof(args32)))
1333 		return -EFAULT;
1334 
1335 	args = compat_alloc_user_space(sizeof(*args) +
1336 				       (sizeof(*elements) * MAX_ELEMENTS));
1337 
1338 	if (!args)
1339 		return -EFAULT;
1340 
1341 	if (put_user(args32.handle, &args->handle) ||
1342 	    put_user(args32.count, &args->count) ||
1343 	    put_user(compat_ptr(args32.elements), &args->elements))
1344 		return -EFAULT;
1345 
1346 	if (args32.count > MAX_ELEMENTS)
1347 		return -EINVAL;
1348 
1349 	if (args32.elements && args32.count) {
1350 		struct vchiq_element32 tempelement32[MAX_ELEMENTS];
1351 
1352 		elements = (struct vchiq_element __user *)(args + 1);
1353 
1354 		if (copy_from_user(&tempelement32,
1355 				   compat_ptr(args32.elements),
1356 				   sizeof(tempelement32)))
1357 			return -EFAULT;
1358 
1359 		for (count = 0; count < args32.count; count++) {
1360 			if (put_user(compat_ptr(tempelement32[count].data),
1361 				     &elements[count].data) ||
1362 			    put_user(tempelement32[count].size,
1363 				     &elements[count].size))
1364 				return -EFAULT;
1365 		}
1366 
1367 		if (put_user(elements, &args->elements))
1368 			return -EFAULT;
1369 	}
1370 
1371 	return vchiq_ioctl(file, VCHIQ_IOC_QUEUE_MESSAGE, (unsigned long)args);
1372 }
1373 
1374 struct vchiq_queue_bulk_transfer32 {
1375 	unsigned int handle;
1376 	compat_uptr_t data;
1377 	unsigned int size;
1378 	compat_uptr_t userdata;
1379 	VCHIQ_BULK_MODE_T mode;
1380 };
1381 
1382 #define VCHIQ_IOC_QUEUE_BULK_TRANSMIT32 \
1383 	_IOWR(VCHIQ_IOC_MAGIC, 5, struct vchiq_queue_bulk_transfer32)
1384 #define VCHIQ_IOC_QUEUE_BULK_RECEIVE32 \
1385 	_IOWR(VCHIQ_IOC_MAGIC, 6, struct vchiq_queue_bulk_transfer32)
1386 
1387 static long
1388 vchiq_compat_ioctl_queue_bulk(struct file *file,
1389 			      unsigned int cmd,
1390 			      unsigned long arg)
1391 {
1392 	VCHIQ_QUEUE_BULK_TRANSFER_T *args;
1393 	struct vchiq_queue_bulk_transfer32 args32;
1394 	struct vchiq_queue_bulk_transfer32 *ptrargs32 =
1395 		(struct vchiq_queue_bulk_transfer32 *)arg;
1396 	long ret;
1397 
1398 	args = compat_alloc_user_space(sizeof(*args));
1399 	if (!args)
1400 		return -EFAULT;
1401 
1402 	if (copy_from_user(&args32,
1403 			   (struct vchiq_queue_bulk_transfer32 __user *)arg,
1404 			   sizeof(args32)))
1405 		return -EFAULT;
1406 
1407 	if (put_user(args32.handle, &args->handle) ||
1408 	    put_user(compat_ptr(args32.data), &args->data) ||
1409 	    put_user(args32.size, &args->size) ||
1410 	    put_user(compat_ptr(args32.userdata), &args->userdata) ||
1411 	    put_user(args32.mode, &args->mode))
1412 		return -EFAULT;
1413 
1414 	if (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT32)
1415 		cmd = VCHIQ_IOC_QUEUE_BULK_TRANSMIT;
1416 	else
1417 		cmd = VCHIQ_IOC_QUEUE_BULK_RECEIVE;
1418 
1419 	ret = vchiq_ioctl(file, cmd, (unsigned long)args);
1420 
1421 	if (ret < 0)
1422 		return ret;
1423 
1424 	if (get_user(args32.mode, &args->mode))
1425 		return -EFAULT;
1426 
1427 	if (copy_to_user(&ptrargs32->mode,
1428 			 &args32.mode,
1429 			 sizeof(args32.mode)))
1430 		return -EFAULT;
1431 
1432 	return 0;
1433 }
1434 
1435 struct vchiq_completion_data32 {
1436 	VCHIQ_REASON_T reason;
1437 	compat_uptr_t header;
1438 	compat_uptr_t service_userdata;
1439 	compat_uptr_t bulk_userdata;
1440 };
1441 
1442 struct vchiq_await_completion32 {
1443 	unsigned int count;
1444 	compat_uptr_t buf;
1445 	unsigned int msgbufsize;
1446 	unsigned int msgbufcount; /* IN/OUT */
1447 	compat_uptr_t msgbufs;
1448 };
1449 
1450 #define VCHIQ_IOC_AWAIT_COMPLETION32 \
1451 	_IOWR(VCHIQ_IOC_MAGIC, 7, struct vchiq_await_completion32)
1452 
1453 static long
1454 vchiq_compat_ioctl_await_completion(struct file *file,
1455 				    unsigned int cmd,
1456 				    unsigned long arg)
1457 {
1458 	VCHIQ_AWAIT_COMPLETION_T *args;
1459 	VCHIQ_COMPLETION_DATA_T *completion;
1460 	VCHIQ_COMPLETION_DATA_T completiontemp;
1461 	struct vchiq_await_completion32 args32;
1462 	struct vchiq_completion_data32 completion32;
1463 	unsigned int *msgbufcount32;
1464 	compat_uptr_t msgbuf32;
1465 	void *msgbuf;
1466 	void **msgbufptr;
1467 	long ret;
1468 
1469 	args = compat_alloc_user_space(sizeof(*args) +
1470 				       sizeof(*completion) +
1471 				       sizeof(*msgbufptr));
1472 	if (!args)
1473 		return -EFAULT;
1474 
1475 	completion = (VCHIQ_COMPLETION_DATA_T *)(args + 1);
1476 	msgbufptr = (void __user **)(completion + 1);
1477 
1478 	if (copy_from_user(&args32,
1479 			   (struct vchiq_await_completion32 __user *)arg,
1480 			   sizeof(args32)))
1481 		return -EFAULT;
1482 
1483 	if (put_user(args32.count, &args->count) ||
1484 	    put_user(compat_ptr(args32.buf), &args->buf) ||
1485 	    put_user(args32.msgbufsize, &args->msgbufsize) ||
1486 	    put_user(args32.msgbufcount, &args->msgbufcount) ||
1487 	    put_user(compat_ptr(args32.msgbufs), &args->msgbufs))
1488 		return -EFAULT;
1489 
1490 	/* These are simple cases, so just fall into the native handler */
1491 	if (!args32.count || !args32.buf || !args32.msgbufcount)
1492 		return vchiq_ioctl(file,
1493 				   VCHIQ_IOC_AWAIT_COMPLETION,
1494 				   (unsigned long)args);
1495 
1496 	/*
1497 	 * These are the more complex cases.  Typical applications of this
1498 	 * ioctl will use a very large count, with a very large msgbufcount.
1499 	 * Since the native ioctl can asynchronously fill in the returned
1500 	 * buffers and the application can in theory begin processing messages
1501 	 * even before the ioctl returns, a bit of a trick is used here.
1502 	 *
1503 	 * By forcing both count and msgbufcount to be 1, it forces the native
1504 	 * ioctl to claim at most 1 message is available. This tricks the
1505 	 * calling application into thinking only 1 message was actually
1506 	 * available in the queue, so, like any well-behaved application, it
1507 	 * will retry, waiting until all the required messages are received.
1508 	 *
1509 	 * This trick has been tested and proven to work with vchiq_test,
1510 	 * Minecraft_PI, the "hello pi" examples, and various other
1511 	 * applications that are included in Raspbian.
1512 	 */
1513 
1514 	if (copy_from_user(&msgbuf32,
1515 			   compat_ptr(args32.msgbufs) +
1516 			   (sizeof(compat_uptr_t) *
1517 			   (args32.msgbufcount - 1)),
1518 			   sizeof(msgbuf32)))
1519 		return -EFAULT;
1520 
1521 	msgbuf = compat_ptr(msgbuf32);
1522 
1523 	if (copy_to_user(msgbufptr,
1524 			 &msgbuf,
1525 			 sizeof(msgbuf)))
1526 		return -EFAULT;
1527 
1528 	if (copy_to_user(&args->msgbufs,
1529 			 &msgbufptr,
1530 			 sizeof(msgbufptr)))
1531 		return -EFAULT;
1532 
1533 	if (put_user(1U, &args->count) ||
1534 	    put_user(completion, &args->buf) ||
1535 	    put_user(1U, &args->msgbufcount))
1536 		return -EFAULT;
1537 
1538 	ret = vchiq_ioctl(file,
1539 			  VCHIQ_IOC_AWAIT_COMPLETION,
1540 			  (unsigned long)args);
1541 
1542 	/*
1543 	 * A return value of 0 here means that no messages were available
1544 	 * in the message queue.  In this case the native ioctl does not
1545 	 * return any data to the application at all.  Not even to update
1546 	 * msgbufcount.  This functionality needs to be kept here for
1547 	 * compatibility.
1548 	 *
1549 	 * Of course, < 0 means that an error occurred and no data is being
1550 	 * returned.
1551 	 *
1552 	 * Since count and msgbufcount were forced to 1, the only other
1553 	 * possible return value is 1, meaning that exactly 1 message was
1554 	 * available, so the multiple-message case does not need to be
1555 	 * handled here.
1556 	 */
1557 	if (ret <= 0)
1558 		return ret;
1559 
1560 	if (copy_from_user(&completiontemp, completion, sizeof(*completion)))
1561 		return -EFAULT;
1562 
1563 	completion32.reason = completiontemp.reason;
1564 	completion32.header = ptr_to_compat(completiontemp.header);
1565 	completion32.service_userdata =
1566 		ptr_to_compat(completiontemp.service_userdata);
1567 	completion32.bulk_userdata =
1568 		ptr_to_compat(completiontemp.bulk_userdata);
1569 
1570 	if (copy_to_user(compat_ptr(args32.buf),
1571 			 &completion32,
1572 			 sizeof(completion32)))
1573 		return -EFAULT;
1574 
1575 	args32.msgbufcount--;
1576 
1577 	msgbufcount32 =
1578 		&((struct vchiq_await_completion32 __user *)arg)->msgbufcount;
1579 
1580 	if (copy_to_user(msgbufcount32,
1581 			 &args32.msgbufcount,
1582 			 sizeof(args32.msgbufcount)))
1583 		return -EFAULT;
1584 
1585 	return 1;
1586 }
1587 
1588 struct vchiq_dequeue_message32 {
1589 	unsigned int handle;
1590 	int blocking;
1591 	unsigned int bufsize;
1592 	compat_uptr_t buf;
1593 };
1594 
1595 #define VCHIQ_IOC_DEQUEUE_MESSAGE32 \
1596 	_IOWR(VCHIQ_IOC_MAGIC, 8, struct vchiq_dequeue_message32)
1597 
1598 static long
1599 vchiq_compat_ioctl_dequeue_message(struct file *file,
1600 				   unsigned int cmd,
1601 				   unsigned long arg)
1602 {
1603 	VCHIQ_DEQUEUE_MESSAGE_T *args;
1604 	struct vchiq_dequeue_message32 args32;
1605 
1606 	args = compat_alloc_user_space(sizeof(*args));
1607 	if (!args)
1608 		return -EFAULT;
1609 
1610 	if (copy_from_user(&args32,
1611 			   (struct vchiq_dequeue_message32 *)arg,
1612 			   sizeof(args32)))
1613 		return -EFAULT;
1614 
1615 	if (put_user(args32.handle, &args->handle) ||
1616 	    put_user(args32.blocking, &args->blocking) ||
1617 	    put_user(args32.bufsize, &args->bufsize) ||
1618 	    put_user(compat_ptr(args32.buf), &args->buf))
1619 		return -EFAULT;
1620 
1621 	return vchiq_ioctl(file, VCHIQ_IOC_DEQUEUE_MESSAGE,
1622 			   (unsigned long)args);
1623 }
1624 
1625 struct vchiq_get_config32 {
1626 	unsigned int config_size;
1627 	compat_uptr_t pconfig;
1628 };
1629 
1630 #define VCHIQ_IOC_GET_CONFIG32 \
1631 	_IOWR(VCHIQ_IOC_MAGIC, 10, struct vchiq_get_config32)
1632 
1633 static long
1634 vchiq_compat_ioctl_get_config(struct file *file,
1635 			      unsigned int cmd,
1636 			      unsigned long arg)
1637 {
1638 	VCHIQ_GET_CONFIG_T *args;
1639 	struct vchiq_get_config32 args32;
1640 
1641 	args = compat_alloc_user_space(sizeof(*args));
1642 	if (!args)
1643 		return -EFAULT;
1644 
1645 	if (copy_from_user(&args32,
1646 			   (struct vchiq_get_config32 *)arg,
1647 			   sizeof(args32)))
1648 		return -EFAULT;
1649 
1650 	if (put_user(args32.config_size, &args->config_size) ||
1651 	    put_user(compat_ptr(args32.pconfig), &args->pconfig))
1652 		return -EFAULT;
1653 
1654 	return vchiq_ioctl(file, VCHIQ_IOC_GET_CONFIG, (unsigned long)args);
1655 }
1656 
1657 #if defined(CONFIG_BCM2835_VCHIQ_SUPPORT_MEMDUMP)
1658 
1659 struct vchiq_dump_mem32 {
1660 	compat_uptr_t virt_addr;
1661 	u32 num_bytes;
1662 };
1663 
1664 #define VCHIQ_IOC_DUMP_PHYS_MEM32 \
1665 	_IOW(VCHIQ_IOC_MAGIC, 15, struct vchiq_dump_mem32)
1666 
1667 static long
1668 vchiq_compat_ioctl_dump_phys_mem(struct file *file,
1669 				 unsigned int cmd,
1670 				 unsigned long arg)
1671 {
1672 	VCHIQ_DUMP_MEM_T *args;
1673 	struct vchiq_dump_mem32 args32;
1674 
1675 	args = compat_alloc_user_space(sizeof(*args));
1676 	if (!args)
1677 		return -EFAULT;
1678 
1679 	if (copy_from_user(&args32,
1680 			   (struct vchiq_dump_mem32 *)arg,
1681 			   sizeof(args32)))
1682 		return -EFAULT;
1683 
1684 	if (put_user(compat_ptr(args32.virt_addr), &args->virt_addr) ||
1685 	    put_user(args32.num_bytes, &args->num_bytes))
1686 		return -EFAULT;
1687 
1688 	return vchiq_ioctl(file, VCHIQ_IOC_DUMP_PHYS_MEM, (unsigned long)args);
1689 }
1690 
1691 #endif
1692 
1693 static long
1694 vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1695 {
1696 	switch (cmd) {
1697 	case VCHIQ_IOC_CREATE_SERVICE32:
1698 		return vchiq_compat_ioctl_create_service(file, cmd, arg);
1699 	case VCHIQ_IOC_QUEUE_MESSAGE32:
1700 		return vchiq_compat_ioctl_queue_message(file, cmd, arg);
1701 	case VCHIQ_IOC_QUEUE_BULK_TRANSMIT32:
1702 	case VCHIQ_IOC_QUEUE_BULK_RECEIVE32:
1703 		return vchiq_compat_ioctl_queue_bulk(file, cmd, arg);
1704 	case VCHIQ_IOC_AWAIT_COMPLETION32:
1705 		return vchiq_compat_ioctl_await_completion(file, cmd, arg);
1706 	case VCHIQ_IOC_DEQUEUE_MESSAGE32:
1707 		return vchiq_compat_ioctl_dequeue_message(file, cmd, arg);
1708 	case VCHIQ_IOC_GET_CONFIG32:
1709 		return vchiq_compat_ioctl_get_config(file, cmd, arg);
1710 #if defined(CONFIG_BCM2835_VCHIQ_SUPPORT_MEMDUMP)
1711 	case VCHIQ_IOC_DUMP_PHYS_MEM32:
1712 		return vchiq_compat_ioctl_dump_phys_mem(file, cmd, arg);
1713 #endif
1714 	default:
1715 		return vchiq_ioctl(file, cmd, arg);
1716 	}
1717 }
1718 
1719 #endif
1720 
1721 /****************************************************************************
1722 *
1723 *   vchiq_open
1724 *
1725 ***************************************************************************/
1726 
1727 static int
1728 vchiq_open(struct inode *inode, struct file *file)
1729 {
1730 	int dev = iminor(inode) & 0x0f;
1731 
1732 	vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
1733 	switch (dev) {
1734 	case VCHIQ_MINOR: {
1735 		int ret;
1736 		VCHIQ_STATE_T *state = vchiq_get_state();
1737 		VCHIQ_INSTANCE_T instance;
1738 
1739 		if (!state) {
1740 			vchiq_log_error(vchiq_arm_log_level,
1741 				"vchiq has no connection to VideoCore");
1742 			return -ENOTCONN;
1743 		}
1744 
1745 		instance = kzalloc(sizeof(*instance), GFP_KERNEL);
1746 		if (!instance)
1747 			return -ENOMEM;
1748 
1749 		instance->state = state;
1750 		instance->pid = current->tgid;
1751 
1752 		ret = vchiq_debugfs_add_instance(instance);
1753 		if (ret != 0) {
1754 			kfree(instance);
1755 			return ret;
1756 		}
1757 
1758 		sema_init(&instance->insert_event, 0);
1759 		sema_init(&instance->remove_event, 0);
1760 		mutex_init(&instance->completion_mutex);
1761 		mutex_init(&instance->bulk_waiter_list_mutex);
1762 		INIT_LIST_HEAD(&instance->bulk_waiter_list);
1763 
1764 		file->private_data = instance;
1765 	} break;
1766 
1767 	default:
1768 		vchiq_log_error(vchiq_arm_log_level,
1769 			"Unknown minor device: %d", dev);
1770 		return -ENXIO;
1771 	}
1772 
1773 	return 0;
1774 }
1775 
1776 /****************************************************************************
1777 *
1778 *   vchiq_release
1779 *
1780 ***************************************************************************/
1781 
1782 static int
1783 vchiq_release(struct inode *inode, struct file *file)
1784 {
1785 	int dev = iminor(inode) & 0x0f;
1786 	int ret = 0;
1787 
1788 	switch (dev) {
1789 	case VCHIQ_MINOR: {
1790 		VCHIQ_INSTANCE_T instance = file->private_data;
1791 		VCHIQ_STATE_T *state = vchiq_get_state();
1792 		VCHIQ_SERVICE_T *service;
1793 		int i;
1794 
1795 		vchiq_log_info(vchiq_arm_log_level,
1796 			"vchiq_release: instance=%lx",
1797 			(unsigned long)instance);
1798 
1799 		if (!state) {
1800 			ret = -EPERM;
1801 			goto out;
1802 		}
1803 
1804 		/* Ensure videocore is awake to allow termination. */
1805 		vchiq_use_internal(instance->state, NULL,
1806 				USE_TYPE_VCHIQ);
1807 
1808 		mutex_lock(&instance->completion_mutex);
1809 
1810 		/* Wake the completion thread and ask it to exit */
1811 		instance->closing = 1;
1812 		up(&instance->insert_event);
1813 
1814 		mutex_unlock(&instance->completion_mutex);
1815 
1816 		/* Wake the slot handler if the completion queue is full. */
1817 		up(&instance->remove_event);
1818 
1819 		/* Mark all services for termination... */
1820 		i = 0;
1821 		while ((service = next_service_by_instance(state, instance,
1822 			&i)) !=	NULL) {
1823 			USER_SERVICE_T *user_service = service->base.userdata;
1824 
1825 			/* Wake the slot handler if the msg queue is full. */
1826 			up(&user_service->remove_event);
1827 
1828 			vchiq_terminate_service_internal(service);
1829 			unlock_service(service);
1830 		}
1831 
1832 		/* ...and wait for them to die */
1833 		i = 0;
1834 		while ((service = next_service_by_instance(state, instance, &i))
1835 			!= NULL) {
1836 			USER_SERVICE_T *user_service = service->base.userdata;
1837 
1838 			down(&service->remove_event);
1839 
1840 			BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
1841 
1842 			spin_lock(&msg_queue_spinlock);
1843 
1844 			while (user_service->msg_remove !=
1845 				user_service->msg_insert) {
1846 				VCHIQ_HEADER_T *header = user_service->
1847 					msg_queue[user_service->msg_remove &
1848 						(MSG_QUEUE_SIZE - 1)];
1849 				user_service->msg_remove++;
1850 				spin_unlock(&msg_queue_spinlock);
1851 
1852 				if (header)
1853 					vchiq_release_message(
1854 						service->handle,
1855 						header);
1856 				spin_lock(&msg_queue_spinlock);
1857 			}
1858 
1859 			spin_unlock(&msg_queue_spinlock);
1860 
1861 			unlock_service(service);
1862 		}
1863 
1864 		/* Release any closed services */
1865 		while (instance->completion_remove !=
1866 			instance->completion_insert) {
1867 			VCHIQ_COMPLETION_DATA_T *completion;
1868 			VCHIQ_SERVICE_T *service;
1869 
1870 			completion = &instance->completions[
1871 				instance->completion_remove &
1872 				(MAX_COMPLETIONS - 1)];
1873 			service = completion->service_userdata;
			if (completion->reason == VCHIQ_SERVICE_CLOSED) {
1876 				USER_SERVICE_T *user_service =
1877 					service->base.userdata;
1878 
1879 				/* Wake any blocked user-thread */
1880 				if (instance->use_close_delivered)
1881 					up(&user_service->close_event);
1882 				unlock_service(service);
1883 			}
1884 			instance->completion_remove++;
1885 		}
1886 
1887 		/* Release the PEER service count. */
1888 		vchiq_release_internal(instance->state, NULL);
1889 
1890 		{
1891 			struct list_head *pos, *next;
1892 
1893 			list_for_each_safe(pos, next,
1894 				&instance->bulk_waiter_list) {
1895 				struct bulk_waiter_node *waiter;
1896 
1897 				waiter = list_entry(pos,
1898 					struct bulk_waiter_node,
1899 					list);
1900 				list_del(pos);
1901 				vchiq_log_info(vchiq_arm_log_level,
1902 					"bulk_waiter - cleaned up %pK for pid %d",
1903 					waiter, waiter->pid);
1904 				kfree(waiter);
1905 			}
1906 		}
1907 
1908 		vchiq_debugfs_remove_instance(instance);
1909 
1910 		kfree(instance);
1911 		file->private_data = NULL;
1912 	} break;
1913 
1914 	default:
1915 		vchiq_log_error(vchiq_arm_log_level,
1916 			"Unknown minor device: %d", dev);
1917 		ret = -ENXIO;
1918 	}
1919 
1920 out:
1921 	return ret;
1922 }
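
/*
 * Worked example (illustrative only) of the free-running ring indices used
 * in vchiq_release() above: completion_insert and completion_remove are
 * never wrapped, so with MAX_COMPLETIONS = 128 (a power of two),
 *
 *	pending = instance->completion_insert - instance->completion_remove;
 *	slot    = instance->completion_remove & (MAX_COMPLETIONS - 1);
 *
 * remain correct even across integer wrap-around, provided the queue never
 * holds more than MAX_COMPLETIONS entries.  The same idiom is used for the
 * per-service msg_queue with MSG_QUEUE_SIZE.
 */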
1923 
1924 /****************************************************************************
1925 *
1926 *   vchiq_dump
1927 *
1928 ***************************************************************************/
1929 
1930 void
1931 vchiq_dump(void *dump_context, const char *str, int len)
1932 {
1933 	DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context;
1934 
1935 	if (context->actual < context->space) {
1936 		int copy_bytes;
1937 
1938 		if (context->offset > 0) {
1939 			int skip_bytes = min(len, (int)context->offset);
1940 
1941 			str += skip_bytes;
1942 			len -= skip_bytes;
1943 			context->offset -= skip_bytes;
1944 			if (context->offset > 0)
1945 				return;
1946 		}
1947 		copy_bytes = min(len, (int)(context->space - context->actual));
1948 		if (copy_bytes == 0)
1949 			return;
		if (copy_to_user(context->buf + context->actual, str,
			copy_bytes)) {
			/* Stop on fault - later calls and the NUL fix-up below
			** must not index the buffer with a negative offset. */
			context->actual = -EFAULT;
			return;
		}
		context->actual += copy_bytes;
1954 		len -= copy_bytes;
1955 
		/* If the terminating NUL is included in the length, then it
1957 		** marks the end of a line and should be replaced with a
1958 		** carriage return. */
1959 		if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
1960 			char cr = '\n';
1961 
1962 			if (copy_to_user(context->buf + context->actual - 1,
1963 				&cr, 1))
1964 				context->actual = -EFAULT;
1965 		}
1966 	}
1967 }
1968 
1969 /****************************************************************************
1970 *
*   vchiq_dump_platform_instances
1972 *
1973 ***************************************************************************/
1974 
1975 void
1976 vchiq_dump_platform_instances(void *dump_context)
1977 {
1978 	VCHIQ_STATE_T *state = vchiq_get_state();
1979 	char buf[80];
1980 	int len;
1981 	int i;
1982 
1983 	/* There is no list of instances, so instead scan all services,
1984 		marking those that have been dumped. */
1985 
1986 	for (i = 0; i < state->unused_service; i++) {
1987 		VCHIQ_SERVICE_T *service = state->services[i];
1988 		VCHIQ_INSTANCE_T instance;
1989 
1990 		if (service && (service->base.callback == service_callback)) {
1991 			instance = service->instance;
1992 			if (instance)
1993 				instance->mark = 0;
1994 		}
1995 	}
1996 
1997 	for (i = 0; i < state->unused_service; i++) {
1998 		VCHIQ_SERVICE_T *service = state->services[i];
1999 		VCHIQ_INSTANCE_T instance;
2000 
2001 		if (service && (service->base.callback == service_callback)) {
2002 			instance = service->instance;
2003 			if (instance && !instance->mark) {
2004 				len = snprintf(buf, sizeof(buf),
2005 					"Instance %pK: pid %d,%s completions %d/%d",
2006 					instance, instance->pid,
2007 					instance->connected ? " connected, " :
2008 						"",
2009 					instance->completion_insert -
2010 						instance->completion_remove,
2011 					MAX_COMPLETIONS);
2012 
2013 				vchiq_dump(dump_context, buf, len + 1);
2014 
2015 				instance->mark = 1;
2016 			}
2017 		}
2018 	}
2019 }
2020 
2021 /****************************************************************************
2022 *
2023 *   vchiq_dump_platform_service_state
2024 *
2025 ***************************************************************************/
2026 
2027 void
2028 vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
2029 {
2030 	USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata;
2031 	char buf[80];
2032 	int len;
2033 
2034 	len = snprintf(buf, sizeof(buf), "  instance %pK", service->instance);
2035 
2036 	if ((service->base.callback == service_callback) &&
2037 		user_service->is_vchi) {
2038 		len += snprintf(buf + len, sizeof(buf) - len,
2039 			", %d/%d messages",
2040 			user_service->msg_insert - user_service->msg_remove,
2041 			MSG_QUEUE_SIZE);
2042 
2043 		if (user_service->dequeue_pending)
2044 			len += snprintf(buf + len, sizeof(buf) - len,
2045 				" (dequeue pending)");
2046 	}
2047 
2048 	vchiq_dump(dump_context, buf, len + 1);
2049 }
2050 
2051 /****************************************************************************
2052 *
2053 *   dump_user_mem
2054 *
2055 ***************************************************************************/
2056 
2057 #if defined(CONFIG_BCM2835_VCHIQ_SUPPORT_MEMDUMP)
2058 
2059 static void
2060 dump_phys_mem(void *virt_addr, u32 num_bytes)
2061 {
2062 	int            rc;
2063 	u8            *end_virt_addr = virt_addr + num_bytes;
2064 	int            num_pages;
2065 	int            offset;
2066 	int            end_offset;
2067 	int            page_idx;
2068 	int            prev_idx;
2069 	struct page   *page;
2070 	struct page  **pages;
2071 	u8            *kmapped_virt_ptr;
2072 
2073 	/* Align virtAddr and endVirtAddr to 16 byte boundaries. */
2074 
2075 	virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL);
2076 	end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) &
2077 		~0x0fuL);
2078 
	offset = (int)(long)virt_addr & (PAGE_SIZE - 1);
	/* The loop below indexes pages by offset from the start of the first
	 * page, so the end bound must be the full aligned span - using only
	 * the in-page offset of end_virt_addr breaks multi-page ranges. */
	end_offset = offset + (end_virt_addr - (u8 *)virt_addr);

	num_pages = DIV_ROUND_UP(end_offset, PAGE_SIZE);
2083 
	pages = kmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
2085 	if (!pages) {
2086 		vchiq_log_error(vchiq_arm_log_level,
2087 			"Unable to allocation memory for %d pages\n",
2088 			num_pages);
2089 		return;
2090 	}
2091 
2092 	down_read(&current->mm->mmap_sem);
2093 	rc = get_user_pages(
2094 		(unsigned long)virt_addr, /* start */
2095 		num_pages,                /* len */
2096 		0,                        /* gup_flags */
2097 		pages,                    /* pages (array of page pointers) */
2098 		NULL);                    /* vmas */
2099 	up_read(&current->mm->mmap_sem);
2100 
2101 	prev_idx = -1;
2102 	page = NULL;
2103 
	if (rc < 0) {
		vchiq_log_error(vchiq_arm_log_level,
				"Failed to get user pages: %d\n", rc);
		num_pages = 0;	/* nothing pinned - don't put_page() below */
		goto out;
	}

	if (rc < num_pages) {
		/* Partial pin - only dump (and later release) the pages
		 * actually obtained. */
		num_pages = rc;
		end_offset = min(end_offset, num_pages * (int)PAGE_SIZE);
	}
2109 
2110 	while (offset < end_offset) {
2111 		int page_offset = offset % PAGE_SIZE;
2112 
2113 		page_idx = offset / PAGE_SIZE;
2114 		if (page_idx != prev_idx) {
2115 			if (page != NULL)
2116 				kunmap(page);
2117 			page = pages[page_idx];
2118 			kmapped_virt_ptr = kmap(page);
2119 			prev_idx = page_idx;
2120 		}
2121 
2122 		if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE)
2123 			vchiq_log_dump_mem("ph",
2124 				(u32)(unsigned long)&kmapped_virt_ptr[
2125 					page_offset],
2126 				&kmapped_virt_ptr[page_offset], 16);
2127 
2128 		offset += 16;
2129 	}
2130 
2131 out:
2132 	if (page != NULL)
2133 		kunmap(page);
2134 
2135 	for (page_idx = 0; page_idx < num_pages; page_idx++)
2136 		put_page(pages[page_idx]);
2137 
2138 	kfree(pages);
2139 }
2140 
#endif /* CONFIG_BCM2835_VCHIQ_SUPPORT_MEMDUMP */
2142 
2143 /****************************************************************************
2144 *
2145 *   vchiq_read
2146 *
2147 ***************************************************************************/
2148 
2149 static ssize_t
2150 vchiq_read(struct file *file, char __user *buf,
2151 	size_t count, loff_t *ppos)
2152 {
2153 	DUMP_CONTEXT_T context;
2154 
2155 	context.buf = buf;
2156 	context.actual = 0;
2157 	context.space = count;
2158 	context.offset = *ppos;
2159 
2160 	vchiq_dump_state(&context, &g_state);
2161 
	if (context.actual > 0)
		*ppos += context.actual;
2163 
2164 	return context.actual;
2165 }
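
/*
 * Illustrative sketch (example client, not part of the driver): because
 * vchiq_read() honours *ppos and reports how much of the dump was copied,
 * the state dump can be consumed from userspace with plain read(2)
 * semantics:
 *
 *	int fd = open("/dev/vchiq", O_RDONLY);
 *	char buf[4096];
 *	ssize_t n;
 *
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		write(STDOUT_FILENO, buf, n);
 *	close(fd);
 */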
2166 
2167 VCHIQ_STATE_T *
2168 vchiq_get_state(void)
{
2171 	if (g_state.remote == NULL)
2172 		printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
2173 	else if (g_state.remote->initialised != 1)
2174 		printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
2175 			__func__, g_state.remote->initialised);
2176 
2177 	return ((g_state.remote != NULL) &&
2178 		(g_state.remote->initialised == 1)) ? &g_state : NULL;
2179 }
2180 
2181 static const struct file_operations
2182 vchiq_fops = {
2183 	.owner = THIS_MODULE,
2184 	.unlocked_ioctl = vchiq_ioctl,
2185 #if defined(CONFIG_COMPAT)
2186 	.compat_ioctl = vchiq_compat_ioctl,
2187 #endif
2188 	.open = vchiq_open,
2189 	.release = vchiq_release,
2190 	.read = vchiq_read
2191 };
2192 
2193 /*
2194  * Autosuspend related functionality
2195  */
2196 
2197 int
2198 vchiq_videocore_wanted(VCHIQ_STATE_T *state)
2199 {
2200 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2201 
	if (!arm_state)
		/* autosuspend not supported - always return wanted */
		return 1;

	if (arm_state->blocked_count)
		return 1;

	if (!arm_state->videocore_use_count) {
		/* usage count zero - check for override unless we're forcing */
		if (arm_state->resume_blocked)
			return 0;
		return vchiq_platform_videocore_wanted(state);
	}

	/* non-zero usage count - videocore still required */
	return 1;
2216 }
2217 
2218 static VCHIQ_STATUS_T
2219 vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
2220 	VCHIQ_HEADER_T *header,
2221 	VCHIQ_SERVICE_HANDLE_T service_user,
2222 	void *bulk_user)
2223 {
2224 	vchiq_log_error(vchiq_susp_log_level,
2225 		"%s callback reason %d", __func__, reason);
	return VCHIQ_SUCCESS;
2227 }
2228 
2229 static int
2230 vchiq_keepalive_thread_func(void *v)
2231 {
2232 	VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
2233 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2234 
2235 	VCHIQ_STATUS_T status;
2236 	VCHIQ_INSTANCE_T instance;
2237 	VCHIQ_SERVICE_HANDLE_T ka_handle;
2238 
2239 	VCHIQ_SERVICE_PARAMS_T params = {
2240 		.fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
2241 		.callback    = vchiq_keepalive_vchiq_callback,
2242 		.version     = KEEPALIVE_VER,
2243 		.version_min = KEEPALIVE_VER_MIN
2244 	};
2245 
2246 	status = vchiq_initialise(&instance);
2247 	if (status != VCHIQ_SUCCESS) {
2248 		vchiq_log_error(vchiq_susp_log_level,
2249 			"%s vchiq_initialise failed %d", __func__, status);
2250 		goto exit;
2251 	}
2252 
2253 	status = vchiq_connect(instance);
2254 	if (status != VCHIQ_SUCCESS) {
2255 		vchiq_log_error(vchiq_susp_log_level,
2256 			"%s vchiq_connect failed %d", __func__, status);
2257 		goto shutdown;
2258 	}
2259 
2260 	status = vchiq_add_service(instance, &params, &ka_handle);
2261 	if (status != VCHIQ_SUCCESS) {
2262 		vchiq_log_error(vchiq_susp_log_level,
2263 			"%s vchiq_open_service failed %d", __func__, status);
2264 		goto shutdown;
2265 	}
2266 
2267 	while (1) {
2268 		long rc = 0, uc = 0;
2269 
2270 		if (wait_for_completion_interruptible(&arm_state->ka_evt)
2271 				!= 0) {
2272 			vchiq_log_error(vchiq_susp_log_level,
2273 				"%s interrupted", __func__);
2274 			flush_signals(current);
2275 			continue;
2276 		}
2277 
2278 		/* read and clear counters.  Do release_count then use_count to
2279 		 * prevent getting more releases than uses */
2280 		rc = atomic_xchg(&arm_state->ka_release_count, 0);
2281 		uc = atomic_xchg(&arm_state->ka_use_count, 0);
2282 
2283 		/* Call use/release service the requisite number of times.
2284 		 * Process use before release so use counts don't go negative */
2285 		while (uc--) {
2286 			atomic_inc(&arm_state->ka_use_ack_count);
2287 			status = vchiq_use_service(ka_handle);
2288 			if (status != VCHIQ_SUCCESS) {
2289 				vchiq_log_error(vchiq_susp_log_level,
2290 					"%s vchiq_use_service error %d",
2291 					__func__, status);
2292 			}
2293 		}
2294 		while (rc--) {
2295 			status = vchiq_release_service(ka_handle);
2296 			if (status != VCHIQ_SUCCESS) {
2297 				vchiq_log_error(vchiq_susp_log_level,
2298 					"%s vchiq_release_service error %d",
2299 					__func__, status);
2300 			}
2301 		}
2302 	}
2303 
2304 shutdown:
2305 	vchiq_shutdown(instance);
2306 exit:
2307 	return 0;
2308 }
2309 
2310 
2311 
2312 VCHIQ_STATUS_T
2313 vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
2314 {
2315 	if (arm_state) {
2316 		rwlock_init(&arm_state->susp_res_lock);
2317 
2318 		init_completion(&arm_state->ka_evt);
2319 		atomic_set(&arm_state->ka_use_count, 0);
2320 		atomic_set(&arm_state->ka_use_ack_count, 0);
2321 		atomic_set(&arm_state->ka_release_count, 0);
2322 
2323 		init_completion(&arm_state->vc_suspend_complete);
2324 
2325 		init_completion(&arm_state->vc_resume_complete);
2326 		/* Initialise to 'done' state.  We only want to block on resume
2327 		 * completion while videocore is suspended. */
2328 		set_resume_state(arm_state, VC_RESUME_RESUMED);
2329 
2330 		init_completion(&arm_state->resume_blocker);
2331 		/* Initialise to 'done' state.  We only want to block on this
2332 		 * completion while resume is blocked */
2333 		complete_all(&arm_state->resume_blocker);
2334 
2335 		init_completion(&arm_state->blocked_blocker);
2336 		/* Initialise to 'done' state.  We only want to block on this
2337 		 * completion while things are waiting on the resume blocker */
2338 		complete_all(&arm_state->blocked_blocker);
2339 
2340 		arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
2341 		arm_state->suspend_timer_running = 0;
2342 		setup_timer(&arm_state->suspend_timer, suspend_timer_callback,
2343 			    (unsigned long)(state));
2344 
2345 		arm_state->first_connect = 0;
2346 
2347 	}
2348 	return VCHIQ_SUCCESS;
2349 }
2350 
2351 /*
** Functions to modify the state variables:
2353 **	set_suspend_state
2354 **	set_resume_state
2355 **
2356 ** There are more state variables than we might like, so ensure they remain in
2357 ** step.  Suspend and resume state are maintained separately, since most of
2358 ** these state machines can operate independently.  However, there are a few
2359 ** states where state transitions in one state machine cause a reset to the
2360 ** other state machine.  In addition, there are some completion events which
2361 ** need to occur on state machine reset and end-state(s), so these are also
2362 ** dealt with in these functions.
2363 **
2364 ** In all states we set the state variable according to the input, but in some
** cases we perform additional steps outlined below:
2366 **
2367 ** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
2368 **			The suspend completion is completed after any suspend
2369 **			attempt.  When we reset the state machine we also reset
2370 **			the completion.  This reset occurs when videocore is
2371 **			resumed, and also if we initiate suspend after a suspend
2372 **			failure.
2373 **
2374 ** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
2375 **			suspend - ie from this point on we must try to suspend
2376 **			before resuming can occur.  We therefore also reset the
2377 **			resume state machine to VC_RESUME_IDLE in this state.
2378 **
2379 ** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
2380 **			complete_all on the suspend completion to notify
2381 **			anything waiting for suspend to happen.
2382 **
2383 ** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
2384 **			initiate resume, so no need to alter resume state.
2385 **			We call complete_all on the suspend completion to notify
2386 **			of suspend rejection.
2387 **
2388 ** VC_SUSPEND_FAILED - We failed to initiate videocore suspend.  We notify the
2389 **			suspend completion and reset the resume state machine.
2390 **
2391 ** VC_RESUME_IDLE - Initialise the resume completion at the same time.  The
**			resume completion is in its 'done' state whenever
**			videocore is running.  Therefore, the VC_RESUME_IDLE
2394 **			state implies that videocore is suspended.
2395 **			Hence, any thread which needs to wait until videocore is
2396 **			running can wait on this completion - it will only block
2397 **			if videocore is suspended.
2398 **
2399 ** VC_RESUME_RESUMED - Resume has completed successfully.  Videocore is running.
2400 **			Call complete_all on the resume completion to unblock
2401 **			any threads waiting for resume.	 Also reset the suspend
**			state machine to its idle state.
2403 **
2404 ** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
2405 */
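
/*
 * Illustrative round trip (assumed usage, condensed from the rules above;
 * with the susp_res_lock write lock held, as for the callers in this file):
 *
 *	set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
 *	set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
 *		(resume machine forced to VC_RESUME_IDLE,
 *		 re-arming vc_resume_complete)
 *	set_suspend_state(arm_state, VC_SUSPEND_SUSPENDED);
 *		(vc_suspend_complete completed - suspend waiters released)
 *
 *	set_resume_state(arm_state, VC_RESUME_REQUESTED);
 *	set_resume_state(arm_state, VC_RESUME_RESUMED);
 *		(vc_resume_complete completed and the suspend machine
 *		 reset to VC_SUSPEND_IDLE)
 */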
2406 
2407 void
2408 set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
2409 	enum vc_suspend_status new_state)
2410 {
2411 	/* set the state in all cases */
2412 	arm_state->vc_suspend_state = new_state;
2413 
2414 	/* state specific additional actions */
2415 	switch (new_state) {
2416 	case VC_SUSPEND_FORCE_CANCELED:
2417 		complete_all(&arm_state->vc_suspend_complete);
2418 		break;
2419 	case VC_SUSPEND_REJECTED:
2420 		complete_all(&arm_state->vc_suspend_complete);
2421 		break;
2422 	case VC_SUSPEND_FAILED:
2423 		complete_all(&arm_state->vc_suspend_complete);
2424 		arm_state->vc_resume_state = VC_RESUME_RESUMED;
2425 		complete_all(&arm_state->vc_resume_complete);
2426 		break;
2427 	case VC_SUSPEND_IDLE:
2428 		reinit_completion(&arm_state->vc_suspend_complete);
2429 		break;
2430 	case VC_SUSPEND_REQUESTED:
2431 		break;
2432 	case VC_SUSPEND_IN_PROGRESS:
2433 		set_resume_state(arm_state, VC_RESUME_IDLE);
2434 		break;
2435 	case VC_SUSPEND_SUSPENDED:
2436 		complete_all(&arm_state->vc_suspend_complete);
2437 		break;
2438 	default:
2439 		BUG();
2440 		break;
2441 	}
2442 }
2443 
2444 void
2445 set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
2446 	enum vc_resume_status new_state)
2447 {
2448 	/* set the state in all cases */
2449 	arm_state->vc_resume_state = new_state;
2450 
2451 	/* state specific additional actions */
2452 	switch (new_state) {
2453 	case VC_RESUME_FAILED:
2454 		break;
2455 	case VC_RESUME_IDLE:
2456 		reinit_completion(&arm_state->vc_resume_complete);
2457 		break;
2458 	case VC_RESUME_REQUESTED:
2459 		break;
2460 	case VC_RESUME_IN_PROGRESS:
2461 		break;
2462 	case VC_RESUME_RESUMED:
2463 		complete_all(&arm_state->vc_resume_complete);
2464 		set_suspend_state(arm_state, VC_SUSPEND_IDLE);
2465 		break;
2466 	default:
2467 		BUG();
2468 		break;
2469 	}
2470 }
2471 
2472 
2473 /* should be called with the write lock held */
static inline void
start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
{
	del_timer(&arm_state->suspend_timer);
	arm_state->suspend_timer.expires = jiffies +
		msecs_to_jiffies(arm_state->suspend_timer_timeout);
2481 	add_timer(&arm_state->suspend_timer);
2482 	arm_state->suspend_timer_running = 1;
2483 }
2484 
2485 /* should be called with the write lock held */
2486 static inline void
2487 stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
2488 {
2489 	if (arm_state->suspend_timer_running) {
2490 		del_timer(&arm_state->suspend_timer);
2491 		arm_state->suspend_timer_running = 0;
2492 	}
2493 }
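
/*
 * Illustrative timeline (assumed flow, derived from the helpers above and
 * vchiq_release_internal()/suspend_timer_callback() below): how the last
 * "release" becomes a suspend attempt once videocore has been idle for
 * SUSPEND_TIMER_TIMEOUT_MS:
 *
 *	vchiq_release_internal()
 *		-> start_suspend_timer()	(use count reached zero)
 *	... SUSPEND_TIMER_TIMEOUT_MS elapses ...
 *	suspend_timer_callback()
 *		-> vchiq_check_suspend()
 *			-> vchiq_arm_vcsuspend()	(if still not wanted)
 */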
2494 
2495 static inline int
2496 need_resume(VCHIQ_STATE_T *state)
2497 {
2498 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2499 
2500 	return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
2501 			(arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
2502 			vchiq_videocore_wanted(state);
2503 }
2504 
static VCHIQ_STATUS_T
block_resume(VCHIQ_ARM_STATE_T *arm_state)
{
	VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2509 	const unsigned long timeout_val =
2510 				msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS);
2511 	int resume_count = 0;
2512 
2513 	/* Allow any threads which were blocked by the last force suspend to
2514 	 * complete if they haven't already.  Only give this one shot; if
2515 	 * blocked_count is incremented after blocked_blocker is completed
2516 	 * (which only happens when blocked_count hits 0) then those threads
2517 	 * will have to wait until next time around */
2518 	if (arm_state->blocked_count) {
2519 		reinit_completion(&arm_state->blocked_blocker);
2520 		write_unlock_bh(&arm_state->susp_res_lock);
2521 		vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
2522 			"blocked clients", __func__);
2523 		if (wait_for_completion_interruptible_timeout(
2524 				&arm_state->blocked_blocker, timeout_val)
2525 					<= 0) {
2526 			vchiq_log_error(vchiq_susp_log_level, "%s wait for "
2527 				"previously blocked clients failed", __func__);
2528 			status = VCHIQ_ERROR;
2529 			write_lock_bh(&arm_state->susp_res_lock);
2530 			goto out;
2531 		}
2532 		vchiq_log_info(vchiq_susp_log_level, "%s previously blocked "
2533 			"clients resumed", __func__);
2534 		write_lock_bh(&arm_state->susp_res_lock);
2535 	}
2536 
2537 	/* We need to wait for resume to complete if it's in process */
2538 	while (arm_state->vc_resume_state != VC_RESUME_RESUMED &&
2539 			arm_state->vc_resume_state > VC_RESUME_IDLE) {
2540 		if (resume_count > 1) {
2541 			status = VCHIQ_ERROR;
2542 			vchiq_log_error(vchiq_susp_log_level, "%s waited too "
2543 				"many times for resume", __func__);
2544 			goto out;
2545 		}
2546 		write_unlock_bh(&arm_state->susp_res_lock);
2547 		vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
2548 			__func__);
2549 		if (wait_for_completion_interruptible_timeout(
2550 				&arm_state->vc_resume_complete, timeout_val)
2551 					<= 0) {
2552 			vchiq_log_error(vchiq_susp_log_level, "%s wait for "
2553 				"resume failed (%s)", __func__,
2554 				resume_state_names[arm_state->vc_resume_state +
2555 							VC_RESUME_NUM_OFFSET]);
2556 			status = VCHIQ_ERROR;
2557 			write_lock_bh(&arm_state->susp_res_lock);
2558 			goto out;
2559 		}
2560 		vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__);
2561 		write_lock_bh(&arm_state->susp_res_lock);
2562 		resume_count++;
2563 	}
2564 	reinit_completion(&arm_state->resume_blocker);
2565 	arm_state->resume_blocked = 1;
2566 
2567 out:
2568 	return status;
2569 }
2570 
2571 static inline void
2572 unblock_resume(VCHIQ_ARM_STATE_T *arm_state)
2573 {
2574 	complete_all(&arm_state->resume_blocker);
2575 	arm_state->resume_blocked = 0;
2576 }
2577 
2578 /* Initiate suspend via slot handler. Should be called with the write lock
2579  * held */
2580 VCHIQ_STATUS_T
2581 vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
2582 {
2583 	VCHIQ_STATUS_T status = VCHIQ_ERROR;
2584 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2585 
2586 	if (!arm_state)
2587 		goto out;
2588 
2589 	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2590 	status = VCHIQ_SUCCESS;
2591 
2592 
2593 	switch (arm_state->vc_suspend_state) {
2594 	case VC_SUSPEND_REQUESTED:
2595 		vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
2596 			"requested", __func__);
2597 		break;
2598 	case VC_SUSPEND_IN_PROGRESS:
2599 		vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
2600 			"progress", __func__);
2601 		break;
2602 
2603 	default:
2604 		/* We don't expect to be in other states, so log but continue
2605 		 * anyway */
2606 		vchiq_log_error(vchiq_susp_log_level,
2607 			"%s unexpected suspend state %s", __func__,
2608 			suspend_state_names[arm_state->vc_suspend_state +
2609 						VC_SUSPEND_NUM_OFFSET]);
2610 		/* fall through */
2611 	case VC_SUSPEND_REJECTED:
2612 	case VC_SUSPEND_FAILED:
2613 		/* Ensure any idle state actions have been run */
2614 		set_suspend_state(arm_state, VC_SUSPEND_IDLE);
2615 		/* fall through */
2616 	case VC_SUSPEND_IDLE:
2617 		vchiq_log_info(vchiq_susp_log_level,
2618 			"%s: suspending", __func__);
2619 		set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
2620 		/* kick the slot handler thread to initiate suspend */
2621 		request_poll(state, NULL, 0);
2622 		break;
2623 	}
2624 
2625 out:
2626 	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
2627 	return status;
2628 }
2629 
2630 void
2631 vchiq_platform_check_suspend(VCHIQ_STATE_T *state)
2632 {
2633 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2634 	int susp = 0;
2635 
2636 	if (!arm_state)
2637 		goto out;
2638 
2639 	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2640 
2641 	write_lock_bh(&arm_state->susp_res_lock);
2642 	if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
2643 			arm_state->vc_resume_state == VC_RESUME_RESUMED) {
2644 		set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
2645 		susp = 1;
2646 	}
2647 	write_unlock_bh(&arm_state->susp_res_lock);
2648 
2649 	if (susp)
2650 		vchiq_platform_suspend(state);
2651 
2652 out:
2653 	vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2654 	return;
2655 }
2656 
2657 
2658 static void
2659 output_timeout_error(VCHIQ_STATE_T *state)
2660 {
2661 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2662 	char err[50] = "";
2663 	int vc_use_count = arm_state->videocore_use_count;
2664 	int active_services = state->unused_service;
2665 	int i;
2666 
2667 	if (!arm_state->videocore_use_count) {
2668 		snprintf(err, sizeof(err), " Videocore usecount is 0");
2669 		goto output_msg;
2670 	}
2671 	for (i = 0; i < active_services; i++) {
2672 		VCHIQ_SERVICE_T *service_ptr = state->services[i];
2673 
2674 		if (service_ptr && service_ptr->service_use_count &&
2675 			(service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
2676 			snprintf(err, sizeof(err), " %c%c%c%c(%d) service has "
2677 				"use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
2678 					service_ptr->base.fourcc),
2679 				 service_ptr->client_id,
2680 				 service_ptr->service_use_count,
2681 				 service_ptr->service_use_count ==
2682 					 vc_use_count ? "" : " (+ more)");
2683 			break;
2684 		}
2685 	}
2686 
2687 output_msg:
2688 	vchiq_log_error(vchiq_susp_log_level,
2689 		"timed out waiting for vc suspend (%d).%s",
2690 		 arm_state->autosuspend_override, err);
2691 
2692 }
2693 
2694 /* Try to get videocore into suspended state, regardless of autosuspend state.
2695 ** We don't actually force suspend, since videocore may get into a bad state
2696 ** if we force suspend at a bad time.  Instead, we wait for autosuspend to
2697 ** determine a good point to suspend.  If this doesn't happen within 100ms we
2698 ** report failure.
2699 **
2700 ** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
2701 ** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
2702 */
2703 VCHIQ_STATUS_T
2704 vchiq_arm_force_suspend(VCHIQ_STATE_T *state)
2705 {
2706 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2707 	VCHIQ_STATUS_T status = VCHIQ_ERROR;
2708 	long rc = 0;
2709 	int repeat = -1;
2710 
2711 	if (!arm_state)
2712 		goto out;
2713 
2714 	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2715 
2716 	write_lock_bh(&arm_state->susp_res_lock);
2717 
2718 	status = block_resume(arm_state);
2719 	if (status != VCHIQ_SUCCESS)
2720 		goto unlock;
2721 	if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
2722 		/* Already suspended - just block resume and exit */
2723 		vchiq_log_info(vchiq_susp_log_level, "%s already suspended",
2724 			__func__);
2725 		status = VCHIQ_SUCCESS;
2726 		goto unlock;
2727 	} else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {
2728 		/* initiate suspend immediately in the case that we're waiting
2729 		 * for the timeout */
2730 		stop_suspend_timer(arm_state);
2731 		if (!vchiq_videocore_wanted(state)) {
2732 			vchiq_log_info(vchiq_susp_log_level, "%s videocore "
2733 				"idle, initiating suspend", __func__);
2734 			status = vchiq_arm_vcsuspend(state);
2735 		} else if (arm_state->autosuspend_override <
2736 						FORCE_SUSPEND_FAIL_MAX) {
2737 			vchiq_log_info(vchiq_susp_log_level, "%s letting "
2738 				"videocore go idle", __func__);
2739 			status = VCHIQ_SUCCESS;
2740 		} else {
2741 			vchiq_log_warning(vchiq_susp_log_level, "%s failed too "
2742 				"many times - attempting suspend", __func__);
2743 			status = vchiq_arm_vcsuspend(state);
2744 		}
2745 	} else {
2746 		vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "
2747 			"in progress - wait for completion", __func__);
2748 		status = VCHIQ_SUCCESS;
2749 	}
2750 
	/* Wait for suspend to happen due to system idle (not forced) */
2752 	if (status != VCHIQ_SUCCESS)
2753 		goto unblock_resume;
2754 
2755 	do {
2756 		write_unlock_bh(&arm_state->susp_res_lock);
2757 
2758 		rc = wait_for_completion_interruptible_timeout(
2759 				&arm_state->vc_suspend_complete,
2760 				msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));
2761 
2762 		write_lock_bh(&arm_state->susp_res_lock);
2763 		if (rc < 0) {
2764 			vchiq_log_warning(vchiq_susp_log_level, "%s "
2765 				"interrupted waiting for suspend", __func__);
2766 			status = VCHIQ_ERROR;
2767 			goto unblock_resume;
2768 		} else if (rc == 0) {
2769 			if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {
2770 				/* Repeat timeout once if in progress */
2771 				if (repeat < 0) {
2772 					repeat = 1;
2773 					continue;
2774 				}
2775 			}
2776 			arm_state->autosuspend_override++;
2777 			output_timeout_error(state);
2778 
2779 			status = VCHIQ_RETRY;
2780 			goto unblock_resume;
2781 		}
2782 	} while (0 < (repeat--));
2783 
2784 	/* Check and report state in case we need to abort ARM suspend */
2785 	if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {
2786 		status = VCHIQ_RETRY;
2787 		vchiq_log_error(vchiq_susp_log_level,
2788 			"%s videocore suspend failed (state %s)", __func__,
2789 			suspend_state_names[arm_state->vc_suspend_state +
2790 						VC_SUSPEND_NUM_OFFSET]);
2791 		/* Reset the state only if it's still in an error state.
2792 		 * Something could have already initiated another suspend. */
2793 		if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)
2794 			set_suspend_state(arm_state, VC_SUSPEND_IDLE);
2795 
2796 		goto unblock_resume;
2797 	}
2798 
2799 	/* successfully suspended - unlock and exit */
2800 	goto unlock;
2801 
2802 unblock_resume:
2803 	/* all error states need to unblock resume before exit */
2804 	unblock_resume(arm_state);
2805 
2806 unlock:
2807 	write_unlock_bh(&arm_state->susp_res_lock);
2808 
2809 out:
2810 	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
2811 	return status;
2812 }
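
/*
 * Illustrative sketch (hypothetical caller, not code from this driver): a
 * platform suspend hook mapping the tri-state result documented above onto
 * errno-style returns:
 *
 *	VCHIQ_STATUS_T status = vchiq_arm_force_suspend(state);
 *
 *	if (status == VCHIQ_RETRY)
 *		return -EAGAIN;		(didn't suspend in time - retry)
 *	if (status != VCHIQ_SUCCESS)
 *		return -EINTR;		(interrupted - abort the suspend)
 *	return 0;
 */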
2813 
2814 void
2815 vchiq_check_suspend(VCHIQ_STATE_T *state)
2816 {
2817 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2818 
2819 	if (!arm_state)
2820 		goto out;
2821 
2822 	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2823 
2824 	write_lock_bh(&arm_state->susp_res_lock);
2825 	if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
2826 			arm_state->first_connect &&
2827 			!vchiq_videocore_wanted(state)) {
2828 		vchiq_arm_vcsuspend(state);
2829 	}
2830 	write_unlock_bh(&arm_state->susp_res_lock);
2831 
2832 out:
2833 	vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2834 	return;
2835 }
2836 
2837 
2838 int
2839 vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
2840 {
2841 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2842 	int resume = 0;
2843 	int ret = -1;
2844 
2845 	if (!arm_state)
2846 		goto out;
2847 
2848 	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2849 
2850 	write_lock_bh(&arm_state->susp_res_lock);
2851 	unblock_resume(arm_state);
2852 	resume = vchiq_check_resume(state);
2853 	write_unlock_bh(&arm_state->susp_res_lock);
2854 
2855 	if (resume) {
2856 		if (wait_for_completion_interruptible(
2857 			&arm_state->vc_resume_complete) < 0) {
2858 			vchiq_log_error(vchiq_susp_log_level,
2859 				"%s interrupted", __func__);
2860 			/* failed, cannot accurately derive suspend
2861 			 * state, so exit early. */
2862 			goto out;
2863 		}
2864 	}
2865 
2866 	read_lock_bh(&arm_state->susp_res_lock);
2867 	if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
2868 		vchiq_log_info(vchiq_susp_log_level,
2869 				"%s: Videocore remains suspended", __func__);
2870 	} else {
2871 		vchiq_log_info(vchiq_susp_log_level,
2872 				"%s: Videocore resumed", __func__);
2873 		ret = 0;
2874 	}
2875 	read_unlock_bh(&arm_state->susp_res_lock);
2876 out:
2877 	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2878 	return ret;
2879 }
2880 
2881 /* This function should be called with the write lock held */
2882 int
2883 vchiq_check_resume(VCHIQ_STATE_T *state)
2884 {
2885 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2886 	int resume = 0;
2887 
2888 	if (!arm_state)
2889 		goto out;
2890 
2891 	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2892 
2893 	if (need_resume(state)) {
2894 		set_resume_state(arm_state, VC_RESUME_REQUESTED);
2895 		request_poll(state, NULL, 0);
2896 		resume = 1;
2897 	}
2898 
2899 out:
2900 	vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2901 	return resume;
2902 }
2903 
2904 VCHIQ_STATUS_T
2905 vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
2906 		enum USE_TYPE_E use_type)
2907 {
2908 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2909 	VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
2910 	char entity[16];
2911 	int *entity_uc;
2912 	int local_uc, local_entity_uc;
2913 
2914 	if (!arm_state)
2915 		goto out;
2916 
2917 	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2918 
2919 	if (use_type == USE_TYPE_VCHIQ) {
2920 		sprintf(entity, "VCHIQ:   ");
2921 		entity_uc = &arm_state->peer_use_count;
2922 	} else if (service) {
2923 		sprintf(entity, "%c%c%c%c:%03d",
2924 			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2925 			service->client_id);
2926 		entity_uc = &service->service_use_count;
2927 	} else {
2928 		vchiq_log_error(vchiq_susp_log_level, "%s null service "
2929 				"ptr", __func__);
2930 		ret = VCHIQ_ERROR;
2931 		goto out;
2932 	}
2933 
2934 	write_lock_bh(&arm_state->susp_res_lock);
2935 	while (arm_state->resume_blocked) {
2936 		/* If we call 'use' while force suspend is waiting for suspend,
2937 		 * then we're about to block the thread which the force is
2938 		 * waiting to complete, so we're bound to just time out. In this
2939 		 * case, set the suspend state such that the wait will be
2940 		 * canceled, so we can complete as quickly as possible. */
2941 		if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
2942 				VC_SUSPEND_IDLE) {
2943 			set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
2944 			break;
2945 		}
2946 		/* If suspend is already in progress then we need to block */
2947 		if (!try_wait_for_completion(&arm_state->resume_blocker)) {
2948 			/* Indicate that there are threads waiting on the resume
2949 			 * blocker.  These need to be allowed to complete before
2950 			 * a _second_ call to force suspend can complete,
2951 			 * otherwise low priority threads might never actually
2952 			 * continue */
2953 			arm_state->blocked_count++;
2954 			write_unlock_bh(&arm_state->susp_res_lock);
2955 			vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
2956 				"blocked - waiting...", __func__, entity);
2957 			if (wait_for_completion_killable(
2958 					&arm_state->resume_blocker) != 0) {
2959 				vchiq_log_error(vchiq_susp_log_level, "%s %s "
2960 					"wait for resume blocker interrupted",
2961 					__func__, entity);
2962 				ret = VCHIQ_ERROR;
2963 				write_lock_bh(&arm_state->susp_res_lock);
2964 				arm_state->blocked_count--;
2965 				write_unlock_bh(&arm_state->susp_res_lock);
2966 				goto out;
2967 			}
2968 			vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
2969 				"unblocked", __func__, entity);
2970 			write_lock_bh(&arm_state->susp_res_lock);
2971 			if (--arm_state->blocked_count == 0)
2972 				complete_all(&arm_state->blocked_blocker);
2973 		}
2974 	}
2975 
2976 	stop_suspend_timer(arm_state);
2977 
2978 	local_uc = ++arm_state->videocore_use_count;
2979 	local_entity_uc = ++(*entity_uc);
2980 
2981 	/* If there's a pending request which hasn't yet been serviced then
2982 	 * just clear it.  If we're past VC_SUSPEND_REQUESTED state then
2983 	 * vc_resume_complete will block until we either resume or fail to
2984 	 * suspend */
2985 	if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
2986 		set_suspend_state(arm_state, VC_SUSPEND_IDLE);
2987 
2988 	if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
2989 		set_resume_state(arm_state, VC_RESUME_REQUESTED);
2990 		vchiq_log_info(vchiq_susp_log_level,
2991 			"%s %s count %d, state count %d",
2992 			__func__, entity, local_entity_uc, local_uc);
2993 		request_poll(state, NULL, 0);
2994 	} else
2995 		vchiq_log_trace(vchiq_susp_log_level,
2996 			"%s %s count %d, state count %d",
2997 			__func__, entity, *entity_uc, local_uc);
2998 
2999 
3000 	write_unlock_bh(&arm_state->susp_res_lock);
3001 
3002 	/* Completion is in a done state when we're not suspended, so this won't
3003 	 * block for the non-suspended case. */
3004 	if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
3005 		vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
3006 			__func__, entity);
3007 		if (wait_for_completion_killable(
3008 				&arm_state->vc_resume_complete) != 0) {
3009 			vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
3010 				"resume interrupted", __func__, entity);
3011 			ret = VCHIQ_ERROR;
3012 			goto out;
3013 		}
3014 		vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
3015 			entity);
3016 	}
3017 
3018 	if (ret == VCHIQ_SUCCESS) {
3019 		VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
3020 		long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
3021 
3022 		while (ack_cnt && (status == VCHIQ_SUCCESS)) {
3023 			/* Send the use notify to videocore */
3024 			status = vchiq_send_remote_use_active(state);
3025 			if (status == VCHIQ_SUCCESS)
3026 				ack_cnt--;
3027 			else
3028 				atomic_add(ack_cnt,
3029 					&arm_state->ka_use_ack_count);
3030 		}
3031 	}
3032 
3033 out:
3034 	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
3035 	return ret;
3036 }
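
/*
 * Illustrative sequence (assumed; condensed from vchiq_use_internal() above
 * and block_resume()/unblock_resume() earlier): what a "use" caller sees
 * while a force suspend is holding resume blocked:
 *
 *	block_resume():        resume_blocked = 1, resume_blocker re-armed
 *	vchiq_use_internal():  blocked_count++,
 *	                       wait_for_completion_killable(&resume_blocker)
 *	unblock_resume():      complete_all(&resume_blocker), resume_blocked = 0
 *	vchiq_use_internal():  wakes; --blocked_count == 0 completes
 *	                       blocked_blocker for the next force suspend
 */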
3037 
3038 VCHIQ_STATUS_T
3039 vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service)
3040 {
3041 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
3042 	VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
3043 	char entity[16];
3044 	int *entity_uc;
3045 	int local_uc, local_entity_uc;
3046 
3047 	if (!arm_state)
3048 		goto out;
3049 
3050 	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
3051 
3052 	if (service) {
3053 		sprintf(entity, "%c%c%c%c:%03d",
3054 			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
3055 			service->client_id);
3056 		entity_uc = &service->service_use_count;
3057 	} else {
3058 		sprintf(entity, "PEER:   ");
3059 		entity_uc = &arm_state->peer_use_count;
3060 	}
3061 
3062 	write_lock_bh(&arm_state->susp_res_lock);
3063 	if (!arm_state->videocore_use_count || !(*entity_uc)) {
3064 		/* Don't use BUG_ON - don't allow user thread to crash kernel */
3065 		WARN_ON(!arm_state->videocore_use_count);
3066 		WARN_ON(!(*entity_uc));
3067 		ret = VCHIQ_ERROR;
3068 		goto unlock;
3069 	}
3070 	local_uc = --arm_state->videocore_use_count;
3071 	local_entity_uc = --(*entity_uc);
3072 
3073 	if (!vchiq_videocore_wanted(state)) {
3074 		if (vchiq_platform_use_suspend_timer() &&
3075 				!arm_state->resume_blocked) {
3076 			/* Only use the timer if we're not trying to force
3077 			 * suspend (=> resume_blocked) */
3078 			start_suspend_timer(arm_state);
3079 		} else {
3080 			vchiq_log_info(vchiq_susp_log_level,
3081 				"%s %s count %d, state count %d - suspending",
3082 				__func__, entity, *entity_uc,
3083 				arm_state->videocore_use_count);
3084 			vchiq_arm_vcsuspend(state);
3085 		}
3086 	} else
3087 		vchiq_log_trace(vchiq_susp_log_level,
3088 			"%s %s count %d, state count %d",
3089 			__func__, entity, *entity_uc,
3090 			arm_state->videocore_use_count);
3091 
3092 unlock:
3093 	write_unlock_bh(&arm_state->susp_res_lock);
3094 
3095 out:
3096 	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
3097 	return ret;
3098 }
3099 
3100 void
3101 vchiq_on_remote_use(VCHIQ_STATE_T *state)
3102 {
3103 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
3104 
3105 	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
3106 	atomic_inc(&arm_state->ka_use_count);
3107 	complete(&arm_state->ka_evt);
3108 }
3109 
3110 void
3111 vchiq_on_remote_release(VCHIQ_STATE_T *state)
3112 {
3113 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
3114 
3115 	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
3116 	atomic_inc(&arm_state->ka_release_count);
3117 	complete(&arm_state->ka_evt);
3118 }
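
/*
 * Illustrative flow (assumed, condensed from the two notifiers above and
 * vchiq_keepalive_thread_func()): how remote use/release notifications are
 * turned into local service use counts without sleeping in the notifier:
 *
 *	vchiq_on_remote_use(state)		(message-handling context)
 *		-> atomic_inc(&arm_state->ka_use_count);
 *		-> complete(&arm_state->ka_evt);
 *	vchiq_keepalive_thread_func		(keepalive thread wakes)
 *		-> uc = atomic_xchg(&arm_state->ka_use_count, 0);
 *		-> vchiq_use_service(ka_handle);	(uc times)
 */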
3119 
3120 VCHIQ_STATUS_T
3121 vchiq_use_service_internal(VCHIQ_SERVICE_T *service)
3122 {
3123 	return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
3124 }
3125 
3126 VCHIQ_STATUS_T
3127 vchiq_release_service_internal(VCHIQ_SERVICE_T *service)
3128 {
3129 	return vchiq_release_internal(service->state, service);
3130 }
3131 
3132 VCHIQ_DEBUGFS_NODE_T *
3133 vchiq_instance_get_debugfs_node(VCHIQ_INSTANCE_T instance)
3134 {
3135 	return &instance->debugfs_node;
3136 }
3137 
3138 int
3139 vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
3140 {
3141 	VCHIQ_SERVICE_T *service;
3142 	int use_count = 0, i;
3143 
3144 	i = 0;
3145 	while ((service = next_service_by_instance(instance->state,
3146 		instance, &i)) != NULL) {
3147 		use_count += service->service_use_count;
3148 		unlock_service(service);
3149 	}
3150 	return use_count;
3151 }
3152 
3153 int
3154 vchiq_instance_get_pid(VCHIQ_INSTANCE_T instance)
3155 {
3156 	return instance->pid;
3157 }
3158 
3159 int
3160 vchiq_instance_get_trace(VCHIQ_INSTANCE_T instance)
3161 {
3162 	return instance->trace;
3163 }
3164 
3165 void
3166 vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace)
3167 {
3168 	VCHIQ_SERVICE_T *service;
3169 	int i;
3170 
3171 	i = 0;
3172 	while ((service = next_service_by_instance(instance->state,
3173 		instance, &i)) != NULL) {
3174 		service->trace = trace;
3175 		unlock_service(service);
3176 	}
3177 	instance->trace = (trace != 0);
3178 }
3179 
3180 static void suspend_timer_callback(unsigned long context)
3181 {
3182 	VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;
3183 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
3184 
3185 	if (!arm_state)
3186 		goto out;
3187 	vchiq_log_info(vchiq_susp_log_level,
3188 		"%s - suspend timer expired - check suspend", __func__);
3189 	vchiq_check_suspend(state);
3190 out:
3191 	return;
3192 }
3193 
3194 VCHIQ_STATUS_T
3195 vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)
3196 {
3197 	VCHIQ_STATUS_T ret = VCHIQ_ERROR;
3198 	VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
3199 
3200 	if (service) {
3201 		ret = vchiq_use_internal(service->state, service,
3202 				USE_TYPE_SERVICE_NO_RESUME);
3203 		unlock_service(service);
3204 	}
3205 	return ret;
3206 }
3207 
3208 VCHIQ_STATUS_T
3209 vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
3210 {
3211 	VCHIQ_STATUS_T ret = VCHIQ_ERROR;
3212 	VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
3213 
3214 	if (service) {
3215 		ret = vchiq_use_internal(service->state, service,
3216 				USE_TYPE_SERVICE);
3217 		unlock_service(service);
3218 	}
3219 	return ret;
3220 }
3221 
3222 VCHIQ_STATUS_T
3223 vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
3224 {
3225 	VCHIQ_STATUS_T ret = VCHIQ_ERROR;
3226 	VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
3227 
3228 	if (service) {
3229 		ret = vchiq_release_internal(service->state, service);
3230 		unlock_service(service);
3231 	}
3232 	return ret;
3233 }
3234 
3235 void
3236 vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
3237 {
3238 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
3239 	int i, j = 0;
3240 	/* Only dump 64 services */
3241 	static const int local_max_services = 64;
3242 	/* If there's more than 64 services, only dump ones with
3243 	 * non-zero counts */
3244 	int only_nonzero = 0;
3245 	static const char *nz = "<-- preventing suspend";
3246 
3247 	enum vc_suspend_status vc_suspend_state;
3248 	enum vc_resume_status  vc_resume_state;
3249 	int peer_count;
3250 	int vc_use_count;
3251 	int active_services;
3252 	struct service_data_struct {
3253 		int fourcc;
3254 		int clientid;
3255 		int use_count;
3256 	} service_data[local_max_services];
3257 
3258 	if (!arm_state)
3259 		return;
3260 
3261 	read_lock_bh(&arm_state->susp_res_lock);
3262 	vc_suspend_state = arm_state->vc_suspend_state;
3263 	vc_resume_state  = arm_state->vc_resume_state;
3264 	peer_count = arm_state->peer_use_count;
3265 	vc_use_count = arm_state->videocore_use_count;
3266 	active_services = state->unused_service;
3267 	if (active_services > local_max_services)
3268 		only_nonzero = 1;
3269 
3270 	for (i = 0; (i < active_services) && (j < local_max_services); i++) {
3271 		VCHIQ_SERVICE_T *service_ptr = state->services[i];
3272 
3273 		if (!service_ptr)
3274 			continue;
3275 
3276 		if (only_nonzero && !service_ptr->service_use_count)
3277 			continue;
3278 
3279 		if (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE) {
3280 			service_data[j].fourcc = service_ptr->base.fourcc;
3281 			service_data[j].clientid = service_ptr->client_id;
3282 			service_data[j++].use_count = service_ptr->
3283 							service_use_count;
3284 		}
3285 	}
3286 
3287 	read_unlock_bh(&arm_state->susp_res_lock);
3288 
3289 	vchiq_log_warning(vchiq_susp_log_level,
3290 		"-- Videcore suspend state: %s --",
3291 		suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
3292 	vchiq_log_warning(vchiq_susp_log_level,
3293 		"-- Videcore resume state: %s --",
3294 		resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);
3295 
3296 	if (only_nonzero)
3297 		vchiq_log_warning(vchiq_susp_log_level, "Too many active "
3298 			"services (%d).  Only dumping up to first %d services "
3299 			"with non-zero use-count", active_services,
3300 			local_max_services);
3301 
3302 	for (i = 0; i < j; i++) {
3303 		vchiq_log_warning(vchiq_susp_log_level,
3304 			"----- %c%c%c%c:%d service count %d %s",
3305 			VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
3306 			service_data[i].clientid,
3307 			service_data[i].use_count,
3308 			service_data[i].use_count ? nz : "");
3309 	}
3310 	vchiq_log_warning(vchiq_susp_log_level,
3311 		"----- VCHIQ use count count %d", peer_count);
3312 	vchiq_log_warning(vchiq_susp_log_level,
3313 		"--- Overall vchiq instance use count %d", vc_use_count);
3314 
3315 	vchiq_dump_platform_use_state(state);
3316 }
3317 
3318 VCHIQ_STATUS_T
3319 vchiq_check_service(VCHIQ_SERVICE_T *service)
3320 {
3321 	VCHIQ_ARM_STATE_T *arm_state;
3322 	VCHIQ_STATUS_T ret = VCHIQ_ERROR;
3323 
3324 	if (!service || !service->state)
3325 		goto out;
3326 
3327 	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
3328 
3329 	arm_state = vchiq_platform_get_arm_state(service->state);
3330 
3331 	read_lock_bh(&arm_state->susp_res_lock);
3332 	if (service->service_use_count)
3333 		ret = VCHIQ_SUCCESS;
3334 	read_unlock_bh(&arm_state->susp_res_lock);
3335 
3336 	if (ret == VCHIQ_ERROR) {
3337 		vchiq_log_error(vchiq_susp_log_level,
3338 			"%s ERROR - %c%c%c%c:%d service count %d, "
3339 			"state count %d, videocore suspend state %s", __func__,
3340 			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
3341 			service->client_id, service->service_use_count,
3342 			arm_state->videocore_use_count,
3343 			suspend_state_names[arm_state->vc_suspend_state +
3344 						VC_SUSPEND_NUM_OFFSET]);
3345 		vchiq_dump_service_use_state(service->state);
3346 	}
3347 out:
3348 	return ret;
3349 }
3350 
3351 /* stub functions */
3352 void vchiq_on_remote_use_active(VCHIQ_STATE_T *state)
3353 {
3354 	(void)state;
3355 }
3356 
3357 void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
3358 	VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)
3359 {
3360 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
3361 
3362 	vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
3363 		get_conn_state_name(oldstate), get_conn_state_name(newstate));
3364 	if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
3365 		write_lock_bh(&arm_state->susp_res_lock);
3366 		if (!arm_state->first_connect) {
3367 			char threadname[16];
3368 
3369 			arm_state->first_connect = 1;
3370 			write_unlock_bh(&arm_state->susp_res_lock);
3371 			snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
3372 				state->id);
3373 			arm_state->ka_thread = kthread_create(
3374 				&vchiq_keepalive_thread_func,
3375 				(void *)state,
3376 				threadname);
3377 			if (IS_ERR(arm_state->ka_thread)) {
3378 				vchiq_log_error(vchiq_susp_log_level,
3379 					"vchiq: FATAL: couldn't create thread %s",
3380 					threadname);
3381 			} else {
3382 				wake_up_process(arm_state->ka_thread);
3383 			}
3384 		} else
3385 			write_unlock_bh(&arm_state->susp_res_lock);
3386 	}
3387 }
3388 
3389 static int vchiq_probe(struct platform_device *pdev)
3390 {
3391 	struct device_node *fw_node;
3392 	struct rpi_firmware *fw;
3393 	int err;
3394 	void *ptr_err;
3395 
3396 	fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
3397 	if (!fw_node) {
3398 		dev_err(&pdev->dev, "Missing firmware node\n");
3399 		return -ENOENT;
3400 	}
3401 
3402 	fw = rpi_firmware_get(fw_node);
3403 	of_node_put(fw_node);
3404 	if (!fw)
3405 		return -EPROBE_DEFER;
3406 
3407 	platform_set_drvdata(pdev, fw);
3408 
3409 	err = vchiq_platform_init(pdev, &g_state);
3410 	if (err != 0)
3411 		goto failed_platform_init;
3412 
3413 	err = alloc_chrdev_region(&vchiq_devid, VCHIQ_MINOR, 1, DEVICE_NAME);
3414 	if (err != 0) {
3415 		vchiq_log_error(vchiq_arm_log_level,
3416 			"Unable to allocate device number");
3417 		goto failed_platform_init;
3418 	}
3419 	cdev_init(&vchiq_cdev, &vchiq_fops);
3420 	vchiq_cdev.owner = THIS_MODULE;
3421 	err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
3422 	if (err != 0) {
3423 		vchiq_log_error(vchiq_arm_log_level,
3424 			"Unable to register device");
3425 		goto failed_cdev_add;
3426 	}
3427 
3428 	/* create sysfs entries */
3429 	vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
3430 	ptr_err = vchiq_class;
3431 	if (IS_ERR(ptr_err))
3432 		goto failed_class_create;
3433 
3434 	vchiq_dev = device_create(vchiq_class, NULL,
3435 		vchiq_devid, NULL, "vchiq");
3436 	ptr_err = vchiq_dev;
3437 	if (IS_ERR(ptr_err))
3438 		goto failed_device_create;
3439 
3440 	/* create debugfs entries */
3441 	err = vchiq_debugfs_init();
3442 	if (err != 0)
3443 		goto failed_debugfs_init;
3444 
3445 	vchiq_log_info(vchiq_arm_log_level,
3446 		"vchiq: initialised - version %d (min %d), device %d.%d",
3447 		VCHIQ_VERSION, VCHIQ_VERSION_MIN,
3448 		MAJOR(vchiq_devid), MINOR(vchiq_devid));
3449 
3450 	return 0;
3451 
3452 failed_debugfs_init:
3453 	device_destroy(vchiq_class, vchiq_devid);
3454 failed_device_create:
3455 	class_destroy(vchiq_class);
failed_class_create:
	cdev_del(&vchiq_cdev);
	/* Only derive err from ptr_err on the class/device failure paths;
	 * the debugfs path arrives here with a valid ptr_err and err set. */
	if (IS_ERR(ptr_err))
		err = PTR_ERR(ptr_err);
3459 failed_cdev_add:
3460 	unregister_chrdev_region(vchiq_devid, 1);
3461 failed_platform_init:
3462 	vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
3463 	return err;
3464 }
3465 
3466 static int vchiq_remove(struct platform_device *pdev)
3467 {
3468 	vchiq_debugfs_deinit();
3469 	device_destroy(vchiq_class, vchiq_devid);
3470 	class_destroy(vchiq_class);
3471 	cdev_del(&vchiq_cdev);
3472 	unregister_chrdev_region(vchiq_devid, 1);
3473 
3474 	return 0;
3475 }
3476 
3477 static const struct of_device_id vchiq_of_match[] = {
3478 	{ .compatible = "brcm,bcm2835-vchiq", },
3479 	{},
3480 };
3481 MODULE_DEVICE_TABLE(of, vchiq_of_match);
3482 
3483 static struct platform_driver vchiq_driver = {
3484 	.driver = {
3485 		.name = "bcm2835_vchiq",
3486 		.of_match_table = vchiq_of_match,
3487 	},
3488 	.probe = vchiq_probe,
3489 	.remove = vchiq_remove,
3490 };
3491 module_platform_driver(vchiq_driver);
3492 
3493 MODULE_LICENSE("Dual BSD/GPL");
3494 MODULE_DESCRIPTION("Videocore VCHIQ driver");
3495 MODULE_AUTHOR("Broadcom Corporation");
3496