1 /*
2  * 3-axis accelerometer driver supporting the following Bosch-Sensortec chips:
3  *  - BMC150
4  *  - BMI055
5  *  - BMA255
6  *  - BMA250E
7  *  - BMA222E
8  *  - BMA280
9  *
10  * Copyright (c) 2014, Intel Corporation.
11  *
12  * This program is free software; you can redistribute it and/or modify it
13  * under the terms and conditions of the GNU General Public License,
14  * version 2, as published by the Free Software Foundation.
15  *
16  * This program is distributed in the hope it will be useful, but WITHOUT
17  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
19  * more details.
20  */
21 
22 #include <linux/module.h>
23 #include <linux/i2c.h>
24 #include <linux/interrupt.h>
25 #include <linux/delay.h>
26 #include <linux/slab.h>
27 #include <linux/acpi.h>
28 #include <linux/gpio/consumer.h>
29 #include <linux/pm.h>
30 #include <linux/pm_runtime.h>
31 #include <linux/iio/iio.h>
32 #include <linux/iio/sysfs.h>
33 #include <linux/iio/buffer.h>
34 #include <linux/iio/events.h>
35 #include <linux/iio/trigger.h>
36 #include <linux/iio/trigger_consumer.h>
37 #include <linux/iio/triggered_buffer.h>
38 #include <linux/regmap.h>
39 
40 #include "bmc150-accel.h"
41 
42 #define BMC150_ACCEL_DRV_NAME			"bmc150_accel"
43 #define BMC150_ACCEL_IRQ_NAME			"bmc150_accel_event"
44 
45 #define BMC150_ACCEL_REG_CHIP_ID		0x00
46 
47 #define BMC150_ACCEL_REG_INT_STATUS_2		0x0B
48 #define BMC150_ACCEL_ANY_MOTION_MASK		0x07
49 #define BMC150_ACCEL_ANY_MOTION_BIT_X		BIT(0)
50 #define BMC150_ACCEL_ANY_MOTION_BIT_Y		BIT(1)
51 #define BMC150_ACCEL_ANY_MOTION_BIT_Z		BIT(2)
52 #define BMC150_ACCEL_ANY_MOTION_BIT_SIGN	BIT(3)
53 
54 #define BMC150_ACCEL_REG_PMU_LPW		0x11
55 #define BMC150_ACCEL_PMU_MODE_MASK		0xE0
56 #define BMC150_ACCEL_PMU_MODE_SHIFT		5
57 #define BMC150_ACCEL_PMU_BIT_SLEEP_DUR_MASK	0x17
58 #define BMC150_ACCEL_PMU_BIT_SLEEP_DUR_SHIFT	1
59 
60 #define BMC150_ACCEL_REG_PMU_RANGE		0x0F
61 
62 #define BMC150_ACCEL_DEF_RANGE_2G		0x03
63 #define BMC150_ACCEL_DEF_RANGE_4G		0x05
64 #define BMC150_ACCEL_DEF_RANGE_8G		0x08
65 #define BMC150_ACCEL_DEF_RANGE_16G		0x0C
66 
67 /* Default BW: 125Hz */
68 #define BMC150_ACCEL_REG_PMU_BW		0x10
69 #define BMC150_ACCEL_DEF_BW			125
70 
71 #define BMC150_ACCEL_REG_INT_MAP_0		0x19
72 #define BMC150_ACCEL_INT_MAP_0_BIT_SLOPE	BIT(2)
73 
74 #define BMC150_ACCEL_REG_INT_MAP_1		0x1A
75 #define BMC150_ACCEL_INT_MAP_1_BIT_DATA		BIT(0)
76 #define BMC150_ACCEL_INT_MAP_1_BIT_FWM		BIT(1)
77 #define BMC150_ACCEL_INT_MAP_1_BIT_FFULL	BIT(2)
78 
79 #define BMC150_ACCEL_REG_INT_RST_LATCH		0x21
80 #define BMC150_ACCEL_INT_MODE_LATCH_RESET	0x80
81 #define BMC150_ACCEL_INT_MODE_LATCH_INT	0x0F
82 #define BMC150_ACCEL_INT_MODE_NON_LATCH_INT	0x00
83 
84 #define BMC150_ACCEL_REG_INT_EN_0		0x16
85 #define BMC150_ACCEL_INT_EN_BIT_SLP_X		BIT(0)
86 #define BMC150_ACCEL_INT_EN_BIT_SLP_Y		BIT(1)
87 #define BMC150_ACCEL_INT_EN_BIT_SLP_Z		BIT(2)
88 
89 #define BMC150_ACCEL_REG_INT_EN_1		0x17
90 #define BMC150_ACCEL_INT_EN_BIT_DATA_EN		BIT(4)
91 #define BMC150_ACCEL_INT_EN_BIT_FFULL_EN	BIT(5)
92 #define BMC150_ACCEL_INT_EN_BIT_FWM_EN		BIT(6)
93 
94 #define BMC150_ACCEL_REG_INT_OUT_CTRL		0x20
95 #define BMC150_ACCEL_INT_OUT_CTRL_INT1_LVL	BIT(0)
96 
97 #define BMC150_ACCEL_REG_INT_5			0x27
98 #define BMC150_ACCEL_SLOPE_DUR_MASK		0x03
99 
100 #define BMC150_ACCEL_REG_INT_6			0x28
101 #define BMC150_ACCEL_SLOPE_THRES_MASK		0xFF
102 
103 /* Slope duration in terms of number of samples */
104 #define BMC150_ACCEL_DEF_SLOPE_DURATION		1
105 /* in terms of multiples of g's/LSB, based on range */
106 #define BMC150_ACCEL_DEF_SLOPE_THRESHOLD	1
107 
108 #define BMC150_ACCEL_REG_XOUT_L		0x02
109 
110 #define BMC150_ACCEL_MAX_STARTUP_TIME_MS	100
111 
112 /* Sleep Duration values */
113 #define BMC150_ACCEL_SLEEP_500_MICRO		0x05
114 #define BMC150_ACCEL_SLEEP_1_MS		0x06
115 #define BMC150_ACCEL_SLEEP_2_MS		0x07
116 #define BMC150_ACCEL_SLEEP_4_MS		0x08
117 #define BMC150_ACCEL_SLEEP_6_MS		0x09
118 #define BMC150_ACCEL_SLEEP_10_MS		0x0A
119 #define BMC150_ACCEL_SLEEP_25_MS		0x0B
120 #define BMC150_ACCEL_SLEEP_50_MS		0x0C
121 #define BMC150_ACCEL_SLEEP_100_MS		0x0D
122 #define BMC150_ACCEL_SLEEP_500_MS		0x0E
123 #define BMC150_ACCEL_SLEEP_1_SEC		0x0F
124 
125 #define BMC150_ACCEL_REG_TEMP			0x08
126 #define BMC150_ACCEL_TEMP_CENTER_VAL		24
127 
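/* Each axis has a consecutive LSB/MSB register pair, starting at XOUT_L. */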
128 #define BMC150_ACCEL_AXIS_TO_REG(axis)	(BMC150_ACCEL_REG_XOUT_L + (axis * 2))
129 #define BMC150_AUTO_SUSPEND_DELAY_MS		2000
130 
131 #define BMC150_ACCEL_REG_FIFO_STATUS		0x0E
132 #define BMC150_ACCEL_REG_FIFO_CONFIG0		0x30
133 #define BMC150_ACCEL_REG_FIFO_CONFIG1		0x3E
134 #define BMC150_ACCEL_REG_FIFO_DATA		0x3F
135 #define BMC150_ACCEL_FIFO_LENGTH		32
136 
137 enum bmc150_accel_axis {
138 	AXIS_X,
139 	AXIS_Y,
140 	AXIS_Z,
141 };
142 
143 enum bmc150_power_modes {
144 	BMC150_ACCEL_SLEEP_MODE_NORMAL,
145 	BMC150_ACCEL_SLEEP_MODE_DEEP_SUSPEND,
146 	BMC150_ACCEL_SLEEP_MODE_LPM,
147 	BMC150_ACCEL_SLEEP_MODE_SUSPEND = 0x04,
148 };
149 
150 struct bmc150_scale_info {
151 	int scale;
152 	u8 reg_range;
153 };
154 
155 struct bmc150_accel_chip_info {
156 	const char *name;
157 	u8 chip_id;
158 	const struct iio_chan_spec *channels;
159 	int num_channels;
160 	const struct bmc150_scale_info scale_table[4];
161 };
162 
163 struct bmc150_accel_interrupt {
164 	const struct bmc150_accel_interrupt_info *info;
165 	atomic_t users;
166 };
167 
168 struct bmc150_accel_trigger {
169 	struct bmc150_accel_data *data;
170 	struct iio_trigger *indio_trig;
171 	int (*setup)(struct bmc150_accel_trigger *t, bool state);
172 	int intr;
173 	bool enabled;
174 };
175 
176 enum bmc150_accel_interrupt_id {
177 	BMC150_ACCEL_INT_DATA_READY,
178 	BMC150_ACCEL_INT_ANY_MOTION,
179 	BMC150_ACCEL_INT_WATERMARK,
180 	BMC150_ACCEL_INTERRUPTS,
181 };
182 
183 enum bmc150_accel_trigger_id {
184 	BMC150_ACCEL_TRIGGER_DATA_READY,
185 	BMC150_ACCEL_TRIGGER_ANY_MOTION,
186 	BMC150_ACCEL_TRIGGERS,
187 };
188 
189 struct bmc150_accel_data {
190 	struct regmap *regmap;
191 	struct device *dev;
192 	int irq;
193 	struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS];
194 	atomic_t active_intr;
195 	struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS];
196 	struct mutex mutex;
197 	u8 fifo_mode, watermark;
198 	s16 buffer[8];
199 	u8 bw_bits;
200 	u32 slope_dur;
201 	u32 slope_thres;
202 	u32 range;
203 	int ev_enable_state;
204 	int64_t timestamp, old_timestamp; /* Only used in hw fifo mode. */
205 	const struct bmc150_accel_chip_info *chip_info;
206 };
207 
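/*
 * Supported output data rates and the register codes that select them;
 * bw_bits is the value written to BMC150_ACCEL_REG_PMU_BW.
 */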
208 static const struct {
209 	int val;
210 	int val2;
211 	u8 bw_bits;
212 } bmc150_accel_samp_freq_table[] = { {15, 620000, 0x08},
213 				     {31, 260000, 0x09},
214 				     {62, 500000, 0x0A},
215 				     {125, 0, 0x0B},
216 				     {250, 0, 0x0C},
217 				     {500, 0, 0x0D},
218 				     {1000, 0, 0x0E},
219 				     {2000, 0, 0x0F} };
220 
221 static const struct {
222 	int bw_bits;
223 	int msec;
224 } bmc150_accel_sample_upd_time[] = { {0x08, 64},
225 				     {0x09, 32},
226 				     {0x0A, 16},
227 				     {0x0B, 8},
228 				     {0x0C, 4},
229 				     {0x0D, 2},
230 				     {0x0E, 1},
231 				     {0x0F, 1} };
232 
233 static const struct {
234 	int sleep_dur;
235 	u8 reg_value;
236 } bmc150_accel_sleep_value_table[] = { {0, 0},
237 				       {500, BMC150_ACCEL_SLEEP_500_MICRO},
238 				       {1000, BMC150_ACCEL_SLEEP_1_MS},
239 				       {2000, BMC150_ACCEL_SLEEP_2_MS},
240 				       {4000, BMC150_ACCEL_SLEEP_4_MS},
241 				       {6000, BMC150_ACCEL_SLEEP_6_MS},
242 				       {10000, BMC150_ACCEL_SLEEP_10_MS},
243 				       {25000, BMC150_ACCEL_SLEEP_25_MS},
244 				       {50000, BMC150_ACCEL_SLEEP_50_MS},
245 				       {100000, BMC150_ACCEL_SLEEP_100_MS},
246 				       {500000, BMC150_ACCEL_SLEEP_500_MS},
247 				       {1000000, BMC150_ACCEL_SLEEP_1_SEC} };
248 
249 static const struct regmap_config bmc150_i2c_regmap_conf = {
250 	.reg_bits = 8,
251 	.val_bits = 8,
252 	.max_register = 0x3f,
253 };
254 
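/*
 * Program BMC150_ACCEL_REG_PMU_LPW: the power mode goes into bits 7:5 and,
 * for the low-power modes, the sleep duration code into bits 4:1.
 */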
255 static int bmc150_accel_set_mode(struct bmc150_accel_data *data,
256 				 enum bmc150_power_modes mode,
257 				 int dur_us)
258 {
259 	int i;
260 	int ret;
261 	u8 lpw_bits;
262 	int dur_val = -1;
263 
264 	if (dur_us > 0) {
265 		for (i = 0; i < ARRAY_SIZE(bmc150_accel_sleep_value_table);
266 									 ++i) {
267 			if (bmc150_accel_sleep_value_table[i].sleep_dur ==
268 									dur_us)
269 				dur_val =
270 				bmc150_accel_sleep_value_table[i].reg_value;
271 		}
272 	} else {
273 		dur_val = 0;
274 	}
275 
276 	if (dur_val < 0)
277 		return -EINVAL;
278 
279 	lpw_bits = mode << BMC150_ACCEL_PMU_MODE_SHIFT;
280 	lpw_bits |= (dur_val << BMC150_ACCEL_PMU_BIT_SLEEP_DUR_SHIFT);
281 
282 	dev_dbg(data->dev, "Set Mode bits %x\n", lpw_bits);
283 
284 	ret = regmap_write(data->regmap, BMC150_ACCEL_REG_PMU_LPW, lpw_bits);
285 	if (ret < 0) {
286 		dev_err(data->dev, "Error writing reg_pmu_lpw\n");
287 		return ret;
288 	}
289 
290 	return 0;
291 }
292 
293 static int bmc150_accel_set_bw(struct bmc150_accel_data *data, int val,
294 			       int val2)
295 {
296 	int i;
297 	int ret;
298 
299 	for (i = 0; i < ARRAY_SIZE(bmc150_accel_samp_freq_table); ++i) {
300 		if (bmc150_accel_samp_freq_table[i].val == val &&
301 		    bmc150_accel_samp_freq_table[i].val2 == val2) {
302 			ret = regmap_write(data->regmap,
303 				BMC150_ACCEL_REG_PMU_BW,
304 				bmc150_accel_samp_freq_table[i].bw_bits);
305 			if (ret < 0)
306 				return ret;
307 
308 			data->bw_bits =
309 				bmc150_accel_samp_freq_table[i].bw_bits;
310 			return 0;
311 		}
312 	}
313 
314 	return -EINVAL;
315 }
316 
317 static int bmc150_accel_update_slope(struct bmc150_accel_data *data)
318 {
319 	int ret;
320 
321 	ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_6,
322 					data->slope_thres);
323 	if (ret < 0) {
324 		dev_err(data->dev, "Error writing reg_int_6\n");
325 		return ret;
326 	}
327 
328 	ret = regmap_update_bits(data->regmap, BMC150_ACCEL_REG_INT_5,
329 				 BMC150_ACCEL_SLOPE_DUR_MASK, data->slope_dur);
330 	if (ret < 0) {
331 		dev_err(data->dev, "Error updating reg_int_5\n");
332 		return ret;
333 	}
334 
335 	dev_dbg(data->dev, "%s: %x %x\n", __func__, data->slope_thres,
336 		data->slope_dur);
337 
338 	return ret;
339 }
340 
341 static int bmc150_accel_any_motion_setup(struct bmc150_accel_trigger *t,
342 					 bool state)
343 {
344 	if (state)
345 		return bmc150_accel_update_slope(t->data);
346 
347 	return 0;
348 }
349 
350 static int bmc150_accel_get_bw(struct bmc150_accel_data *data, int *val,
351 			       int *val2)
352 {
353 	int i;
354 
355 	for (i = 0; i < ARRAY_SIZE(bmc150_accel_samp_freq_table); ++i) {
356 		if (bmc150_accel_samp_freq_table[i].bw_bits == data->bw_bits) {
357 			*val = bmc150_accel_samp_freq_table[i].val;
358 			*val2 = bmc150_accel_samp_freq_table[i].val2;
359 			return IIO_VAL_INT_PLUS_MICRO;
360 		}
361 	}
362 
363 	return -EINVAL;
364 }
365 
366 #ifdef CONFIG_PM
367 static int bmc150_accel_get_startup_times(struct bmc150_accel_data *data)
368 {
369 	int i;
370 
371 	for (i = 0; i < ARRAY_SIZE(bmc150_accel_sample_upd_time); ++i) {
372 		if (bmc150_accel_sample_upd_time[i].bw_bits == data->bw_bits)
373 			return bmc150_accel_sample_upd_time[i].msec;
374 	}
375 
376 	return BMC150_ACCEL_MAX_STARTUP_TIME_MS;
377 }
378 
379 static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
380 {
381 	int ret;
382 
383 	if (on) {
384 		ret = pm_runtime_get_sync(data->dev);
385 	} else {
386 		pm_runtime_mark_last_busy(data->dev);
387 		ret = pm_runtime_put_autosuspend(data->dev);
388 	}
389 
390 	if (ret < 0) {
391 		dev_err(data->dev,
392 			"Failed: bmc150_accel_set_power_state for %d\n", on);
393 		if (on)
394 			pm_runtime_put_noidle(data->dev);
395 
396 		return ret;
397 	}
398 
399 	return 0;
400 }
401 #else
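/* Without runtime PM the device stays powered on, so there is nothing to do. */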
402 static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
403 {
404 	return 0;
405 }
406 #endif
407 
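/*
 * Per-interrupt register description: which INT_MAP register/bit routes the
 * interrupt to an interrupt pin and which INT_EN register/bit enables the
 * interrupt source.
 */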
408 static const struct bmc150_accel_interrupt_info {
409 	u8 map_reg;
410 	u8 map_bitmask;
411 	u8 en_reg;
412 	u8 en_bitmask;
413 } bmc150_accel_interrupts[BMC150_ACCEL_INTERRUPTS] = {
414 	{ /* data ready interrupt */
415 		.map_reg = BMC150_ACCEL_REG_INT_MAP_1,
416 		.map_bitmask = BMC150_ACCEL_INT_MAP_1_BIT_DATA,
417 		.en_reg = BMC150_ACCEL_REG_INT_EN_1,
418 		.en_bitmask = BMC150_ACCEL_INT_EN_BIT_DATA_EN,
419 	},
420 	{  /* motion interrupt */
421 		.map_reg = BMC150_ACCEL_REG_INT_MAP_0,
422 		.map_bitmask = BMC150_ACCEL_INT_MAP_0_BIT_SLOPE,
423 		.en_reg = BMC150_ACCEL_REG_INT_EN_0,
424 		.en_bitmask =  BMC150_ACCEL_INT_EN_BIT_SLP_X |
425 			BMC150_ACCEL_INT_EN_BIT_SLP_Y |
426 			BMC150_ACCEL_INT_EN_BIT_SLP_Z
427 	},
428 	{ /* fifo watermark interrupt */
429 		.map_reg = BMC150_ACCEL_REG_INT_MAP_1,
430 		.map_bitmask = BMC150_ACCEL_INT_MAP_1_BIT_FWM,
431 		.en_reg = BMC150_ACCEL_REG_INT_EN_1,
432 		.en_bitmask = BMC150_ACCEL_INT_EN_BIT_FWM_EN,
433 	},
434 };
435 
436 static void bmc150_accel_interrupts_setup(struct iio_dev *indio_dev,
437 					  struct bmc150_accel_data *data)
438 {
439 	int i;
440 
441 	for (i = 0; i < BMC150_ACCEL_INTERRUPTS; i++)
442 		data->interrupts[i].info = &bmc150_accel_interrupts[i];
443 }
444 
445 static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
446 				      bool state)
447 {
448 	struct bmc150_accel_interrupt *intr = &data->interrupts[i];
449 	const struct bmc150_accel_interrupt_info *info = intr->info;
450 	int ret;
451 
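	/*
	 * Refcount the users of this interrupt: only the first enable and the
	 * last disable actually touch the hardware.
	 */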
452 	if (state) {
453 		if (atomic_inc_return(&intr->users) > 1)
454 			return 0;
455 	} else {
456 		if (atomic_dec_return(&intr->users) > 0)
457 			return 0;
458 	}
459 
460 	/*
461 	 * We expect enable and disable to happen in reverse order.  That is
462 	 * the case here anyway: our resume path uses synchronous runtime PM
463 	 * calls, while the suspend operation is delayed by the autosuspend
464 	 * timeout.
465 	 * So the disable operation still happens in the reverse order of the
466 	 * enable operation.  When runtime PM is disabled the device is always
467 	 * powered on, so the sequence doesn't matter.
468 	 */
469 	ret = bmc150_accel_set_power_state(data, state);
470 	if (ret < 0)
471 		return ret;
472 
473 	/* map the interrupt to the appropriate pins */
474 	ret = regmap_update_bits(data->regmap, info->map_reg, info->map_bitmask,
475 				 (state ? info->map_bitmask : 0));
476 	if (ret < 0) {
477 		dev_err(data->dev, "Error updating reg_int_map\n");
478 		goto out_fix_power_state;
479 	}
480 
481 	/* enable/disable the interrupt */
482 	ret = regmap_update_bits(data->regmap, info->en_reg, info->en_bitmask,
483 				 (state ? info->en_bitmask : 0));
484 	if (ret < 0) {
485 		dev_err(data->dev, "Error updating reg_int_en\n");
486 		goto out_fix_power_state;
487 	}
488 
489 	if (state)
490 		atomic_inc(&data->active_intr);
491 	else
492 		atomic_dec(&data->active_intr);
493 
494 	return 0;
495 
496 out_fix_power_state:
497 	bmc150_accel_set_power_state(data, false);
498 	return ret;
499 }
500 
501 static int bmc150_accel_set_scale(struct bmc150_accel_data *data, int val)
502 {
503 	int ret, i;
504 
505 	for (i = 0; i < ARRAY_SIZE(data->chip_info->scale_table); ++i) {
506 		if (data->chip_info->scale_table[i].scale == val) {
507 			ret = regmap_write(data->regmap,
508 				     BMC150_ACCEL_REG_PMU_RANGE,
509 				     data->chip_info->scale_table[i].reg_range);
510 			if (ret < 0) {
511 				dev_err(data->dev,
512 					"Error writing pmu_range\n");
513 				return ret;
514 			}
515 
516 			data->range = data->chip_info->scale_table[i].reg_range;
517 			return 0;
518 		}
519 	}
520 
521 	return -EINVAL;
522 }
523 
524 static int bmc150_accel_get_temp(struct bmc150_accel_data *data, int *val)
525 {
526 	int ret;
527 	unsigned int value;
528 
529 	mutex_lock(&data->mutex);
530 
531 	ret = regmap_read(data->regmap, BMC150_ACCEL_REG_TEMP, &value);
532 	if (ret < 0) {
533 		dev_err(data->dev, "Error reading reg_temp\n");
534 		mutex_unlock(&data->mutex);
535 		return ret;
536 	}
537 	*val = sign_extend32(value, 7);
538 
539 	mutex_unlock(&data->mutex);
540 
541 	return IIO_VAL_INT;
542 }
543 
544 static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
545 				 struct iio_chan_spec const *chan,
546 				 int *val)
547 {
548 	int ret;
549 	int axis = chan->scan_index;
550 	unsigned int raw_val;
551 
552 	mutex_lock(&data->mutex);
553 	ret = bmc150_accel_set_power_state(data, true);
554 	if (ret < 0) {
555 		mutex_unlock(&data->mutex);
556 		return ret;
557 	}
558 
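	/*
	 * Read the 16-bit axis value (LSB register first), then drop the
	 * unused low bits and sign-extend to the channel's resolution.
	 */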
559 	ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis),
560 			       &raw_val, 2);
561 	if (ret < 0) {
562 		dev_err(data->dev, "Error reading axis %d\n", axis);
563 		bmc150_accel_set_power_state(data, false);
564 		mutex_unlock(&data->mutex);
565 		return ret;
566 	}
567 	*val = sign_extend32(raw_val >> chan->scan_type.shift,
568 			     chan->scan_type.realbits - 1);
569 	ret = bmc150_accel_set_power_state(data, false);
570 	mutex_unlock(&data->mutex);
571 	if (ret < 0)
572 		return ret;
573 
574 	return IIO_VAL_INT;
575 }
576 
577 static int bmc150_accel_read_raw(struct iio_dev *indio_dev,
578 				 struct iio_chan_spec const *chan,
579 				 int *val, int *val2, long mask)
580 {
581 	struct bmc150_accel_data *data = iio_priv(indio_dev);
582 	int ret;
583 
584 	switch (mask) {
585 	case IIO_CHAN_INFO_RAW:
586 		switch (chan->type) {
587 		case IIO_TEMP:
588 			return bmc150_accel_get_temp(data, val);
589 		case IIO_ACCEL:
590 			if (iio_buffer_enabled(indio_dev))
591 				return -EBUSY;
592 			else
593 				return bmc150_accel_get_axis(data, chan, val);
594 		default:
595 			return -EINVAL;
596 		}
597 	case IIO_CHAN_INFO_OFFSET:
598 		if (chan->type == IIO_TEMP) {
599 			*val = BMC150_ACCEL_TEMP_CENTER_VAL;
600 			return IIO_VAL_INT;
601 		} else {
602 			return -EINVAL;
603 		}
604 	case IIO_CHAN_INFO_SCALE:
605 		*val = 0;
606 		switch (chan->type) {
607 		case IIO_TEMP:
608 			*val2 = 500000;
609 			return IIO_VAL_INT_PLUS_MICRO;
610 		case IIO_ACCEL:
611 		{
612 			int i;
613 			const struct bmc150_scale_info *si;
614 			int st_size = ARRAY_SIZE(data->chip_info->scale_table);
615 
616 			for (i = 0; i < st_size; ++i) {
617 				si = &data->chip_info->scale_table[i];
618 				if (si->reg_range == data->range) {
619 					*val2 = si->scale;
620 					return IIO_VAL_INT_PLUS_MICRO;
621 				}
622 			}
623 			return -EINVAL;
624 		}
625 		default:
626 			return -EINVAL;
627 		}
628 	case IIO_CHAN_INFO_SAMP_FREQ:
629 		mutex_lock(&data->mutex);
630 		ret = bmc150_accel_get_bw(data, val, val2);
631 		mutex_unlock(&data->mutex);
632 		return ret;
633 	default:
634 		return -EINVAL;
635 	}
636 }
637 
638 static int bmc150_accel_write_raw(struct iio_dev *indio_dev,
639 				  struct iio_chan_spec const *chan,
640 				  int val, int val2, long mask)
641 {
642 	struct bmc150_accel_data *data = iio_priv(indio_dev);
643 	int ret;
644 
645 	switch (mask) {
646 	case IIO_CHAN_INFO_SAMP_FREQ:
647 		mutex_lock(&data->mutex);
648 		ret = bmc150_accel_set_bw(data, val, val2);
649 		mutex_unlock(&data->mutex);
650 		break;
651 	case IIO_CHAN_INFO_SCALE:
652 		if (val)
653 			return -EINVAL;
654 
655 		mutex_lock(&data->mutex);
656 		ret = bmc150_accel_set_scale(data, val2);
657 		mutex_unlock(&data->mutex);
658 		return ret;
659 	default:
660 		ret = -EINVAL;
661 	}
662 
663 	return ret;
664 }
665 
666 static int bmc150_accel_read_event(struct iio_dev *indio_dev,
667 				   const struct iio_chan_spec *chan,
668 				   enum iio_event_type type,
669 				   enum iio_event_direction dir,
670 				   enum iio_event_info info,
671 				   int *val, int *val2)
672 {
673 	struct bmc150_accel_data *data = iio_priv(indio_dev);
674 
675 	*val2 = 0;
676 	switch (info) {
677 	case IIO_EV_INFO_VALUE:
678 		*val = data->slope_thres;
679 		break;
680 	case IIO_EV_INFO_PERIOD:
681 		*val = data->slope_dur;
682 		break;
683 	default:
684 		return -EINVAL;
685 	}
686 
687 	return IIO_VAL_INT;
688 }
689 
690 static int bmc150_accel_write_event(struct iio_dev *indio_dev,
691 				    const struct iio_chan_spec *chan,
692 				    enum iio_event_type type,
693 				    enum iio_event_direction dir,
694 				    enum iio_event_info info,
695 				    int val, int val2)
696 {
697 	struct bmc150_accel_data *data = iio_priv(indio_dev);
698 
699 	if (data->ev_enable_state)
700 		return -EBUSY;
701 
702 	switch (info) {
703 	case IIO_EV_INFO_VALUE:
704 		data->slope_thres = val & BMC150_ACCEL_SLOPE_THRES_MASK;
705 		break;
706 	case IIO_EV_INFO_PERIOD:
707 		data->slope_dur = val & BMC150_ACCEL_SLOPE_DUR_MASK;
708 		break;
709 	default:
710 		return -EINVAL;
711 	}
712 
713 	return 0;
714 }
715 
716 static int bmc150_accel_read_event_config(struct iio_dev *indio_dev,
717 					  const struct iio_chan_spec *chan,
718 					  enum iio_event_type type,
719 					  enum iio_event_direction dir)
720 {
721 	struct bmc150_accel_data *data = iio_priv(indio_dev);
722 
723 	return data->ev_enable_state;
724 }
725 
726 static int bmc150_accel_write_event_config(struct iio_dev *indio_dev,
727 					   const struct iio_chan_spec *chan,
728 					   enum iio_event_type type,
729 					   enum iio_event_direction dir,
730 					   int state)
731 {
732 	struct bmc150_accel_data *data = iio_priv(indio_dev);
733 	int ret;
734 
735 	if (state == data->ev_enable_state)
736 		return 0;
737 
738 	mutex_lock(&data->mutex);
739 
740 	ret = bmc150_accel_set_interrupt(data, BMC150_ACCEL_INT_ANY_MOTION,
741 					 state);
742 	if (ret < 0) {
743 		mutex_unlock(&data->mutex);
744 		return ret;
745 	}
746 
747 	data->ev_enable_state = state;
748 	mutex_unlock(&data->mutex);
749 
750 	return 0;
751 }
752 
753 static int bmc150_accel_validate_trigger(struct iio_dev *indio_dev,
754 					 struct iio_trigger *trig)
755 {
756 	struct bmc150_accel_data *data = iio_priv(indio_dev);
757 	int i;
758 
759 	for (i = 0; i < BMC150_ACCEL_TRIGGERS; i++) {
760 		if (data->triggers[i].indio_trig == trig)
761 			return 0;
762 	}
763 
764 	return -EINVAL;
765 }
766 
767 static ssize_t bmc150_accel_get_fifo_watermark(struct device *dev,
768 					       struct device_attribute *attr,
769 					       char *buf)
770 {
771 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
772 	struct bmc150_accel_data *data = iio_priv(indio_dev);
773 	int wm;
774 
775 	mutex_lock(&data->mutex);
776 	wm = data->watermark;
777 	mutex_unlock(&data->mutex);
778 
779 	return sprintf(buf, "%d\n", wm);
780 }
781 
782 static ssize_t bmc150_accel_get_fifo_state(struct device *dev,
783 					   struct device_attribute *attr,
784 					   char *buf)
785 {
786 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
787 	struct bmc150_accel_data *data = iio_priv(indio_dev);
788 	bool state;
789 
790 	mutex_lock(&data->mutex);
791 	state = data->fifo_mode;
792 	mutex_unlock(&data->mutex);
793 
794 	return sprintf(buf, "%d\n", state);
795 }
796 
797 static IIO_CONST_ATTR(hwfifo_watermark_min, "1");
798 static IIO_CONST_ATTR(hwfifo_watermark_max,
799 		      __stringify(BMC150_ACCEL_FIFO_LENGTH));
800 static IIO_DEVICE_ATTR(hwfifo_enabled, S_IRUGO,
801 		       bmc150_accel_get_fifo_state, NULL, 0);
802 static IIO_DEVICE_ATTR(hwfifo_watermark, S_IRUGO,
803 		       bmc150_accel_get_fifo_watermark, NULL, 0);
804 
805 static const struct attribute *bmc150_accel_fifo_attributes[] = {
806 	&iio_const_attr_hwfifo_watermark_min.dev_attr.attr,
807 	&iio_const_attr_hwfifo_watermark_max.dev_attr.attr,
808 	&iio_dev_attr_hwfifo_watermark.dev_attr.attr,
809 	&iio_dev_attr_hwfifo_enabled.dev_attr.attr,
810 	NULL,
811 };
812 
813 static int bmc150_accel_set_watermark(struct iio_dev *indio_dev, unsigned val)
814 {
815 	struct bmc150_accel_data *data = iio_priv(indio_dev);
816 
817 	if (val > BMC150_ACCEL_FIFO_LENGTH)
818 		val = BMC150_ACCEL_FIFO_LENGTH;
819 
820 	mutex_lock(&data->mutex);
821 	data->watermark = val;
822 	mutex_unlock(&data->mutex);
823 
824 	return 0;
825 }
826 
827 /*
828  * We must read at least one full frame in one burst, otherwise the rest of the
829  * frame data is discarded.
830  */
831 static int bmc150_accel_fifo_transfer(struct bmc150_accel_data *data,
832 				      char *buffer, int samples)
833 {
834 	int sample_length = 3 * 2;
835 	int ret;
836 	int total_length = samples * sample_length;
837 	int i;
838 	size_t step = regmap_get_raw_read_max(data->regmap);
839 
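	/*
	 * If the regmap raw-read limit is unset or covers the whole transfer,
	 * read everything in one burst; otherwise fall back to one complete
	 * frame (6 bytes) per read so that frames are never split.
	 */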
840 	if (!step || step > total_length)
841 		step = total_length;
842 	else if (step < total_length)
843 		step = sample_length;
844 
845 	/*
846 	 * The bus has a transfer size limitation, so we have to execute
847 	 * multiple reads.
848 	 */
849 	for (i = 0; i < total_length; i += step) {
850 		ret = regmap_raw_read(data->regmap, BMC150_ACCEL_REG_FIFO_DATA,
851 				      &buffer[i], step);
852 		if (ret)
853 			break;
854 	}
855 
856 	if (ret)
857 		dev_err(data->dev, "Error transferring data from fifo in single steps of %zu\n",
858 			step);
859 
860 	return ret;
861 }
862 
863 static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev,
864 				     unsigned samples, bool irq)
865 {
866 	struct bmc150_accel_data *data = iio_priv(indio_dev);
867 	int ret, i;
868 	u8 count;
869 	u16 buffer[BMC150_ACCEL_FIFO_LENGTH * 3];
870 	int64_t tstamp;
871 	uint64_t sample_period;
872 	unsigned int val;
873 
874 	ret = regmap_read(data->regmap, BMC150_ACCEL_REG_FIFO_STATUS, &val);
875 	if (ret < 0) {
876 		dev_err(data->dev, "Error reading reg_fifo_status\n");
877 		return ret;
878 	}
879 
880 	count = val & 0x7F;
881 
882 	if (!count)
883 		return 0;
884 
885 	/*
886 	 * If we are called from the IRQ handler, we know the stored timestamp
887 	 * is fairly accurate for the last stored sample. Otherwise, if we are
888 	 * called as a result of a read operation from userspace and hence
889 	 * before the watermark interrupt was triggered, take a timestamp
890 	 * now. We can fall anywhere between two samples, so the error in this
891 	 * case is at most one sample period.
892 	 */
893 	if (!irq) {
894 		data->old_timestamp = data->timestamp;
895 		data->timestamp = iio_get_time_ns();
896 	}
897 
898 	/*
899 	 * Approximate timestamps for each of the samples based on the sampling
900 	 * frequency, the timestamp of the last sample and the number of samples.
901 	 *
902 	 * Note that we can't use the current bandwidth settings to compute the
903 	 * sample period because the sample rate varies with the device
904 	 * (e.g. between 31.70ms to 32.20ms for a bandwidth of 15.63HZ). That
905 	 * small variation adds up when we store a large number of samples and
906 	 * creates significant jitter between the last and first samples in
907 	 * different batches (e.g. 32ms vs 21ms).
908 	 *
909 	 * To avoid this issue we compute the actual sample period ourselves
910 	 * based on the timestamp delta between the last two flush operations.
911 	 */
912 	sample_period = (data->timestamp - data->old_timestamp);
913 	do_div(sample_period, count);
914 	tstamp = data->timestamp - (count - 1) * sample_period;
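	/*
	 * For example, with four samples in the FIFO the oldest sample is
	 * stamped three sample periods before the most recent timestamp.
	 */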
915 
916 	if (samples && count > samples)
917 		count = samples;
918 
919 	ret = bmc150_accel_fifo_transfer(data, (u8 *)buffer, count);
920 	if (ret)
921 		return ret;
922 
923 	/*
924 	 * Ideally we want the IIO core to handle the demux when running in fifo
925 	 * mode but not when running in triggered buffer mode. Unfortunately
926 	 * this does not seem to be possible, so stick with driver demux for
927 	 * now.
928 	 */
929 	for (i = 0; i < count; i++) {
930 		u16 sample[8];
931 		int j, bit;
932 
933 		j = 0;
934 		for_each_set_bit(bit, indio_dev->active_scan_mask,
935 				 indio_dev->masklength)
936 			memcpy(&sample[j++], &buffer[i * 3 + bit], 2);
937 
938 		iio_push_to_buffers_with_timestamp(indio_dev, sample, tstamp);
939 
940 		tstamp += sample_period;
941 	}
942 
943 	return count;
944 }
945 
946 static int bmc150_accel_fifo_flush(struct iio_dev *indio_dev, unsigned samples)
947 {
948 	struct bmc150_accel_data *data = iio_priv(indio_dev);
949 	int ret;
950 
951 	mutex_lock(&data->mutex);
952 	ret = __bmc150_accel_fifo_flush(indio_dev, samples, false);
953 	mutex_unlock(&data->mutex);
954 
955 	return ret;
956 }
957 
958 static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
959 		"15.620000 31.260000 62.500000 125 250 500 1000 2000");
960 
961 static struct attribute *bmc150_accel_attributes[] = {
962 	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
963 	NULL,
964 };
965 
966 static const struct attribute_group bmc150_accel_attrs_group = {
967 	.attrs = bmc150_accel_attributes,
968 };
969 
970 static const struct iio_event_spec bmc150_accel_event = {
971 		.type = IIO_EV_TYPE_ROC,
972 		.dir = IIO_EV_DIR_EITHER,
973 		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
974 				 BIT(IIO_EV_INFO_ENABLE) |
975 				 BIT(IIO_EV_INFO_PERIOD)
976 };
977 
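/*
 * Acceleration samples are left-justified in a 16-bit word; scan_type.shift
 * discards the unused low-order bits on parts with less than 16 bits of
 * resolution.
 */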
978 #define BMC150_ACCEL_CHANNEL(_axis, bits) {				\
979 	.type = IIO_ACCEL,						\
980 	.modified = 1,							\
981 	.channel2 = IIO_MOD_##_axis,					\
982 	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),			\
983 	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |		\
984 				BIT(IIO_CHAN_INFO_SAMP_FREQ),		\
985 	.scan_index = AXIS_##_axis,					\
986 	.scan_type = {							\
987 		.sign = 's',						\
988 		.realbits = (bits),					\
989 		.storagebits = 16,					\
990 		.shift = 16 - (bits),					\
991 	},								\
992 	.event_spec = &bmc150_accel_event,				\
993 	.num_event_specs = 1						\
994 }
995 
996 #define BMC150_ACCEL_CHANNELS(bits) {					\
997 	{								\
998 		.type = IIO_TEMP,					\
999 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |		\
1000 				      BIT(IIO_CHAN_INFO_SCALE) |	\
1001 				      BIT(IIO_CHAN_INFO_OFFSET),	\
1002 		.scan_index = -1,					\
1003 	},								\
1004 	BMC150_ACCEL_CHANNEL(X, bits),					\
1005 	BMC150_ACCEL_CHANNEL(Y, bits),					\
1006 	BMC150_ACCEL_CHANNEL(Z, bits),					\
1007 	IIO_CHAN_SOFT_TIMESTAMP(3),					\
1008 }
1009 
1010 static const struct iio_chan_spec bma222e_accel_channels[] =
1011 	BMC150_ACCEL_CHANNELS(8);
1012 static const struct iio_chan_spec bma250e_accel_channels[] =
1013 	BMC150_ACCEL_CHANNELS(10);
1014 static const struct iio_chan_spec bmc150_accel_channels[] =
1015 	BMC150_ACCEL_CHANNELS(12);
1016 static const struct iio_chan_spec bma280_accel_channels[] =
1017 	BMC150_ACCEL_CHANNELS(14);
1018 
1019 static const struct bmc150_accel_chip_info bmc150_accel_chip_info_tbl[] = {
1020 	[bmc150] = {
1021 		.name = "BMC150A",
1022 		.chip_id = 0xFA,
1023 		.channels = bmc150_accel_channels,
1024 		.num_channels = ARRAY_SIZE(bmc150_accel_channels),
1025 		.scale_table = { {9610, BMC150_ACCEL_DEF_RANGE_2G},
1026 				 {19122, BMC150_ACCEL_DEF_RANGE_4G},
1027 				 {38344, BMC150_ACCEL_DEF_RANGE_8G},
1028 				 {76590, BMC150_ACCEL_DEF_RANGE_16G} },
1029 	},
1030 	[bmi055] = {
1031 		.name = "BMI055A",
1032 		.chip_id = 0xFA,
1033 		.channels = bmc150_accel_channels,
1034 		.num_channels = ARRAY_SIZE(bmc150_accel_channels),
1035 		.scale_table = { {9610, BMC150_ACCEL_DEF_RANGE_2G},
1036 				 {19122, BMC150_ACCEL_DEF_RANGE_4G},
1037 				 {38344, BMC150_ACCEL_DEF_RANGE_8G},
1038 				 {76590, BMC150_ACCEL_DEF_RANGE_16G} },
1039 	},
1040 	[bma255] = {
1041 		.name = "BMA0255",
1042 		.chip_id = 0xFA,
1043 		.channels = bmc150_accel_channels,
1044 		.num_channels = ARRAY_SIZE(bmc150_accel_channels),
1045 		.scale_table = { {9610, BMC150_ACCEL_DEF_RANGE_2G},
1046 				 {19122, BMC150_ACCEL_DEF_RANGE_4G},
1047 				 {38344, BMC150_ACCEL_DEF_RANGE_8G},
1048 				 {76590, BMC150_ACCEL_DEF_RANGE_16G} },
1049 	},
1050 	[bma250e] = {
1051 		.name = "BMA250E",
1052 		.chip_id = 0xF9,
1053 		.channels = bma250e_accel_channels,
1054 		.num_channels = ARRAY_SIZE(bma250e_accel_channels),
1055 		.scale_table = { {38344, BMC150_ACCEL_DEF_RANGE_2G},
1056 				 {76590, BMC150_ACCEL_DEF_RANGE_4G},
1057 				 {153277, BMC150_ACCEL_DEF_RANGE_8G},
1058 				 {306457, BMC150_ACCEL_DEF_RANGE_16G} },
1059 	},
1060 	[bma222e] = {
1061 		.name = "BMA222E",
1062 		.chip_id = 0xF8,
1063 		.channels = bma222e_accel_channels,
1064 		.num_channels = ARRAY_SIZE(bma222e_accel_channels),
1065 		.scale_table = { {153277, BMC150_ACCEL_DEF_RANGE_2G},
1066 				 {306457, BMC150_ACCEL_DEF_RANGE_4G},
1067 				 {612915, BMC150_ACCEL_DEF_RANGE_8G},
1068 				 {1225831, BMC150_ACCEL_DEF_RANGE_16G} },
1069 	},
1070 	[bma280] = {
1071 		.name = "BMA0280",
1072 		.chip_id = 0xFB,
1073 		.channels = bma280_accel_channels,
1074 		.num_channels = ARRAY_SIZE(bma280_accel_channels),
1075 		.scale_table = { {2392, BMC150_ACCEL_DEF_RANGE_2G},
1076 				 {4785, BMC150_ACCEL_DEF_RANGE_4G},
1077 				 {9581, BMC150_ACCEL_DEF_RANGE_8G},
1078 				 {19152, BMC150_ACCEL_DEF_RANGE_16G} },
1079 	},
1080 };
1081 
1082 static const struct iio_info bmc150_accel_info = {
1083 	.attrs			= &bmc150_accel_attrs_group,
1084 	.read_raw		= bmc150_accel_read_raw,
1085 	.write_raw		= bmc150_accel_write_raw,
1086 	.read_event_value	= bmc150_accel_read_event,
1087 	.write_event_value	= bmc150_accel_write_event,
1088 	.write_event_config	= bmc150_accel_write_event_config,
1089 	.read_event_config	= bmc150_accel_read_event_config,
1090 	.driver_module		= THIS_MODULE,
1091 };
1092 
1093 static const struct iio_info bmc150_accel_info_fifo = {
1094 	.attrs			= &bmc150_accel_attrs_group,
1095 	.read_raw		= bmc150_accel_read_raw,
1096 	.write_raw		= bmc150_accel_write_raw,
1097 	.read_event_value	= bmc150_accel_read_event,
1098 	.write_event_value	= bmc150_accel_write_event,
1099 	.write_event_config	= bmc150_accel_write_event_config,
1100 	.read_event_config	= bmc150_accel_read_event_config,
1101 	.validate_trigger	= bmc150_accel_validate_trigger,
1102 	.hwfifo_set_watermark	= bmc150_accel_set_watermark,
1103 	.hwfifo_flush_to_buffer	= bmc150_accel_fifo_flush,
1104 	.driver_module		= THIS_MODULE,
1105 };
1106 
1107 static irqreturn_t bmc150_accel_trigger_handler(int irq, void *p)
1108 {
1109 	struct iio_poll_func *pf = p;
1110 	struct iio_dev *indio_dev = pf->indio_dev;
1111 	struct bmc150_accel_data *data = iio_priv(indio_dev);
1112 	int bit, ret, i = 0;
1113 	unsigned int raw_val;
1114 
1115 	mutex_lock(&data->mutex);
1116 	for_each_set_bit(bit, indio_dev->active_scan_mask,
1117 			 indio_dev->masklength) {
1118 		ret = regmap_bulk_read(data->regmap,
1119 				       BMC150_ACCEL_AXIS_TO_REG(bit), &raw_val,
1120 				       2);
1121 		if (ret < 0) {
1122 			mutex_unlock(&data->mutex);
1123 			goto err_read;
1124 		}
1125 		data->buffer[i++] = raw_val;
1126 	}
1127 	mutex_unlock(&data->mutex);
1128 
1129 	iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
1130 					   pf->timestamp);
1131 err_read:
1132 	iio_trigger_notify_done(indio_dev->trig);
1133 
1134 	return IRQ_HANDLED;
1135 }
1136 
1137 static int bmc150_accel_trig_try_reen(struct iio_trigger *trig)
1138 {
1139 	struct bmc150_accel_trigger *t = iio_trigger_get_drvdata(trig);
1140 	struct bmc150_accel_data *data = t->data;
1141 	int ret;
1142 
1143 	/* new data interrupts don't need ack */
1144 	if (t == &t->data->triggers[BMC150_ACCEL_TRIGGER_DATA_READY])
1145 		return 0;
1146 
1147 	mutex_lock(&data->mutex);
1148 	/* clear any latched interrupt */
1149 	ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_RST_LATCH,
1150 			   BMC150_ACCEL_INT_MODE_LATCH_INT |
1151 			   BMC150_ACCEL_INT_MODE_LATCH_RESET);
1152 	mutex_unlock(&data->mutex);
1153 	if (ret < 0) {
1154 		dev_err(data->dev,
1155 			"Error writing reg_int_rst_latch\n");
1156 		return ret;
1157 	}
1158 
1159 	return 0;
1160 }
1161 
1162 static int bmc150_accel_trigger_set_state(struct iio_trigger *trig,
1163 					  bool state)
1164 {
1165 	struct bmc150_accel_trigger *t = iio_trigger_get_drvdata(trig);
1166 	struct bmc150_accel_data *data = t->data;
1167 	int ret;
1168 
1169 	mutex_lock(&data->mutex);
1170 
1171 	if (t->enabled == state) {
1172 		mutex_unlock(&data->mutex);
1173 		return 0;
1174 	}
1175 
1176 	if (t->setup) {
1177 		ret = t->setup(t, state);
1178 		if (ret < 0) {
1179 			mutex_unlock(&data->mutex);
1180 			return ret;
1181 		}
1182 	}
1183 
1184 	ret = bmc150_accel_set_interrupt(data, t->intr, state);
1185 	if (ret < 0) {
1186 		mutex_unlock(&data->mutex);
1187 		return ret;
1188 	}
1189 
1190 	t->enabled = state;
1191 
1192 	mutex_unlock(&data->mutex);
1193 
1194 	return ret;
1195 }
1196 
1197 static const struct iio_trigger_ops bmc150_accel_trigger_ops = {
1198 	.set_trigger_state = bmc150_accel_trigger_set_state,
1199 	.try_reenable = bmc150_accel_trig_try_reen,
1200 	.owner = THIS_MODULE,
1201 };
1202 
1203 static int bmc150_accel_handle_roc_event(struct iio_dev *indio_dev)
1204 {
1205 	struct bmc150_accel_data *data = iio_priv(indio_dev);
1206 	int dir;
1207 	int ret;
1208 	unsigned int val;
1209 
1210 	ret = regmap_read(data->regmap, BMC150_ACCEL_REG_INT_STATUS_2, &val);
1211 	if (ret < 0) {
1212 		dev_err(data->dev, "Error reading reg_int_status_2\n");
1213 		return ret;
1214 	}
1215 
1216 	if (val & BMC150_ACCEL_ANY_MOTION_BIT_SIGN)
1217 		dir = IIO_EV_DIR_FALLING;
1218 	else
1219 		dir = IIO_EV_DIR_RISING;
1220 
1221 	if (val & BMC150_ACCEL_ANY_MOTION_BIT_X)
1222 		iio_push_event(indio_dev,
1223 			       IIO_MOD_EVENT_CODE(IIO_ACCEL,
1224 						  0,
1225 						  IIO_MOD_X,
1226 						  IIO_EV_TYPE_ROC,
1227 						  dir),
1228 			       data->timestamp);
1229 
1230 	if (val & BMC150_ACCEL_ANY_MOTION_BIT_Y)
1231 		iio_push_event(indio_dev,
1232 			       IIO_MOD_EVENT_CODE(IIO_ACCEL,
1233 						  0,
1234 						  IIO_MOD_Y,
1235 						  IIO_EV_TYPE_ROC,
1236 						  dir),
1237 			       data->timestamp);
1238 
1239 	if (val & BMC150_ACCEL_ANY_MOTION_BIT_Z)
1240 		iio_push_event(indio_dev,
1241 			       IIO_MOD_EVENT_CODE(IIO_ACCEL,
1242 						  0,
1243 						  IIO_MOD_Z,
1244 						  IIO_EV_TYPE_ROC,
1245 						  dir),
1246 			       data->timestamp);
1247 
1248 	return ret;
1249 }
1250 
1251 static irqreturn_t bmc150_accel_irq_thread_handler(int irq, void *private)
1252 {
1253 	struct iio_dev *indio_dev = private;
1254 	struct bmc150_accel_data *data = iio_priv(indio_dev);
1255 	bool ack = false;
1256 	int ret;
1257 
1258 	mutex_lock(&data->mutex);
1259 
1260 	if (data->fifo_mode) {
1261 		ret = __bmc150_accel_fifo_flush(indio_dev,
1262 						BMC150_ACCEL_FIFO_LENGTH, true);
1263 		if (ret > 0)
1264 			ack = true;
1265 	}
1266 
1267 	if (data->ev_enable_state) {
1268 		ret = bmc150_accel_handle_roc_event(indio_dev);
1269 		if (ret > 0)
1270 			ack = true;
1271 	}
1272 
1273 	if (ack) {
1274 		ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_RST_LATCH,
1275 				   BMC150_ACCEL_INT_MODE_LATCH_INT |
1276 				   BMC150_ACCEL_INT_MODE_LATCH_RESET);
1277 		if (ret)
1278 			dev_err(data->dev, "Error writing reg_int_rst_latch\n");
1279 
1280 		ret = IRQ_HANDLED;
1281 	} else {
1282 		ret = IRQ_NONE;
1283 	}
1284 
1285 	mutex_unlock(&data->mutex);
1286 
1287 	return ret;
1288 }
1289 
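/*
 * Hard IRQ handler: take a timestamp, poll any enabled triggers and, when
 * the FIFO or motion events are in use, defer the register work to the
 * threaded handler.
 */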
1290 static irqreturn_t bmc150_accel_irq_handler(int irq, void *private)
1291 {
1292 	struct iio_dev *indio_dev = private;
1293 	struct bmc150_accel_data *data = iio_priv(indio_dev);
1294 	bool ack = false;
1295 	int i;
1296 
1297 	data->old_timestamp = data->timestamp;
1298 	data->timestamp = iio_get_time_ns();
1299 
1300 	for (i = 0; i < BMC150_ACCEL_TRIGGERS; i++) {
1301 		if (data->triggers[i].enabled) {
1302 			iio_trigger_poll(data->triggers[i].indio_trig);
1303 			ack = true;
1304 			break;
1305 		}
1306 	}
1307 
1308 	if (data->ev_enable_state || data->fifo_mode)
1309 		return IRQ_WAKE_THREAD;
1310 
1311 	if (ack)
1312 		return IRQ_HANDLED;
1313 
1314 	return IRQ_NONE;
1315 }
1316 
1317 static const struct {
1318 	int intr;
1319 	const char *name;
1320 	int (*setup)(struct bmc150_accel_trigger *t, bool state);
1321 } bmc150_accel_triggers[BMC150_ACCEL_TRIGGERS] = {
1322 	{
1323 		.intr = 0,
1324 		.name = "%s-dev%d",
1325 	},
1326 	{
1327 		.intr = 1,
1328 		.name = "%s-any-motion-dev%d",
1329 		.setup = bmc150_accel_any_motion_setup,
1330 	},
1331 };
1332 
1333 static void bmc150_accel_unregister_triggers(struct bmc150_accel_data *data,
1334 					     int from)
1335 {
1336 	int i;
1337 
1338 	for (i = from; i >= 0; i--) {
1339 		if (data->triggers[i].indio_trig) {
1340 			iio_trigger_unregister(data->triggers[i].indio_trig);
1341 			data->triggers[i].indio_trig = NULL;
1342 		}
1343 	}
1344 }
1345 
1346 static int bmc150_accel_triggers_setup(struct iio_dev *indio_dev,
1347 				       struct bmc150_accel_data *data)
1348 {
1349 	int i, ret;
1350 
1351 	for (i = 0; i < BMC150_ACCEL_TRIGGERS; i++) {
1352 		struct bmc150_accel_trigger *t = &data->triggers[i];
1353 
1354 		t->indio_trig = devm_iio_trigger_alloc(data->dev,
1355 					       bmc150_accel_triggers[i].name,
1356 						       indio_dev->name,
1357 						       indio_dev->id);
1358 		if (!t->indio_trig) {
1359 			ret = -ENOMEM;
1360 			break;
1361 		}
1362 
1363 		t->indio_trig->dev.parent = data->dev;
1364 		t->indio_trig->ops = &bmc150_accel_trigger_ops;
1365 		t->intr = bmc150_accel_triggers[i].intr;
1366 		t->data = data;
1367 		t->setup = bmc150_accel_triggers[i].setup;
1368 		iio_trigger_set_drvdata(t->indio_trig, t);
1369 
1370 		ret = iio_trigger_register(t->indio_trig);
1371 		if (ret)
1372 			break;
1373 	}
1374 
1375 	if (ret)
1376 		bmc150_accel_unregister_triggers(data, i - 1);
1377 
1378 	return ret;
1379 }
1380 
1381 #define BMC150_ACCEL_FIFO_MODE_STREAM          0x80
1382 #define BMC150_ACCEL_FIFO_MODE_FIFO            0x40
1383 #define BMC150_ACCEL_FIFO_MODE_BYPASS          0x00
1384 
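/*
 * Select the FIFO operating mode via FIFO_CONFIG1 and, when the FIFO is
 * enabled, program the watermark level into FIFO_CONFIG0.
 */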
1385 static int bmc150_accel_fifo_set_mode(struct bmc150_accel_data *data)
1386 {
1387 	u8 reg = BMC150_ACCEL_REG_FIFO_CONFIG1;
1388 	int ret;
1389 
1390 	ret = regmap_write(data->regmap, reg, data->fifo_mode);
1391 	if (ret < 0) {
1392 		dev_err(data->dev, "Error writing reg_fifo_config1\n");
1393 		return ret;
1394 	}
1395 
1396 	if (!data->fifo_mode)
1397 		return 0;
1398 
1399 	ret = regmap_write(data->regmap, BMC150_ACCEL_REG_FIFO_CONFIG0,
1400 			   data->watermark);
1401 	if (ret < 0)
1402 		dev_err(data->dev, "Error writing reg_fifo_config0\n");
1403 
1404 	return ret;
1405 }
1406 
1407 static int bmc150_accel_buffer_preenable(struct iio_dev *indio_dev)
1408 {
1409 	struct bmc150_accel_data *data = iio_priv(indio_dev);
1410 
1411 	return bmc150_accel_set_power_state(data, true);
1412 }
1413 
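/*
 * In triggered-buffer mode the core postenable hook is sufficient; otherwise
 * arm the watermark interrupt and switch the hardware FIFO on.
 */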
1414 static int bmc150_accel_buffer_postenable(struct iio_dev *indio_dev)
1415 {
1416 	struct bmc150_accel_data *data = iio_priv(indio_dev);
1417 	int ret = 0;
1418 
1419 	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED)
1420 		return iio_triggered_buffer_postenable(indio_dev);
1421 
1422 	mutex_lock(&data->mutex);
1423 
1424 	if (!data->watermark)
1425 		goto out;
1426 
1427 	ret = bmc150_accel_set_interrupt(data, BMC150_ACCEL_INT_WATERMARK,
1428 					 true);
1429 	if (ret)
1430 		goto out;
1431 
1432 	data->fifo_mode = BMC150_ACCEL_FIFO_MODE_FIFO;
1433 
1434 	ret = bmc150_accel_fifo_set_mode(data);
1435 	if (ret) {
1436 		data->fifo_mode = 0;
1437 		bmc150_accel_set_interrupt(data, BMC150_ACCEL_INT_WATERMARK,
1438 					   false);
1439 	}
1440 
1441 out:
1442 	mutex_unlock(&data->mutex);
1443 
1444 	return ret;
1445 }
1446 
1447 static int bmc150_accel_buffer_predisable(struct iio_dev *indio_dev)
1448 {
1449 	struct bmc150_accel_data *data = iio_priv(indio_dev);
1450 
1451 	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED)
1452 		return iio_triggered_buffer_predisable(indio_dev);
1453 
1454 	mutex_lock(&data->mutex);
1455 
1456 	if (!data->fifo_mode)
1457 		goto out;
1458 
1459 	bmc150_accel_set_interrupt(data, BMC150_ACCEL_INT_WATERMARK, false);
1460 	__bmc150_accel_fifo_flush(indio_dev, BMC150_ACCEL_FIFO_LENGTH, false);
1461 	data->fifo_mode = 0;
1462 	bmc150_accel_fifo_set_mode(data);
1463 
1464 out:
1465 	mutex_unlock(&data->mutex);
1466 
1467 	return 0;
1468 }
1469 
1470 static int bmc150_accel_buffer_postdisable(struct iio_dev *indio_dev)
1471 {
1472 	struct bmc150_accel_data *data = iio_priv(indio_dev);
1473 
1474 	return bmc150_accel_set_power_state(data, false);
1475 }
1476 
1477 static const struct iio_buffer_setup_ops bmc150_accel_buffer_ops = {
1478 	.preenable = bmc150_accel_buffer_preenable,
1479 	.postenable = bmc150_accel_buffer_postenable,
1480 	.predisable = bmc150_accel_buffer_predisable,
1481 	.postdisable = bmc150_accel_buffer_postdisable,
1482 };
1483 
1484 static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
1485 {
1486 	int ret, i;
1487 	unsigned int val;
1488 
1489 	ret = regmap_read(data->regmap, BMC150_ACCEL_REG_CHIP_ID, &val);
1490 	if (ret < 0) {
1491 		dev_err(data->dev,
1492 			"Error reading chip id\n");
1493 		return ret;
1494 	}
1495 
1496 	dev_dbg(data->dev, "Chip Id %x\n", val);
1497 	for (i = 0; i < ARRAY_SIZE(bmc150_accel_chip_info_tbl); i++) {
1498 		if (bmc150_accel_chip_info_tbl[i].chip_id == val) {
1499 			data->chip_info = &bmc150_accel_chip_info_tbl[i];
1500 			break;
1501 		}
1502 	}
1503 
1504 	if (!data->chip_info) {
1505 		dev_err(data->dev, "Invalid chip %x\n", val);
1506 		return -ENODEV;
1507 	}
1508 
1509 	ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
1510 	if (ret < 0)
1511 		return ret;
1512 
1513 	/* Set Bandwidth */
1514 	ret = bmc150_accel_set_bw(data, BMC150_ACCEL_DEF_BW, 0);
1515 	if (ret < 0)
1516 		return ret;
1517 
1518 	/* Set Default Range */
1519 	ret = regmap_write(data->regmap, BMC150_ACCEL_REG_PMU_RANGE,
1520 			   BMC150_ACCEL_DEF_RANGE_4G);
1521 	if (ret < 0) {
1522 		dev_err(data->dev,
1523 			"Error writing reg_pmu_range\n");
1524 		return ret;
1525 	}
1526 
1527 	data->range = BMC150_ACCEL_DEF_RANGE_4G;
1528 
1529 	/* Set default slope duration and thresholds */
1530 	data->slope_thres = BMC150_ACCEL_DEF_SLOPE_THRESHOLD;
1531 	data->slope_dur = BMC150_ACCEL_DEF_SLOPE_DURATION;
1532 	ret = bmc150_accel_update_slope(data);
1533 	if (ret < 0)
1534 		return ret;
1535 
1536 	/* Set default as latched interrupts */
1537 	ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_RST_LATCH,
1538 			   BMC150_ACCEL_INT_MODE_LATCH_INT |
1539 			   BMC150_ACCEL_INT_MODE_LATCH_RESET);
1540 	if (ret < 0) {
1541 		dev_err(data->dev,
1542 			"Error writing reg_int_rst_latch\n");
1543 		return ret;
1544 	}
1545 
1546 	return 0;
1547 }
1548 
1549 int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
1550 			    const char *name, bool block_supported)
1551 {
1552 	struct bmc150_accel_data *data;
1553 	struct iio_dev *indio_dev;
1554 	int ret;
1555 
1556 	indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
1557 	if (!indio_dev)
1558 		return -ENOMEM;
1559 
1560 	data = iio_priv(indio_dev);
1561 	dev_set_drvdata(dev, indio_dev);
1562 	data->dev = dev;
1563 	data->irq = irq;
1564 
1565 	data->regmap = regmap;
1566 
1567 	ret = bmc150_accel_chip_init(data);
1568 	if (ret < 0)
1569 		return ret;
1570 
1571 	mutex_init(&data->mutex);
1572 
1573 	indio_dev->dev.parent = dev;
1574 	indio_dev->channels = data->chip_info->channels;
1575 	indio_dev->num_channels = data->chip_info->num_channels;
1576 	indio_dev->name = name ? name : data->chip_info->name;
1577 	indio_dev->modes = INDIO_DIRECT_MODE;
1578 	indio_dev->info = &bmc150_accel_info;
1579 
1580 	ret = iio_triggered_buffer_setup(indio_dev,
1581 					 &iio_pollfunc_store_time,
1582 					 bmc150_accel_trigger_handler,
1583 					 &bmc150_accel_buffer_ops);
1584 	if (ret < 0) {
1585 		dev_err(data->dev, "Failed: iio triggered buffer setup\n");
1586 		return ret;
1587 	}
1588 
1589 	if (data->irq > 0) {
1590 		ret = devm_request_threaded_irq(
1591 						data->dev, data->irq,
1592 						bmc150_accel_irq_handler,
1593 						bmc150_accel_irq_thread_handler,
1594 						IRQF_TRIGGER_RISING,
1595 						BMC150_ACCEL_IRQ_NAME,
1596 						indio_dev);
1597 		if (ret)
1598 			goto err_buffer_cleanup;
1599 
1600 		/*
1601 		 * Set latched mode interrupt. While certain interrupts are
1602 		 * non-latched regardless of this setting (e.g. new data) we
1603 		 * want to use latch mode when we can to prevent interrupt
1604 		 * flooding.
1605 		 */
1606 		ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_RST_LATCH,
1607 				   BMC150_ACCEL_INT_MODE_LATCH_RESET);
1608 		if (ret < 0) {
1609 			dev_err(data->dev, "Error writing reg_int_rst_latch\n");
1610 			goto err_buffer_cleanup;
1611 		}
1612 
1613 		bmc150_accel_interrupts_setup(indio_dev, data);
1614 
1615 		ret = bmc150_accel_triggers_setup(indio_dev, data);
1616 		if (ret)
1617 			goto err_buffer_cleanup;
1618 
1619 		if (block_supported) {
1620 			indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
1621 			indio_dev->info = &bmc150_accel_info_fifo;
1622 			indio_dev->buffer->attrs = bmc150_accel_fifo_attributes;
1623 		}
1624 	}
1625 
1626 	ret = iio_device_register(indio_dev);
1627 	if (ret < 0) {
1628 		dev_err(dev, "Unable to register iio device\n");
1629 		goto err_trigger_unregister;
1630 	}
1631 
1632 	ret = pm_runtime_set_active(dev);
1633 	if (ret)
1634 		goto err_iio_unregister;
1635 
1636 	pm_runtime_enable(dev);
1637 	pm_runtime_set_autosuspend_delay(dev, BMC150_AUTO_SUSPEND_DELAY_MS);
1638 	pm_runtime_use_autosuspend(dev);
1639 
1640 	return 0;
1641 
1642 err_iio_unregister:
1643 	iio_device_unregister(indio_dev);
1644 err_trigger_unregister:
1645 	bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1);
1646 err_buffer_cleanup:
1647 	iio_triggered_buffer_cleanup(indio_dev);
1648 
1649 	return ret;
1650 }
1651 EXPORT_SYMBOL_GPL(bmc150_accel_core_probe);
1652 
1653 int bmc150_accel_core_remove(struct device *dev)
1654 {
1655 	struct iio_dev *indio_dev = dev_get_drvdata(dev);
1656 	struct bmc150_accel_data *data = iio_priv(indio_dev);
1657 
1658 	pm_runtime_disable(data->dev);
1659 	pm_runtime_set_suspended(data->dev);
1660 	pm_runtime_put_noidle(data->dev);
1661 
1662 	iio_device_unregister(indio_dev);
1663 
1664 	bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1);
1665 
1666 	iio_triggered_buffer_cleanup(indio_dev);
1667 
1668 	mutex_lock(&data->mutex);
1669 	bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_DEEP_SUSPEND, 0);
1670 	mutex_unlock(&data->mutex);
1671 
1672 	return 0;
1673 }
1674 EXPORT_SYMBOL_GPL(bmc150_accel_core_remove);
1675 
1676 #ifdef CONFIG_PM_SLEEP
1677 static int bmc150_accel_suspend(struct device *dev)
1678 {
1679 	struct iio_dev *indio_dev = dev_get_drvdata(dev);
1680 	struct bmc150_accel_data *data = iio_priv(indio_dev);
1681 
1682 	mutex_lock(&data->mutex);
1683 	bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
1684 	mutex_unlock(&data->mutex);
1685 
1686 	return 0;
1687 }
1688 
1689 static int bmc150_accel_resume(struct device *dev)
1690 {
1691 	struct iio_dev *indio_dev = dev_get_drvdata(dev);
1692 	struct bmc150_accel_data *data = iio_priv(indio_dev);
1693 
1694 	mutex_lock(&data->mutex);
1695 	if (atomic_read(&data->active_intr))
1696 		bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
1697 	bmc150_accel_fifo_set_mode(data);
1698 	mutex_unlock(&data->mutex);
1699 
1700 	return 0;
1701 }
1702 #endif
1703 
1704 #ifdef CONFIG_PM
1705 static int bmc150_accel_runtime_suspend(struct device *dev)
1706 {
1707 	struct iio_dev *indio_dev = dev_get_drvdata(dev);
1708 	struct bmc150_accel_data *data = iio_priv(indio_dev);
1709 	int ret;
1710 
1711 	dev_dbg(data->dev, "%s\n", __func__);
1712 	ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
1713 	if (ret < 0)
1714 		return -EAGAIN;
1715 
1716 	return 0;
1717 }
1718 
1719 static int bmc150_accel_runtime_resume(struct device *dev)
1720 {
1721 	struct iio_dev *indio_dev = dev_get_drvdata(dev);
1722 	struct bmc150_accel_data *data = iio_priv(indio_dev);
1723 	int ret;
1724 	int sleep_val;
1725 
1726 	dev_dbg(data->dev, "%s\n", __func__);
1727 
1728 	ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
1729 	if (ret < 0)
1730 		return ret;
1731 	ret = bmc150_accel_fifo_set_mode(data);
1732 	if (ret < 0)
1733 		return ret;
1734 
1735 	sleep_val = bmc150_accel_get_startup_times(data);
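	/*
	 * Give the chip time to start up: short delays use usleep_range(),
	 * longer ones msleep_interruptible().
	 */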
1736 	if (sleep_val < 20)
1737 		usleep_range(sleep_val * 1000, 20000);
1738 	else
1739 		msleep_interruptible(sleep_val);
1740 
1741 	return 0;
1742 }
1743 #endif
1744 
1745 const struct dev_pm_ops bmc150_accel_pm_ops = {
1746 	SET_SYSTEM_SLEEP_PM_OPS(bmc150_accel_suspend, bmc150_accel_resume)
1747 	SET_RUNTIME_PM_OPS(bmc150_accel_runtime_suspend,
1748 			   bmc150_accel_runtime_resume, NULL)
1749 };
1750 EXPORT_SYMBOL_GPL(bmc150_accel_pm_ops);
1751 
1752 MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
1753 MODULE_LICENSE("GPL v2");
1754 MODULE_DESCRIPTION("BMC150 accelerometer driver");
1755