/*
 * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2018 Vadim Pasternak <vadimp@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_data/mlxreg.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/regmap.h>
#include <linux/workqueue.h>

/* Offset of event and mask registers from status register. */
#define MLXREG_HOTPLUG_EVENT_OFF	1
#define MLXREG_HOTPLUG_MASK_OFF		2
#define MLXREG_HOTPLUG_AGGR_MASK_OFF	1

/* ASIC good health mask. */
#define MLXREG_HOTPLUG_GOOD_HEALTH_MASK	0x02

#define MLXREG_HOTPLUG_ATTRS_MAX	24
#define MLXREG_HOTPLUG_NOT_ASSERT	3

/**
 * struct mlxreg_hotplug_priv_data - platform private data:
 * @irq: platform device interrupt number;
 * @dev: basic device;
 * @pdev: platform device;
 * @plat: platform data;
 * @regmap: register map handle;
 * @dwork_irq: delayed work template;
 * @lock: spin lock;
 * @hwmon: hwmon device;
 * @mlxreg_hotplug_attr: sysfs attributes array;
 * @mlxreg_hotplug_dev_attr: sysfs sensor device attribute array;
 * @group: sysfs attribute group;
 * @groups: list of sysfs attribute groups for hwmon registration;
 * @cell: location of top aggregation interrupt register;
 * @mask: top aggregation interrupt common mask;
 * @aggr_cache: last value of aggregation register status;
 * @after_probe: flag indicating probing completion;
 * @not_asserted: number of entries in workqueue with no signal assertion;
 */
struct mlxreg_hotplug_priv_data {
	int irq;
	struct device *dev;
	struct platform_device *pdev;
	struct mlxreg_hotplug_platform_data *plat;
	struct regmap *regmap;
	struct delayed_work dwork_irq;
	spinlock_t lock; /* sync with interrupt */
	struct device *hwmon;
	struct attribute *mlxreg_hotplug_attr[MLXREG_HOTPLUG_ATTRS_MAX + 1];
	struct sensor_device_attribute_2
			mlxreg_hotplug_dev_attr[MLXREG_HOTPLUG_ATTRS_MAX];
	struct attribute_group group;
	const struct attribute_group *groups[2];
	u32 cell;
	u32 mask;
	u32 aggr_cache;
	bool after_probe;
	u8 not_asserted;
};

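/*
 * Notify user space through a hwmon uevent and, if the event is associated
 * with a hotplug device, instantiate the I2C client described by its board
 * info on the related adapter.
 */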
static int mlxreg_hotplug_device_create(struct mlxreg_hotplug_priv_data *priv,
					struct mlxreg_core_data *data)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct i2c_client *client;

	/* Notify user by sending hwmon uevent. */
	kobject_uevent(&priv->hwmon->kobj, KOBJ_CHANGE);

	/*
	 * Return if adapter number is negative. It could happen if the
	 * hotplug event is not associated with a hotplug device.
	 */
	if (data->hpdev.nr < 0)
		return 0;

	pdata = dev_get_platdata(&priv->pdev->dev);
	data->hpdev.adapter = i2c_get_adapter(data->hpdev.nr +
					      pdata->shift_nr);
	if (!data->hpdev.adapter) {
		dev_err(priv->dev, "Failed to get adapter for bus %d\n",
			data->hpdev.nr + pdata->shift_nr);
		return -EFAULT;
	}

	client = i2c_new_client_device(data->hpdev.adapter,
				       data->hpdev.brdinfo);
	if (IS_ERR(client)) {
		dev_err(priv->dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
			data->hpdev.brdinfo->type, data->hpdev.nr +
			pdata->shift_nr, data->hpdev.brdinfo->addr);

		i2c_put_adapter(data->hpdev.adapter);
		data->hpdev.adapter = NULL;
		return PTR_ERR(client);
	}

	data->hpdev.client = client;

	return 0;
}

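/*
 * Undo mlxreg_hotplug_device_create(): notify user space, unregister the I2C
 * client and release the adapter reference, if they were set.
 */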
static void
mlxreg_hotplug_device_destroy(struct mlxreg_hotplug_priv_data *priv,
			      struct mlxreg_core_data *data)
{
	/* Notify user by sending hwmon uevent. */
	kobject_uevent(&priv->hwmon->kobj, KOBJ_CHANGE);

	if (data->hpdev.client) {
		i2c_unregister_device(data->hpdev.client);
		data->hpdev.client = NULL;
	}

	if (data->hpdev.adapter) {
		i2c_put_adapter(data->hpdev.adapter);
		data->hpdev.adapter = NULL;
	}
}

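/*
 * Show callback for the hotplug sysfs attributes: read the related status
 * register and report either the masked raw value (for health attributes) or
 * the presence of the unit, taking inversed polarity into account.
 */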
static ssize_t mlxreg_hotplug_attr_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(dev);
	struct mlxreg_core_hotplug_platform_data *pdata;
	int index = to_sensor_dev_attr_2(attr)->index;
	int nr = to_sensor_dev_attr_2(attr)->nr;
	struct mlxreg_core_item *item;
	struct mlxreg_core_data *data;
	u32 regval;
	int ret;

	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items + nr;
	data = item->data + index;

	ret = regmap_read(priv->regmap, data->reg, &regval);
	if (ret)
		return ret;

	if (item->health) {
		regval &= data->mask;
	} else {
		/* Bit = 0 : functional if item->inversed is true. */
		if (item->inversed)
			regval = !(regval & data->mask);
		else
			regval = !!(regval & data->mask);
	}

	return sprintf(buf, "%u\n", regval);
}

#define PRIV_ATTR(i) priv->mlxreg_hotplug_attr[i]
#define PRIV_DEV_ATTR(i) priv->mlxreg_hotplug_dev_attr[i]

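/*
 * Allocate and initialize one read-only sysfs attribute per unit described
 * in the platform data and expose all of them to hwmon through a single
 * attribute group.
 */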
static int mlxreg_hotplug_attr_init(struct mlxreg_hotplug_priv_data *priv)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_core_item *item;
	struct mlxreg_core_data *data;
	int num_attrs = 0, id = 0, i, j;

	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items;

	/* Go over all kinds of items - psu, pwr, fan. */
	for (i = 0; i < pdata->counter; i++, item++) {
		num_attrs += item->count;
		data = item->data;
		/* Go over all units within the item. */
		for (j = 0; j < item->count; j++, data++, id++) {
			PRIV_ATTR(id) = &PRIV_DEV_ATTR(id).dev_attr.attr;
			PRIV_ATTR(id)->name = devm_kasprintf(&priv->pdev->dev,
							     GFP_KERNEL, "%s",
							     data->label);

			if (!PRIV_ATTR(id)->name) {
				dev_err(priv->dev, "Memory allocation failed for attr %d.\n",
					id);
				return -ENOMEM;
			}

			PRIV_DEV_ATTR(id).dev_attr.attr.name =
							PRIV_ATTR(id)->name;
			PRIV_DEV_ATTR(id).dev_attr.attr.mode = 0444;
			PRIV_DEV_ATTR(id).dev_attr.show =
						mlxreg_hotplug_attr_show;
			PRIV_DEV_ATTR(id).nr = i;
			PRIV_DEV_ATTR(id).index = j;
			sysfs_attr_init(&PRIV_DEV_ATTR(id).dev_attr.attr);
		}
	}

	priv->group.attrs = devm_kcalloc(&priv->pdev->dev,
					 num_attrs,
					 sizeof(struct attribute *),
					 GFP_KERNEL);
	if (!priv->group.attrs)
		return -ENOMEM;

	priv->group.attrs = priv->mlxreg_hotplug_attr;
	priv->groups[0] = &priv->group;
	priv->groups[1] = NULL;

	return 0;
}

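/*
 * Handle a presence change for one group of units (PSU, power, FAN): mask
 * the group event, compare the status register against the cached value and
 * create or destroy the device of every unit whose state has toggled, then
 * acknowledge and unmask the event.
 */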
static void
mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv,
			   struct mlxreg_core_item *item)
{
	struct mlxreg_core_data *data;
	unsigned long asserted;
	u32 regval, bit;
	int ret;

	/*
	 * Validate that the item related to the received signal type is
	 * valid. It should never be invalid, except when some piece of
	 * hardware is broken. In such a case just produce an error message
	 * and return. The caller must continue to handle the signals from
	 * other devices, if any.
	 */
	if (unlikely(!item)) {
		dev_err(priv->dev, "False signal is received\n");
		return;
	}

	/* Mask event. */
	ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF,
			   0);
	if (ret)
		goto out;

	/* Read status. */
	ret = regmap_read(priv->regmap, item->reg, &regval);
	if (ret)
		goto out;

	/* Set asserted bits and save last status. */
	regval &= item->mask;
	asserted = item->cache ^ regval;
	item->cache = regval;

	for_each_set_bit(bit, &asserted, 8) {
		data = item->data + bit;
		if (regval & BIT(bit)) {
			if (item->inversed)
				mlxreg_hotplug_device_destroy(priv, data);
			else
				mlxreg_hotplug_device_create(priv, data);
		} else {
			if (item->inversed)
				mlxreg_hotplug_device_create(priv, data);
			else
				mlxreg_hotplug_device_destroy(priv, data);
		}
	}

	/* Acknowledge event. */
	ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_EVENT_OFF,
			   0);
	if (ret)
		goto out;

	/* Unmask event. */
	ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF,
			   item->mask);

 out:
	if (ret)
		dev_err(priv->dev, "Failed to complete workqueue.\n");
}

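/*
 * Handle ASIC health events: for every health unit, mask the event, read the
 * two health bits and attach the associated device once the ASIC reaches the
 * good health state, or detach it when the ASIC leaves that state, then
 * acknowledge and unmask the event.
 */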
static void
mlxreg_hotplug_health_work_helper(struct mlxreg_hotplug_priv_data *priv,
				  struct mlxreg_core_item *item)
{
	struct mlxreg_core_data *data = item->data;
	u32 regval;
	int i, ret = 0;

	for (i = 0; i < item->count; i++, data++) {
		/* Mask event. */
		ret = regmap_write(priv->regmap, data->reg +
				   MLXREG_HOTPLUG_MASK_OFF, 0);
		if (ret)
			goto out;

		/* Read status. */
		ret = regmap_read(priv->regmap, data->reg, &regval);
		if (ret)
			goto out;

		regval &= data->mask;

		if (item->cache == regval)
			goto ack_event;

		/*
		 * ASIC health indication is provided through two bits. Bits
		 * value 0x2 indicates that the ASIC reached the good health
		 * state, value 0x0 indicates the bad health or dormant state
		 * and value 0x3 indicates the booting state. During ASIC
		 * reset it should pass the following states:
		 * dormant -> booting -> good.
		 */
		if (regval == MLXREG_HOTPLUG_GOOD_HEALTH_MASK) {
			if (!data->attached) {
				/*
				 * ASIC is in steady state. Connect associated
				 * device, if configured.
				 */
				mlxreg_hotplug_device_create(priv, data);
				data->attached = true;
			}
		} else {
			if (data->attached) {
				/*
				 * ASIC health failed after the ASIC has been
				 * in steady state. Disconnect associated
				 * device, if it has been connected.
				 */
				mlxreg_hotplug_device_destroy(priv, data);
				data->attached = false;
				data->health_cntr = 0;
			}
		}
		item->cache = regval;
ack_event:
		/* Acknowledge event. */
		ret = regmap_write(priv->regmap, data->reg +
				   MLXREG_HOTPLUG_EVENT_OFF, 0);
		if (ret)
			goto out;

		/* Unmask event. */
		ret = regmap_write(priv->regmap, data->reg +
				   MLXREG_HOTPLUG_MASK_OFF, data->mask);
		if (ret)
			goto out;
	}

 out:
	if (ret)
		dev_err(priv->dev, "Failed to complete workqueue.\n");
}

/*
 * mlxreg_hotplug_work_handler - performs traversing of device interrupt
 * registers according to the below hierarchy schema:
 *
 *				Aggregation registers (status/mask)
 * PSU registers:		*---*
 * *-----------------*		|   |
 * |status/event/mask|----->    | * |
 * *-----------------*		|   |
 * Power registers:		|   |
 * *-----------------*		|   |
 * |status/event/mask|----->    | * |
 * *-----------------*		|   |
 * FAN registers:		|   |--> CPU
 * *-----------------*		|   |
 * |status/event/mask|----->    | * |
 * *-----------------*		|   |
 * ASIC registers:		|   |
 * *-----------------*		|   |
 * |status/event/mask|----->    | * |
 * *-----------------*		|   |
 *				*---*
 *
 * In case some system changes are detected: FAN in/out, PSU in/out, power
 * cable attached/detached, ASIC health good/bad, the relevant device is
 * created or destroyed.
 */
static void mlxreg_hotplug_work_handler(struct work_struct *work)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_hotplug_priv_data *priv;
	struct mlxreg_core_item *item;
	u32 regval, aggr_asserted;
	unsigned long flags;
	int i, ret;

	priv = container_of(work, struct mlxreg_hotplug_priv_data,
			    dwork_irq.work);
	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items;

	/* Mask aggregation event. */
	ret = regmap_write(priv->regmap, pdata->cell +
			   MLXREG_HOTPLUG_AGGR_MASK_OFF, 0);
	if (ret < 0)
		goto out;

	/* Read aggregation status. */
	ret = regmap_read(priv->regmap, pdata->cell, &regval);
	if (ret)
		goto out;

	regval &= pdata->mask;
	aggr_asserted = priv->aggr_cache ^ regval;
	priv->aggr_cache = regval;

	/*
	 * The handler was invoked, but no assertion was detected at the top
	 * aggregation status level. Set aggr_asserted to the mask value to
	 * allow the handler an extra run over all relevant signals to recover
	 * any missed signals.
	 */
	if (priv->not_asserted == MLXREG_HOTPLUG_NOT_ASSERT) {
		priv->not_asserted = 0;
		aggr_asserted = pdata->mask;
	}
	if (!aggr_asserted)
		goto unmask_event;

	/* Handle topology and health configuration changes. */
	for (i = 0; i < pdata->counter; i++, item++) {
		if (aggr_asserted & item->aggr_mask) {
			if (item->health)
				mlxreg_hotplug_health_work_helper(priv, item);
			else
				mlxreg_hotplug_work_helper(priv, item);
		}
	}

	spin_lock_irqsave(&priv->lock, flags);

	/*
	 * It is possible that some signals have been asserted while the
	 * interrupt was masked by mlxreg_hotplug_work_handler. In this case
	 * such signals would be missed. In order to handle them, the delayed
	 * work is canceled and the work task is re-scheduled for immediate
	 * execution. This allows missed signals, if any, to be handled.
	 * Otherwise the work handler just validates that no new signals have
	 * been received during masking.
	 */
	cancel_delayed_work(&priv->dwork_irq);
	schedule_delayed_work(&priv->dwork_irq, 0);

	spin_unlock_irqrestore(&priv->lock, flags);

	return;

unmask_event:
	priv->not_asserted++;
	/* Unmask aggregation event (no need to acknowledge). */
	ret = regmap_write(priv->regmap, pdata->cell +
			   MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask);

 out:
	if (ret)
		dev_err(priv->dev, "Failed to complete workqueue.\n");
}

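/*
 * Initial interrupt setup: adjust group masks according to the capability
 * registers, clear stale group events, unmask the aggregation events and
 * invoke the work handler once to configure the initially present hotplug
 * devices before enabling the interrupt line.
 */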
static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_core_item *item;
	struct mlxreg_core_data *data;
	u32 regval;
	int i, j, ret;

	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items;

	for (i = 0; i < pdata->counter; i++, item++) {
		if (item->capability) {
			/*
			 * Read group capability register to get actual number
			 * of interrupt capable components and set group mask
			 * accordingly.
			 */
			ret = regmap_read(priv->regmap, item->capability,
					  &regval);
			if (ret)
				goto out;

			item->mask = GENMASK((regval & item->mask) - 1, 0);
		}

		/* Clear group presence event. */
		ret = regmap_write(priv->regmap, item->reg +
				   MLXREG_HOTPLUG_EVENT_OFF, 0);
		if (ret)
			goto out;

		/*
		 * Verify if the hardware configuration requires disabling the
		 * interrupt capability for some of the components.
		 */
		data = item->data;
		for (j = 0; j < item->count; j++, data++) {
			/* Verify if the attribute has a capability register. */
			if (data->capability) {
				/* Read capability register. */
				ret = regmap_read(priv->regmap,
						  data->capability, &regval);
				if (ret)
					goto out;

				if (!(regval & data->bit))
					item->mask &= ~BIT(j);
			}
		}

		/* Set group initial status as mask and unmask group event. */
		if (item->inversed) {
			item->cache = item->mask;
			ret = regmap_write(priv->regmap, item->reg +
					   MLXREG_HOTPLUG_MASK_OFF,
					   item->mask);
			if (ret)
				goto out;
		}
	}

	/* Keep aggregation initial status as zero and unmask events. */
	ret = regmap_write(priv->regmap, pdata->cell +
			   MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask);
	if (ret)
		goto out;

	/* Keep low aggregation initial status as zero and unmask events. */
	if (pdata->cell_low) {
		ret = regmap_write(priv->regmap, pdata->cell_low +
				   MLXREG_HOTPLUG_AGGR_MASK_OFF,
				   pdata->mask_low);
		if (ret)
			goto out;
	}

	/* Invoke work handler to initialize hot plug device settings. */
	mlxreg_hotplug_work_handler(&priv->dwork_irq.work);

 out:
	if (ret)
		dev_err(priv->dev, "Failed to set interrupts.\n");
	enable_irq(priv->irq);
	return ret;
}

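/*
 * Tear down interrupt handling: disable the interrupt line, cancel pending
 * work, mask the aggregation and group events and destroy all devices that
 * were attached for the hotplug units.
 */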
static void mlxreg_hotplug_unset_irq(struct mlxreg_hotplug_priv_data *priv)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_core_item *item;
	struct mlxreg_core_data *data;
	int count, i, j;

	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items;
	disable_irq(priv->irq);
	cancel_delayed_work_sync(&priv->dwork_irq);

	/* Mask low aggregation event, if defined. */
	if (pdata->cell_low)
		regmap_write(priv->regmap, pdata->cell_low +
			     MLXREG_HOTPLUG_AGGR_MASK_OFF, 0);

	/* Mask aggregation event. */
	regmap_write(priv->regmap, pdata->cell + MLXREG_HOTPLUG_AGGR_MASK_OFF,
		     0);

	/* Clear topology configurations. */
	for (i = 0; i < pdata->counter; i++, item++) {
		data = item->data;
		/* Mask group presence event. */
		regmap_write(priv->regmap, data->reg + MLXREG_HOTPLUG_MASK_OFF,
			     0);
		/* Clear group presence event. */
		regmap_write(priv->regmap, data->reg +
			     MLXREG_HOTPLUG_EVENT_OFF, 0);

		/* Remove all the attached devices in group. */
		count = item->count;
		for (j = 0; j < count; j++, data++)
			mlxreg_hotplug_device_destroy(priv, data);
	}
}

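/*
 * Interrupt handler: schedule the delayed work for immediate execution; the
 * actual register handling and device creation happen in process context.
 */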
static irqreturn_t mlxreg_hotplug_irq_handler(int irq, void *dev)
{
	struct mlxreg_hotplug_priv_data *priv;

	priv = (struct mlxreg_hotplug_priv_data *)dev;

	/* Schedule work task for immediate execution. */
	schedule_delayed_work(&priv->dwork_irq, 0);

	return IRQ_HANDLED;
}

static int mlxreg_hotplug_probe(struct platform_device *pdev)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_hotplug_priv_data *priv;
	struct i2c_adapter *deferred_adap;
	int err;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "Failed to get platform data.\n");
		return -EINVAL;
	}

	/* Defer probing if the necessary adapter is not configured yet. */
	deferred_adap = i2c_get_adapter(pdata->deferred_nr);
	if (!deferred_adap)
		return -EPROBE_DEFER;
	i2c_put_adapter(deferred_adap);

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (pdata->irq) {
		priv->irq = pdata->irq;
	} else {
		priv->irq = platform_get_irq(pdev, 0);
		if (priv->irq < 0)
			return priv->irq;
	}

	priv->regmap = pdata->regmap;
	priv->dev = pdev->dev.parent;
	priv->pdev = pdev;

	err = devm_request_irq(&pdev->dev, priv->irq,
			       mlxreg_hotplug_irq_handler, IRQF_TRIGGER_FALLING
			       | IRQF_SHARED, "mlxreg-hotplug", priv);
	if (err) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", err);
		return err;
	}

	disable_irq(priv->irq);
	spin_lock_init(&priv->lock);
	INIT_DELAYED_WORK(&priv->dwork_irq, mlxreg_hotplug_work_handler);
	dev_set_drvdata(&pdev->dev, priv);

	err = mlxreg_hotplug_attr_init(priv);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate attributes: %d\n",
			err);
		return err;
	}

	priv->hwmon = devm_hwmon_device_register_with_groups(&pdev->dev,
					"mlxreg_hotplug", priv, priv->groups);
	if (IS_ERR(priv->hwmon)) {
		dev_err(&pdev->dev, "Failed to register hwmon device %ld\n",
			PTR_ERR(priv->hwmon));
		return PTR_ERR(priv->hwmon);
	}

	/* Perform initial interrupt setup. */
	mlxreg_hotplug_set_irq(priv);
	priv->after_probe = true;

	return 0;
}

static int mlxreg_hotplug_remove(struct platform_device *pdev)
{
	struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(&pdev->dev);

	/* Clean up interrupt setup. */
	mlxreg_hotplug_unset_irq(priv);
	devm_free_irq(&pdev->dev, priv->irq, priv);

	return 0;
}

static struct platform_driver mlxreg_hotplug_driver = {
	.driver = {
		.name = "mlxreg-hotplug",
	},
	.probe = mlxreg_hotplug_probe,
	.remove = mlxreg_hotplug_remove,
};

module_platform_driver(mlxreg_hotplug_driver);

MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
MODULE_DESCRIPTION("Mellanox regmap hotplug platform driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform:mlxreg-hotplug");