xref: /linux/drivers/iio/buffer/kfifo_buf.c (revision fcc8487d477a3452a1d0ccbdd4c5e0e1e3cb8bed)
1 #include <linux/slab.h>
2 #include <linux/kernel.h>
3 #include <linux/module.h>
4 #include <linux/device.h>
5 #include <linux/workqueue.h>
6 #include <linux/kfifo.h>
7 #include <linux/mutex.h>
8 #include <linux/iio/iio.h>
9 #include <linux/iio/buffer.h>
10 #include <linux/iio/kfifo_buf.h>
11 #include <linux/iio/buffer_impl.h>
12 #include <linux/sched.h>
13 #include <linux/poll.h>
14 
/**
 * struct iio_kfifo - kfifo-backed software IIO buffer
 * @buffer: embedded generic IIO buffer (recovered via iio_to_kfifo())
 * @kf: backing kfifo that stores the sample data
 * @user_lock: serializes allocation, free, reset and read access to @kf
 * @update_needed: nonzero when length/bytes_per_datum changed and the
 *	kfifo must be reallocated on the next request_update call
 */
struct iio_kfifo {
	struct iio_buffer buffer;
	struct kfifo kf;
	struct mutex user_lock;
	int update_needed;
};
21 
22 #define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
23 
24 static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
25 				int bytes_per_datum, int length)
26 {
27 	if ((length == 0) || (bytes_per_datum == 0))
28 		return -EINVAL;
29 
30 	return __kfifo_alloc((struct __kfifo *)&buf->kf, length,
31 			     bytes_per_datum, GFP_KERNEL);
32 }
33 
34 static int iio_request_update_kfifo(struct iio_buffer *r)
35 {
36 	int ret = 0;
37 	struct iio_kfifo *buf = iio_to_kfifo(r);
38 
39 	mutex_lock(&buf->user_lock);
40 	if (buf->update_needed) {
41 		kfifo_free(&buf->kf);
42 		ret = __iio_allocate_kfifo(buf, buf->buffer.bytes_per_datum,
43 				   buf->buffer.length);
44 		if (ret >= 0)
45 			buf->update_needed = false;
46 	} else {
47 		kfifo_reset_out(&buf->kf);
48 	}
49 	mutex_unlock(&buf->user_lock);
50 
51 	return ret;
52 }
53 
54 static int iio_mark_update_needed_kfifo(struct iio_buffer *r)
55 {
56 	struct iio_kfifo *kf = iio_to_kfifo(r);
57 	kf->update_needed = true;
58 	return 0;
59 }
60 
61 static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
62 {
63 	if (r->bytes_per_datum != bpd) {
64 		r->bytes_per_datum = bpd;
65 		iio_mark_update_needed_kfifo(r);
66 	}
67 	return 0;
68 }
69 
70 static int iio_set_length_kfifo(struct iio_buffer *r, int length)
71 {
72 	/* Avoid an invalid state */
73 	if (length < 2)
74 		length = 2;
75 	if (r->length != length) {
76 		r->length = length;
77 		iio_mark_update_needed_kfifo(r);
78 	}
79 	return 0;
80 }
81 
82 static int iio_store_to_kfifo(struct iio_buffer *r,
83 			      const void *data)
84 {
85 	int ret;
86 	struct iio_kfifo *kf = iio_to_kfifo(r);
87 	ret = kfifo_in(&kf->kf, data, 1);
88 	if (ret != 1)
89 		return -EBUSY;
90 	return 0;
91 }
92 
/*
 * iio_read_first_n_kfifo() - copy queued sample data to userspace
 * @r: buffer to read from
 * @n: maximum number of bytes to copy; must be at least one fifo element
 * @buf: userspace destination
 *
 * Returns the number of bytes copied, -ERESTARTSYS if interrupted while
 * waiting for the lock, -EINVAL if the fifo has not been set up yet or @n
 * is smaller than one element, or the error from kfifo_to_user().
 */
static int iio_read_first_n_kfifo(struct iio_buffer *r,
			   size_t n, char __user *buf)
{
	int ret, copied;
	struct iio_kfifo *kf = iio_to_kfifo(r);

	if (mutex_lock_interruptible(&kf->user_lock))
		return -ERESTARTSYS;

	/* kfifo_to_user() copies whole elements only; reject short reads. */
	if (!kfifo_initialized(&kf->kf) || n < kfifo_esize(&kf->kf))
		ret = -EINVAL;
	else
		ret = kfifo_to_user(&kf->kf, buf, n, &copied);
	mutex_unlock(&kf->user_lock);
	if (ret < 0)
		return ret;

	return copied;
}
112 
113 static size_t iio_kfifo_buf_data_available(struct iio_buffer *r)
114 {
115 	struct iio_kfifo *kf = iio_to_kfifo(r);
116 	size_t samples;
117 
118 	mutex_lock(&kf->user_lock);
119 	samples = kfifo_len(&kf->kf);
120 	mutex_unlock(&kf->user_lock);
121 
122 	return samples;
123 }
124 
125 static void iio_kfifo_buffer_release(struct iio_buffer *buffer)
126 {
127 	struct iio_kfifo *kf = iio_to_kfifo(buffer);
128 
129 	mutex_destroy(&kf->user_lock);
130 	kfifo_free(&kf->kf);
131 	kfree(kf);
132 }
133 
134 static const struct iio_buffer_access_funcs kfifo_access_funcs = {
135 	.store_to = &iio_store_to_kfifo,
136 	.read_first_n = &iio_read_first_n_kfifo,
137 	.data_available = iio_kfifo_buf_data_available,
138 	.request_update = &iio_request_update_kfifo,
139 	.set_bytes_per_datum = &iio_set_bytes_per_datum_kfifo,
140 	.set_length = &iio_set_length_kfifo,
141 	.release = &iio_kfifo_buffer_release,
142 
143 	.modes = INDIO_BUFFER_SOFTWARE | INDIO_BUFFER_TRIGGERED,
144 };
145 
146 struct iio_buffer *iio_kfifo_allocate(void)
147 {
148 	struct iio_kfifo *kf;
149 
150 	kf = kzalloc(sizeof(*kf), GFP_KERNEL);
151 	if (!kf)
152 		return NULL;
153 
154 	kf->update_needed = true;
155 	iio_buffer_init(&kf->buffer);
156 	kf->buffer.access = &kfifo_access_funcs;
157 	kf->buffer.length = 2;
158 	mutex_init(&kf->user_lock);
159 
160 	return &kf->buffer;
161 }
162 EXPORT_SYMBOL(iio_kfifo_allocate);
163 
/**
 * iio_kfifo_free() - release a buffer obtained from iio_kfifo_allocate()
 * @r: buffer to release
 *
 * Drops one reference; actual destruction happens through the access
 * functions' release callback once the last reference is gone.
 */
void iio_kfifo_free(struct iio_buffer *r)
{
	iio_buffer_put(r);
}
EXPORT_SYMBOL(iio_kfifo_free);
169 
/* devres destructor: drop the reference held by the managed allocation. */
static void devm_iio_kfifo_release(struct device *dev, void *res)
{
	iio_kfifo_free(*(struct iio_buffer **)res);
}
174 
/* devres match callback: true when @res holds the buffer given in @data. */
static int devm_iio_kfifo_match(struct device *dev, void *res, void *data)
{
	struct iio_buffer **this = res;

	/* A devres entry without a buffer pointer indicates a bug. */
	if (WARN_ON(!this || !*this))
		return 0;

	return *this == data;
}
184 
/**
 * devm_iio_kfifo_allocate - Resource-managed iio_kfifo_allocate()
 * @dev:		Device to allocate kfifo buffer for
 *
 * RETURNS:
 * Pointer to allocated iio_buffer on success, NULL on failure.
 */
192 struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev)
193 {
194 	struct iio_buffer **ptr, *r;
195 
196 	ptr = devres_alloc(devm_iio_kfifo_release, sizeof(*ptr), GFP_KERNEL);
197 	if (!ptr)
198 		return NULL;
199 
200 	r = iio_kfifo_allocate();
201 	if (r) {
202 		*ptr = r;
203 		devres_add(dev, ptr);
204 	} else {
205 		devres_free(ptr);
206 	}
207 
208 	return r;
209 }
210 EXPORT_SYMBOL(devm_iio_kfifo_allocate);
211 
/**
 * devm_iio_kfifo_free - Resource-managed iio_kfifo_free()
 * @dev:		Device the buffer belongs to
 * @r:			The buffer associated with the device
 */
void devm_iio_kfifo_free(struct device *dev, struct iio_buffer *r)
{
	/* devres_release() fails only if @r was not devm-allocated on @dev. */
	WARN_ON(devres_release(dev, devm_iio_kfifo_release,
			       devm_iio_kfifo_match, r));
}
EXPORT_SYMBOL(devm_iio_kfifo_free);
223 
224 MODULE_LICENSE("GPL");
225