xref: /linux/fs/timerfd.c (revision 4413e16d9d21673bb5048a2e542f1aaa00015c2e)
1 /*
2  *  fs/timerfd.c
3  *
4  *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
5  *
6  *
7  *  Thanks to Thomas Gleixner for code reviews and useful comments.
8  *
9  */
10 
11 #include <linux/file.h>
12 #include <linux/poll.h>
13 #include <linux/init.h>
14 #include <linux/fs.h>
15 #include <linux/sched.h>
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/list.h>
19 #include <linux/spinlock.h>
20 #include <linux/time.h>
21 #include <linux/hrtimer.h>
22 #include <linux/anon_inodes.h>
23 #include <linux/timerfd.h>
24 #include <linux/syscalls.h>
25 #include <linux/rcupdate.h>
26 
/*
 * Per-timerfd state. ->wqh.lock protects ticks/expired/tintv/moffs;
 * cancel_list membership is protected by cancel_lock (see below).
 */
struct timerfd_ctx {
	struct hrtimer tmr;		/* underlying high-resolution timer */
	ktime_t tintv;			/* interval; non-zero => periodic timer */
	ktime_t moffs;			/* monotonic-offset snapshot; KTIME_MAX means "canceled by clock set" */
	wait_queue_head_t wqh;		/* read/poll waiters; wqh.lock guards state above */
	u64 ticks;			/* expirations not yet reported to userspace */
	int expired;			/* timer fired since last read/settime */
	int clockid;			/* CLOCK_MONOTONIC or CLOCK_REALTIME */
	struct rcu_head rcu;		/* deferred free (cancel_list is RCU-walked) */
	struct list_head clist;		/* link on cancel_list */
	bool might_cancel;		/* true while linked on cancel_list */
};
39 
40 static LIST_HEAD(cancel_list);
41 static DEFINE_SPINLOCK(cancel_lock);
42 
43 /*
44  * This gets called when the timer event triggers. We set the "expired"
45  * flag, but we do not re-arm the timer (in case it's necessary,
46  * tintv.tv64 != 0) until the timer is accessed.
47  */
48 static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr)
49 {
50 	struct timerfd_ctx *ctx = container_of(htmr, struct timerfd_ctx, tmr);
51 	unsigned long flags;
52 
53 	spin_lock_irqsave(&ctx->wqh.lock, flags);
54 	ctx->expired = 1;
55 	ctx->ticks++;
56 	wake_up_locked(&ctx->wqh);
57 	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
58 
59 	return HRTIMER_NORESTART;
60 }
61 
/*
 * Called when the clock was set. Walks the cancel list and, for every
 * timerfd whose monotonic-offset snapshot no longer matches the current
 * one, marks it canceled (moffs = KTIME_MAX) and wakes its waiters. The
 * wake-up requires ctx->ticks to be non zero, therefore we increment
 * it before calling wake_up_locked().
 */
void timerfd_clock_was_set(void)
{
	ktime_t moffs = ktime_get_monotonic_offset();
	struct timerfd_ctx *ctx;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(ctx, &cancel_list, clist) {
		/* Only TFD_TIMER_CANCEL_ON_SET timers are interested. */
		if (!ctx->might_cancel)
			continue;
		spin_lock_irqsave(&ctx->wqh.lock, flags);
		/* Offset changed => the clock was stepped since setup. */
		if (ctx->moffs.tv64 != moffs.tv64) {
			/* KTIME_MAX stamps this ctx as canceled. */
			ctx->moffs.tv64 = KTIME_MAX;
			ctx->ticks++;
			wake_up_locked(&ctx->wqh);
		}
		spin_unlock_irqrestore(&ctx->wqh.lock, flags);
	}
	rcu_read_unlock();
}
88 
89 static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
90 {
91 	if (ctx->might_cancel) {
92 		ctx->might_cancel = false;
93 		spin_lock(&cancel_lock);
94 		list_del_rcu(&ctx->clist);
95 		spin_unlock(&cancel_lock);
96 	}
97 }
98 
/*
 * Returns true when this timer was canceled by a clock change:
 * timerfd_clock_was_set() stamps ->moffs with KTIME_MAX to signal it.
 * On cancellation, re-snapshot the current monotonic offset so the
 * next clock change is detectable again. Callers hold ctx->wqh.lock.
 */
static bool timerfd_canceled(struct timerfd_ctx *ctx)
{
	if (!ctx->might_cancel || ctx->moffs.tv64 != KTIME_MAX)
		return false;
	ctx->moffs = ktime_get_monotonic_offset();
	return true;
}
106 
107 static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags)
108 {
109 	if (ctx->clockid == CLOCK_REALTIME && (flags & TFD_TIMER_ABSTIME) &&
110 	    (flags & TFD_TIMER_CANCEL_ON_SET)) {
111 		if (!ctx->might_cancel) {
112 			ctx->might_cancel = true;
113 			spin_lock(&cancel_lock);
114 			list_add_rcu(&ctx->clist, &cancel_list);
115 			spin_unlock(&cancel_lock);
116 		}
117 	} else if (ctx->might_cancel) {
118 		timerfd_remove_cancel(ctx);
119 	}
120 }
121 
122 static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx)
123 {
124 	ktime_t remaining;
125 
126 	remaining = hrtimer_expires_remaining(&ctx->tmr);
127 	return remaining.tv64 < 0 ? ktime_set(0, 0): remaining;
128 }
129 
/*
 * Program the hrtimer from @ktmr. Called with ctx->wqh.lock held and
 * the previous timer already stopped (see timerfd_settime()).
 *
 * Returns 0 on success, or -ECANCELED when a clock change canceled the
 * timer right around the time it was started (detected via
 * timerfd_canceled()).
 */
static int timerfd_setup(struct timerfd_ctx *ctx, int flags,
			 const struct itimerspec *ktmr)
{
	enum hrtimer_mode htmode;
	ktime_t texp;
	int clockid = ctx->clockid;

	htmode = (flags & TFD_TIMER_ABSTIME) ?
		HRTIMER_MODE_ABS: HRTIMER_MODE_REL;

	texp = timespec_to_ktime(ktmr->it_value);
	/* Reset the accounting for the new timer program. */
	ctx->expired = 0;
	ctx->ticks = 0;
	ctx->tintv = timespec_to_ktime(ktmr->it_interval);
	hrtimer_init(&ctx->tmr, clockid, htmode);
	hrtimer_set_expires(&ctx->tmr, texp);
	ctx->tmr.function = timerfd_tmrproc;
	/* A zero it_value leaves the timer disarmed. */
	if (texp.tv64 != 0) {
		hrtimer_start(&ctx->tmr, texp, htmode);
		if (timerfd_canceled(ctx))
			return -ECANCELED;
	}
	return 0;
}
154 
155 static int timerfd_release(struct inode *inode, struct file *file)
156 {
157 	struct timerfd_ctx *ctx = file->private_data;
158 
159 	timerfd_remove_cancel(ctx);
160 	hrtimer_cancel(&ctx->tmr);
161 	kfree_rcu(ctx, rcu);
162 	return 0;
163 }
164 
165 static unsigned int timerfd_poll(struct file *file, poll_table *wait)
166 {
167 	struct timerfd_ctx *ctx = file->private_data;
168 	unsigned int events = 0;
169 	unsigned long flags;
170 
171 	poll_wait(file, &ctx->wqh, wait);
172 
173 	spin_lock_irqsave(&ctx->wqh.lock, flags);
174 	if (ctx->ticks)
175 		events |= POLLIN;
176 	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
177 
178 	return events;
179 }
180 
/*
 * ->read() for a timerfd file: report the expiration count since the
 * last read as a u64. Blocks until at least one tick is pending unless
 * O_NONBLOCK is set. Returns sizeof(u64) on success, -EINVAL for a
 * short buffer, -EAGAIN, -ECANCELED (clock set on a
 * TFD_TIMER_CANCEL_ON_SET timer), a signal-interruption error from the
 * wait, or -EFAULT.
 */
static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct timerfd_ctx *ctx = file->private_data;
	ssize_t res;
	u64 ticks = 0;

	if (count < sizeof(ticks))
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	if (file->f_flags & O_NONBLOCK)
		res = -EAGAIN;
	else
		/* Sleeps with wqh.lock dropped, reacquires before return. */
		res = wait_event_interruptible_locked_irq(ctx->wqh, ctx->ticks);

	/*
	 * If clock has changed, we do not care about the
	 * ticks and we do not rearm the timer. Userspace must
	 * reevaluate anyway.
	 */
	if (timerfd_canceled(ctx)) {
		ctx->ticks = 0;
		ctx->expired = 0;
		res = -ECANCELED;
	}

	if (ctx->ticks) {
		ticks = ctx->ticks;

		if (ctx->expired && ctx->tintv.tv64) {
			/*
			 * If tintv.tv64 != 0, this is a periodic timer that
			 * needs to be re-armed. We avoid doing it in the timer
			 * callback to avoid DoS attacks specifying a very
			 * short timer period.
			 */
			ticks += hrtimer_forward_now(&ctx->tmr,
						     ctx->tintv) - 1;
			hrtimer_restart(&ctx->tmr);
		}
		/* All pending ticks are consumed and reported below. */
		ctx->expired = 0;
		ctx->ticks = 0;
	}
	spin_unlock_irq(&ctx->wqh.lock);
	/* Copy to userspace outside the lock; ticks != 0 overrides res. */
	if (ticks)
		res = put_user(ticks, (u64 __user *) buf) ? -EFAULT: sizeof(ticks);
	return res;
}
229 
/* File operations for timerfd descriptors; timerfds are not seekable. */
static const struct file_operations timerfd_fops = {
	.release	= timerfd_release,
	.poll		= timerfd_poll,
	.read		= timerfd_read,
	.llseek		= noop_llseek,
};
236 
237 static struct file *timerfd_fget(int fd)
238 {
239 	struct file *file;
240 
241 	file = fget(fd);
242 	if (!file)
243 		return ERR_PTR(-EBADF);
244 	if (file->f_op != &timerfd_fops) {
245 		fput(file);
246 		return ERR_PTR(-EINVAL);
247 	}
248 
249 	return file;
250 }
251 
252 SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
253 {
254 	int ufd;
255 	struct timerfd_ctx *ctx;
256 
257 	/* Check the TFD_* constants for consistency.  */
258 	BUILD_BUG_ON(TFD_CLOEXEC != O_CLOEXEC);
259 	BUILD_BUG_ON(TFD_NONBLOCK != O_NONBLOCK);
260 
261 	if ((flags & ~TFD_CREATE_FLAGS) ||
262 	    (clockid != CLOCK_MONOTONIC &&
263 	     clockid != CLOCK_REALTIME))
264 		return -EINVAL;
265 
266 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
267 	if (!ctx)
268 		return -ENOMEM;
269 
270 	init_waitqueue_head(&ctx->wqh);
271 	ctx->clockid = clockid;
272 	hrtimer_init(&ctx->tmr, clockid, HRTIMER_MODE_ABS);
273 	ctx->moffs = ktime_get_monotonic_offset();
274 
275 	ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx,
276 			       O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS));
277 	if (ufd < 0)
278 		kfree(ctx);
279 
280 	return ufd;
281 }
282 
/*
 * timerfd_settime(2): (re)program the timer behind @ufd from @utmr
 * and, when @otmr is non-NULL, report the previous setting there.
 * Returns 0, or -EFAULT/-EINVAL/-EBADF/-ECANCELED on failure.
 */
SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
		const struct itimerspec __user *, utmr,
		struct itimerspec __user *, otmr)
{
	struct file *file;
	struct timerfd_ctx *ctx;
	struct itimerspec ktmr, kotmr;
	int ret;

	if (copy_from_user(&ktmr, utmr, sizeof(ktmr)))
		return -EFAULT;

	if ((flags & ~TFD_SETTIME_FLAGS) ||
	    !timespec_valid(&ktmr.it_value) ||
	    !timespec_valid(&ktmr.it_interval))
		return -EINVAL;

	file = timerfd_fget(ufd);
	if (IS_ERR(file))
		return PTR_ERR(file);
	ctx = file->private_data;

	timerfd_setup_cancel(ctx, flags);

	/*
	 * We need to stop the existing timer before reprogramming
	 * it to the new values. hrtimer_try_to_cancel() fails (< 0)
	 * while the expiry callback is running, so drop the lock and
	 * spin to let the callback (which takes wqh.lock) finish.
	 */
	for (;;) {
		spin_lock_irq(&ctx->wqh.lock);
		if (hrtimer_try_to_cancel(&ctx->tmr) >= 0)
			break;
		spin_unlock_irq(&ctx->wqh.lock);
		cpu_relax();
	}

	/*
	 * If the timer is expired and it's periodic, we need to advance it
	 * because the caller may want to know the previous expiration time.
	 * We do not update "ticks" and "expired" since the timer will be
	 * re-programmed again in the following timerfd_setup() call.
	 */
	if (ctx->expired && ctx->tintv.tv64)
		hrtimer_forward_now(&ctx->tmr, ctx->tintv);

	/* Snapshot the old setting before reprogramming. */
	kotmr.it_value = ktime_to_timespec(timerfd_get_remaining(ctx));
	kotmr.it_interval = ktime_to_timespec(ctx->tintv);

	/*
	 * Re-program the timer to the new value ...
	 */
	ret = timerfd_setup(ctx, flags, &ktmr);

	spin_unlock_irq(&ctx->wqh.lock);
	fput(file);
	if (otmr && copy_to_user(otmr, &kotmr, sizeof(kotmr)))
		return -EFAULT;

	return ret;
}
343 
/*
 * timerfd_gettime(2): report the current setting of the timer behind
 * @ufd (remaining time and interval) into @otmr.
 */
SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct itimerspec __user *, otmr)
{
	struct file *file;
	struct timerfd_ctx *ctx;
	struct itimerspec kotmr;

	file = timerfd_fget(ufd);
	if (IS_ERR(file))
		return PTR_ERR(file);
	ctx = file->private_data;

	spin_lock_irq(&ctx->wqh.lock);
	if (ctx->expired && ctx->tintv.tv64) {
		/*
		 * Expired periodic timer: re-arm it here (re-arming is
		 * deferred out of the hrtimer callback) and fold the
		 * missed periods into ->ticks. The "- 1" accounts for
		 * the expiry that already bumped ->ticks in
		 * timerfd_tmrproc().
		 */
		ctx->expired = 0;
		ctx->ticks +=
			hrtimer_forward_now(&ctx->tmr, ctx->tintv) - 1;
		hrtimer_restart(&ctx->tmr);
	}
	kotmr.it_value = ktime_to_timespec(timerfd_get_remaining(ctx));
	kotmr.it_interval = ktime_to_timespec(ctx->tintv);
	spin_unlock_irq(&ctx->wqh.lock);
	fput(file);

	return copy_to_user(otmr, &kotmr, sizeof(kotmr)) ? -EFAULT: 0;
}
369 
370