// SPDX-License-Identifier: GPL-2.0
#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/sched/signal.h>

#include <asm/ioctls.h>

#include "../../mount.h"
#include "../fdinfo.h"
#include "fanotify.h"

#define FANOTIFY_DEFAULT_MAX_EVENTS	16384
#define FANOTIFY_DEFAULT_MAX_MARKS	8192
#define FANOTIFY_DEFAULT_MAX_LISTENERS	128

/*
 * All flags that may be specified in parameter event_f_flags of fanotify_init.
 *
 * Internal and external open flags are stored together in field f_flags of
 * struct file. Only external open flags shall be allowed in event_f_flags.
 * Internal flags like FMODE_NONOTIFY, FMODE_EXEC and FMODE_NOCMTIME shall be
 * excluded.
 */
#define FANOTIFY_INIT_ALL_EVENT_F_BITS	( \
		O_ACCMODE	| O_APPEND	| O_NONBLOCK	| \
		__O_SYNC	| O_DSYNC	| O_CLOEXEC	| \
		O_LARGEFILE	| O_NOATIME	)

extern const struct fsnotify_ops fanotify_fsnotify_ops;

struct kmem_cache *fanotify_mark_cache __read_mostly;
struct kmem_cache *fanotify_event_cachep __read_mostly;
struct kmem_cache *fanotify_perm_event_cachep __read_mostly;

/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_lock held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	assert_spin_locked(&group->notification_lock);

	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	if (FAN_EVENT_METADATA_LEN > count)
		return ERR_PTR(-EINVAL);

	/*
	 * The caller holds notification_lock across the emptiness check
	 * above and this removal, so the queue cannot have changed in
	 * between.
	 */
	return fsnotify_remove_first_event(group);
}

static int create_fd(struct fsnotify_group *group,
		     struct fanotify_event_info *event,
		     struct file **file)
{
	int client_fd;
	struct file *new_file;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	client_fd = get_unused_fd_flags(group->fanotify_data.f_flags);
	if (client_fd < 0)
		return client_fd;

	/*
	 * We need a new file handle for the userspace program so it can
	 * read even if the file was originally opened O_WRONLY.
	 */
	/*
	 * It's possible this event was an overflow event. In that case
	 * dentry and mnt are NULL; that's fine, just don't call dentry_open().
	 */
	if (event->path.dentry && event->path.mnt)
		new_file = dentry_open(&event->path,
				       group->fanotify_data.f_flags | FMODE_NONOTIFY,
				       current_cred());
	else
		new_file = ERR_PTR(-EOVERFLOW);
	if (IS_ERR(new_file)) {
		/*
		 * We still send an event even if we can't open the file.
		 * This can happen when, say, tasks are gone and we try to
		 * open their /proc files, or we try to open a WRONLY file
		 * like in sysfs. We just send the errno to userspace since
		 * there isn't much else we can do.
		 */
		put_unused_fd(client_fd);
		client_fd = PTR_ERR(new_file);
	} else {
		*file = new_file;
	}

	return client_fd;
}
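/*
 * Illustrative note (editorial sketch, not kernel code): the fd placed in
 * each delivered event is allocated and opened with the event_f_flags that
 * the listener passed to fanotify_init(), so a group initialized with
 * O_RDONLY hands out read-only event fds. The listener owns each fd and
 * must close it, e.g. (userspace, error handling omitted):
 *
 *	int fan_fd = fanotify_init(FAN_CLOEXEC, O_RDONLY | O_LARGEFILE);
 *	...
 *	// after consuming an event:
 *	if (meta->fd >= 0)
 *		close(meta->fd);
 */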
static int fill_event_metadata(struct fsnotify_group *group,
			       struct fanotify_event_metadata *metadata,
			       struct fsnotify_event *fsn_event,
			       struct file **file)
{
	int ret = 0;
	struct fanotify_event_info *event;

	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
		 group, metadata, fsn_event);

	*file = NULL;
	event = container_of(fsn_event, struct fanotify_event_info, fse);
	metadata->event_len = FAN_EVENT_METADATA_LEN;
	metadata->metadata_len = FAN_EVENT_METADATA_LEN;
	metadata->vers = FANOTIFY_METADATA_VERSION;
	metadata->reserved = 0;
	metadata->mask = fsn_event->mask & FAN_ALL_OUTGOING_EVENTS;
	metadata->pid = pid_vnr(event->tgid);
	if (unlikely(fsn_event->mask & FAN_Q_OVERFLOW))
		metadata->fd = FAN_NOFD;
	else {
		metadata->fd = create_fd(group, event, file);
		if (metadata->fd < 0)
			ret = metadata->fd;
	}

	return ret;
}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
static struct fanotify_perm_event_info *dequeue_event(
				struct fsnotify_group *group, int fd)
{
	struct fanotify_perm_event_info *event, *return_e = NULL;

	spin_lock(&group->notification_lock);
	list_for_each_entry(event, &group->fanotify_data.access_list,
			    fae.fse.list) {
		if (event->fd != fd)
			continue;

		list_del_init(&event->fae.fse.list);
		return_e = event;
		break;
	}
	spin_unlock(&group->notification_lock);

	pr_debug("%s: found return_e=%p\n", __func__, return_e);

	return return_e;
}

static int process_access_response(struct fsnotify_group *group,
				   struct fanotify_response *response_struct)
{
	struct fanotify_perm_event_info *event;
	int fd = response_struct->fd;
	int response = response_struct->response;

	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
		 fd, response);
	/*
	 * Make sure the response is valid. If it is invalid we do nothing;
	 * either userspace can send a valid response or we will clean the
	 * event up after the timeout.
	 */
	switch (response) {
	case FAN_ALLOW:
	case FAN_DENY:
		break;
	default:
		return -EINVAL;
	}

	if (fd < 0)
		return -EINVAL;

	event = dequeue_event(group, fd);
	if (!event)
		return -ENOENT;

	event->response = response;
	wake_up(&group->fanotify_data.access_waitq);

	return 0;
}
#endif

static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct fanotify_event_metadata fanotify_event_metadata;
	struct file *f;
	int fd, ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f);
	if (ret < 0)
		return ret;

	fd = fanotify_event_metadata.fd;
	ret = -EFAULT;
	if (copy_to_user(buf, &fanotify_event_metadata,
			 fanotify_event_metadata.event_len))
		goto out_close_fd;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (event->mask & FAN_ALL_PERM_EVENTS)
		FANOTIFY_PE(event)->fd = fd;
#endif

	if (fd != FAN_NOFD)
		fd_install(fd, f);
	return fanotify_event_metadata.event_len;

out_close_fd:
	if (fd != FAN_NOFD) {
		put_unused_fd(fd);
		fput(f);
	}
	return ret;
}
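/*
 * Minimal userspace sketch (illustrative, error handling trimmed) of the
 * read-side protocol implemented above: read() fills the buffer with one
 * or more fanotify_event_metadata records, which are walked with the
 * FAN_EVENT_OK()/FAN_EVENT_NEXT() helpers from <sys/fanotify.h>. A
 * metadata->fd of FAN_NOFD marks a queue-overflow event that carries no
 * open file:
 *
 *	char buf[4096];
 *	ssize_t len = read(fan_fd, buf, sizeof(buf));
 *	struct fanotify_event_metadata *meta;
 *
 *	for (meta = (void *)buf; FAN_EVENT_OK(meta, len);
 *	     meta = FAN_EVENT_NEXT(meta, len)) {
 *		if (meta->vers != FANOTIFY_METADATA_VERSION)
 *			exit(1);		// ABI mismatch
 *		if (meta->fd == FAN_NOFD)
 *			continue;		// overflow, nothing to close
 *		// ... inspect meta->mask, meta->pid, /proc/self/fd/... ...
 *		close(meta->fd);
 *	}
 */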
/* fanotify userspace file descriptor functions */
static unsigned int fanotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	spin_lock(&group->notification_lock);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	spin_unlock(&group->notification_lock);

	return ret;
}

static ssize_t fanotify_read(struct file *file, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	start = buf;
	group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	add_wait_queue(&group->notification_waitq, &wait);
	while (1) {
		spin_lock(&group->notification_lock);
		kevent = get_one_event(group, count);
		spin_unlock(&group->notification_lock);

		if (IS_ERR(kevent)) {
			ret = PTR_ERR(kevent);
			break;
		}

		if (!kevent) {
			ret = -EAGAIN;
			if (file->f_flags & O_NONBLOCK)
				break;

			ret = -ERESTARTSYS;
			if (signal_pending(current))
				break;

			if (start != buf)
				break;

			wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		ret = copy_event_to_user(group, kevent, buf);
		if (unlikely(ret == -EOPENSTALE)) {
			/*
			 * We cannot report events with stale fd so drop it.
			 * Setting ret to 0 will continue the event loop and
			 * do the right thing if there are no more events to
			 * read (i.e. return bytes read, -EAGAIN or wait).
			 */
			ret = 0;
		}

		/*
		 * Permission events get queued to wait for response. Other
		 * events can be destroyed now.
		 */
		if (!(kevent->mask & FAN_ALL_PERM_EVENTS)) {
			fsnotify_destroy_event(group, kevent);
		} else {
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
			if (ret <= 0) {
				FANOTIFY_PE(kevent)->response = FAN_DENY;
				wake_up(&group->fanotify_data.access_waitq);
			} else {
				spin_lock(&group->notification_lock);
				list_add_tail(&kevent->list,
					      &group->fanotify_data.access_list);
				spin_unlock(&group->notification_lock);
			}
#endif
		}
		if (ret < 0)
			break;
		buf += ret;
		count -= ret;
	}
	remove_wait_queue(&group->notification_waitq, &wait);

	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}

static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response response = { .fd = -1, .response = -1 };
	struct fsnotify_group *group;
	int ret;

	group = file->private_data;

	if (count > sizeof(response))
		count = sizeof(response);

	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

	if (copy_from_user(&response, buf, count))
		return -EFAULT;

	ret = process_access_response(group, &response);
	if (ret < 0)
		count = ret;

	return count;
#else
	return -EINVAL;
#endif
}
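/*
 * Minimal userspace sketch (illustrative only) of the write-side protocol
 * handled by fanotify_write()/process_access_response() above: after
 * reading a permission event such as FAN_OPEN_PERM, the listener answers
 * by writing a struct fanotify_response naming that event's fd:
 *
 *	struct fanotify_response resp = {
 *		.fd = meta->fd,
 *		.response = allow_it ? FAN_ALLOW : FAN_DENY,
 *	};
 *	write(fan_fd, &resp, sizeof(resp));
 *	close(meta->fd);
 *
 * Anything other than FAN_ALLOW/FAN_DENY is rejected with -EINVAL, and a
 * response naming an unknown fd gets -ENOENT.
 */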
static int fanotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_perm_event_info *event, *next;
	struct fsnotify_event *fsn_event;

	/*
	 * Stop new events from arriving in the notification queue. Since
	 * userspace cannot use the fanotify fd anymore, no event can enter
	 * or leave access_list by now either.
	 */
	fsnotify_group_stop_queueing(group);

	/*
	 * Process all permission events on access_list and notification queue
	 * and simulate reply from userspace.
	 */
	spin_lock(&group->notification_lock);
	list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
				 fae.fse.list) {
		pr_debug("%s: found group=%p event=%p\n", __func__, group,
			 event);

		list_del_init(&event->fae.fse.list);
		event->response = FAN_ALLOW;
	}

	/*
	 * Destroy all non-permission events. For permission events just
	 * dequeue them and set the response. They will be freed once the
	 * response is consumed and fanotify_get_response() returns.
	 */
	while (!fsnotify_notify_queue_is_empty(group)) {
		fsn_event = fsnotify_remove_first_event(group);
		if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS)) {
			spin_unlock(&group->notification_lock);
			fsnotify_destroy_event(group, fsn_event);
			spin_lock(&group->notification_lock);
		} else
			FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
	}
	spin_unlock(&group->notification_lock);

	/* Response for all permission events is set, wake up waiters */
	wake_up(&group->fanotify_data.access_waitq);
#endif

	/* matches the fanotify_init->fsnotify_alloc_group */
	fsnotify_destroy_group(group);

	return 0;
}

static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;

	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		spin_lock(&group->notification_lock);
		list_for_each_entry(fsn_event, &group->notification_list, list)
			send_len += FAN_EVENT_METADATA_LEN;
		spin_unlock(&group->notification_lock);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}

static const struct file_operations fanotify_fops = {
	.show_fdinfo	= fanotify_show_fdinfo,
	.poll		= fanotify_poll,
	.read		= fanotify_read,
	.write		= fanotify_write,
	.fasync		= NULL,
	.release	= fanotify_release,
	.unlocked_ioctl	= fanotify_ioctl,
	.compat_ioctl	= fanotify_ioctl,
	.llseek		= noop_llseek,
};

static int fanotify_find_path(int dfd, const char __user *filename,
			      struct path *path, unsigned int flags)
{
	int ret;

	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
		 dfd, filename, flags);

	if (filename == NULL) {
		struct fd f = fdget(dfd);

		ret = -EBADF;
		if (!f.file)
			goto out;

		ret = -ENOTDIR;
		if ((flags & FAN_MARK_ONLYDIR) &&
		    !(S_ISDIR(file_inode(f.file)->i_mode))) {
			fdput(f);
			goto out;
		}

		*path = f.file->f_path;
		path_get(path);
		fdput(f);
	} else {
		unsigned int lookup_flags = 0;

		if (!(flags & FAN_MARK_DONT_FOLLOW))
			lookup_flags |= LOOKUP_FOLLOW;
		if (flags & FAN_MARK_ONLYDIR)
			lookup_flags |= LOOKUP_DIRECTORY;

		ret = user_path_at(dfd, filename, lookup_flags, path);
		if (ret)
			goto out;
	}

	/* you can only watch an inode if you have read permissions on it */
	ret = inode_permission(path->dentry->d_inode, MAY_READ);
	if (ret)
		path_put(path);
out:
	return ret;
}
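/*
 * Userspace sketch (illustrative) of the FIONREAD support above: the
 * ioctl reports how many bytes a read() would currently return, i.e.
 * FAN_EVENT_METADATA_LEN per queued event, which a listener can use to
 * size its read buffer:
 *
 *	int bytes = 0;
 *	ioctl(fan_fd, FIONREAD, &bytes);
 *	// bytes / FAN_EVENT_METADATA_LEN == number of queued events
 */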
static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
					    __u32 mask,
					    unsigned int flags,
					    int *destroy)
{
	__u32 oldmask = 0;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		__u32 tmask = fsn_mark->mask & ~mask;

		if (flags & FAN_MARK_ONDIR)
			tmask &= ~FAN_ONDIR;

		oldmask = fsn_mark->mask;
		fsn_mark->mask = tmask;
	} else {
		__u32 tmask = fsn_mark->ignored_mask & ~mask;
		if (flags & FAN_MARK_ONDIR)
			tmask &= ~FAN_ONDIR;
		fsn_mark->ignored_mask = tmask;
	}
	*destroy = !(fsn_mark->mask | fsn_mark->ignored_mask);
	spin_unlock(&fsn_mark->lock);

	return mask & oldmask;
}

static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
					 struct vfsmount *mnt, __u32 mask,
					 unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int destroy_mark;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_mark(&real_mount(mnt)->mnt_fsnotify_marks,
				      group);
	if (!fsn_mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOENT;
	}

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
						 &destroy_mark);
	if (removed & real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_mask(real_mount(mnt)->mnt_fsnotify_marks);
	if (destroy_mark)
		fsnotify_detach_mark(fsn_mark);
	mutex_unlock(&group->mark_mutex);
	if (destroy_mark)
		fsnotify_free_mark(fsn_mark);

	fsnotify_put_mark(fsn_mark);
	return 0;
}

static int fanotify_remove_inode_mark(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int destroy_mark;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
	if (!fsn_mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOENT;
	}

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
						 &destroy_mark);
	if (removed & inode->i_fsnotify_mask)
		fsnotify_recalc_mask(inode->i_fsnotify_marks);
	if (destroy_mark)
		fsnotify_detach_mark(fsn_mark);
	mutex_unlock(&group->mark_mutex);
	if (destroy_mark)
		fsnotify_free_mark(fsn_mark);

	/* matches the fsnotify_find_mark() */
	fsnotify_put_mark(fsn_mark);

	return 0;
}

static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
				       __u32 mask,
				       unsigned int flags)
{
	__u32 oldmask = -1;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		__u32 tmask = fsn_mark->mask | mask;

		if (flags & FAN_MARK_ONDIR)
			tmask |= FAN_ONDIR;

		oldmask = fsn_mark->mask;
		fsn_mark->mask = tmask;
	} else {
		__u32 tmask = fsn_mark->ignored_mask | mask;
		if (flags & FAN_MARK_ONDIR)
			tmask |= FAN_ONDIR;

		fsn_mark->ignored_mask = tmask;
		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
	}
	spin_unlock(&fsn_mark->lock);

	return mask & ~oldmask;
}
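/*
 * Illustrative worked example of the return values above (editorial note,
 * not extra kernel logic): the add/remove helpers report only the bits
 * that actually changed on the mark, so callers recalculate the
 * object-wide mask only when the mark's contribution really changed:
 *
 *	// fsn_mark->mask == FAN_OPEN, ignored_mask == 0
 *	added = fanotify_mark_add_to_mask(mark, FAN_OPEN | FAN_CLOSE, 0);
 *	// added == FAN_CLOSE; FAN_OPEN was already set
 *
 *	removed = fanotify_mark_remove_from_mask(mark, FAN_OPEN, 0, &d);
 *	// removed == FAN_OPEN; d == 0 because FAN_CLOSE is still set
 */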
static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
						   struct inode *inode,
						   struct vfsmount *mnt)
{
	struct fsnotify_mark *mark;
	int ret;

	if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
		return ERR_PTR(-ENOSPC);

	mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
	if (!mark)
		return ERR_PTR(-ENOMEM);

	fsnotify_init_mark(mark, group);
	ret = fsnotify_add_mark_locked(mark, inode, mnt, 0);
	if (ret) {
		fsnotify_put_mark(mark);
		return ERR_PTR(ret);
	}

	return mark;
}

static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
				      struct vfsmount *mnt, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_mark(&real_mount(mnt)->mnt_fsnotify_marks,
				      group);
	if (!fsn_mark) {
		fsn_mark = fanotify_add_new_mark(group, NULL, mnt);
		if (IS_ERR(fsn_mark)) {
			mutex_unlock(&group->mark_mutex);
			return PTR_ERR(fsn_mark);
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_mask(real_mount(mnt)->mnt_fsnotify_marks);
	mutex_unlock(&group->mark_mutex);

	fsnotify_put_mark(fsn_mark);
	return 0;
}

static int fanotify_add_inode_mark(struct fsnotify_group *group,
				   struct inode *inode, __u32 mask,
				   unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

	/*
	 * If some other task has this inode open for write we should not add
	 * an ignored mark, unless that ignored mark is supposed to survive
	 * modification changes anyway.
	 */
	if ((flags & FAN_MARK_IGNORED_MASK) &&
	    !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
	    (atomic_read(&inode->i_writecount) > 0))
		return 0;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
	if (!fsn_mark) {
		fsn_mark = fanotify_add_new_mark(group, inode, NULL);
		if (IS_ERR(fsn_mark)) {
			mutex_unlock(&group->mark_mutex);
			return PTR_ERR(fsn_mark);
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	if (added & ~inode->i_fsnotify_mask)
		fsnotify_recalc_mask(inode->i_fsnotify_marks);
	mutex_unlock(&group->mark_mutex);

	fsnotify_put_mark(fsn_mark);
	return 0;
}
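/*
 * Userspace sketch (illustrative; the path is hypothetical) of the
 * ignore-mask path above: a listener watching a whole mount can mute a
 * noisy file by adding an ignored mask on that inode.
 * FAN_MARK_IGNORED_SURV_MODIFY keeps the ignore in place across
 * modifications (and past the i_writecount check in
 * fanotify_add_inode_mark()):
 *
 *	fanotify_mark(fan_fd, FAN_MARK_ADD | FAN_MARK_IGNORED_MASK |
 *		      FAN_MARK_IGNORED_SURV_MODIFY,
 *		      FAN_OPEN | FAN_MODIFY, AT_FDCWD, "/var/log/noisy.log");
 */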
/* fanotify syscalls */
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
	struct fsnotify_group *group;
	int f_flags, fd;
	struct user_struct *user;
	struct fanotify_event_info *oevent;

	pr_debug("%s: flags=%d event_f_flags=%d\n",
		 __func__, flags, event_f_flags);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (flags & ~FAN_ALL_INIT_FLAGS)
		return -EINVAL;

	if (event_f_flags & ~FANOTIFY_INIT_ALL_EVENT_F_BITS)
		return -EINVAL;

	switch (event_f_flags & O_ACCMODE) {
	case O_RDONLY:
	case O_RDWR:
	case O_WRONLY:
		break;
	default:
		return -EINVAL;
	}

	user = get_current_user();
	if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
		free_uid(user);
		return -EMFILE;
	}

	f_flags = O_RDWR | FMODE_NONOTIFY;
	if (flags & FAN_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (flags & FAN_NONBLOCK)
		f_flags |= O_NONBLOCK;

	/* fsnotify_alloc_group takes a ref. Dropped in fanotify_release */
	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
	if (IS_ERR(group)) {
		free_uid(user);
		return PTR_ERR(group);
	}

	group->fanotify_data.user = user;
	atomic_inc(&user->fanotify_listeners);

	oevent = fanotify_alloc_event(NULL, FS_Q_OVERFLOW, NULL);
	if (unlikely(!oevent)) {
		fd = -ENOMEM;
		goto out_destroy_group;
	}
	group->overflow_event = &oevent->fse;

	if (force_o_largefile())
		event_f_flags |= O_LARGEFILE;
	group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
#endif
	switch (flags & FAN_ALL_CLASS_BITS) {
	case FAN_CLASS_NOTIF:
		group->priority = FS_PRIO_0;
		break;
	case FAN_CLASS_CONTENT:
		group->priority = FS_PRIO_1;
		break;
	case FAN_CLASS_PRE_CONTENT:
		group->priority = FS_PRIO_2;
		break;
	default:
		fd = -EINVAL;
		goto out_destroy_group;
	}

	if (flags & FAN_UNLIMITED_QUEUE) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->max_events = UINT_MAX;
	} else {
		group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
	}

	if (flags & FAN_UNLIMITED_MARKS) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->fanotify_data.max_marks = UINT_MAX;
	} else {
		group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
	}

	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
	if (fd < 0)
		goto out_destroy_group;

	return fd;

out_destroy_group:
	fsnotify_destroy_group(group);
	return fd;
}
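/*
 * Userspace sketch (illustrative, error handling trimmed) of the
 * initialization rules enforced above: the caller needs CAP_SYS_ADMIN,
 * the class bits must name one of the three notification classes, and
 * event_f_flags must be a valid combination of external open flags:
 *
 *	int fan_fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_CONTENT,
 *				   O_RDONLY | O_LARGEFILE);
 *	if (fan_fd < 0)
 *		err(1, "fanotify_init");	// e.g. EPERM without CAP_SYS_ADMIN
 */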
SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
			      __u64, mask, int, dfd,
			      const char  __user *, pathname)
{
	struct inode *inode = NULL;
	struct vfsmount *mnt = NULL;
	struct fsnotify_group *group;
	struct fd f;
	struct path path;
	int ret;

	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
		 __func__, fanotify_fd, flags, dfd, pathname, mask);

	/* we only use the lower 32 bits as of right now. */
	if (mask & ((__u64)0xffffffff << 32))
		return -EINVAL;

	if (flags & ~FAN_ALL_MARK_FLAGS)
		return -EINVAL;
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:		/* fallthrough */
	case FAN_MARK_REMOVE:
		if (!mask)
			return -EINVAL;
		break;
	case FAN_MARK_FLUSH:
		if (flags & ~(FAN_MARK_MOUNT | FAN_MARK_FLUSH))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (mask & FAN_ONDIR) {
		flags |= FAN_MARK_ONDIR;
		mask &= ~FAN_ONDIR;
	}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
	if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
		return -EINVAL;

	f = fdget(fanotify_fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an fanotify instance */
	ret = -EINVAL;
	if (unlikely(f.file->f_op != &fanotify_fops))
		goto fput_and_out;
	group = f.file->private_data;

	/*
	 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF. These are not
	 * allowed to set permission events.
	 */
	ret = -EINVAL;
	if (mask & FAN_ALL_PERM_EVENTS &&
	    group->priority == FS_PRIO_0)
		goto fput_and_out;

	if (flags & FAN_MARK_FLUSH) {
		ret = 0;
		if (flags & FAN_MARK_MOUNT)
			fsnotify_clear_vfsmount_marks_by_group(group);
		else
			fsnotify_clear_inode_marks_by_group(group);
		goto fput_and_out;
	}

	ret = fanotify_find_path(dfd, pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	if (!(flags & FAN_MARK_MOUNT))
		inode = path.dentry->d_inode;
	else
		mnt = path.mnt;

	/* create/update an inode mark */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE)) {
	case FAN_MARK_ADD:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_add_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_REMOVE:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
		break;
	default:
		ret = -EINVAL;
	}

	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE6(fanotify_mark,
				int, fanotify_fd, unsigned int, flags,
				__u32, mask0, __u32, mask1, int, dfd,
				const char  __user *, pathname)
{
	return sys_fanotify_mark(fanotify_fd, flags,
#ifdef __BIG_ENDIAN
				((__u64)mask0 << 32) | mask1,
#else
				((__u64)mask1 << 32) | mask0,
#endif
				 dfd, pathname);
}
#endif

/*
 * fanotify_user_setup - Our initialization function. Note that we cannot return
 * error because we have compiled-in VFS hooks. So an (unlikely) failure here
 * must result in panic().
 */
static int __init fanotify_user_setup(void)
{
	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
	fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC);
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	fanotify_perm_event_cachep = KMEM_CACHE(fanotify_perm_event_info,
						SLAB_PANIC);
#endif

	return 0;
}
device_initcall(fanotify_user_setup);
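/*
 * End-to-end userspace sketch (illustrative; "/mnt/data" is a hypothetical
 * path, error handling trimmed) tying the two syscalls together: mark a
 * whole mount for open permission events, then serve decisions from the
 * read/write loops sketched earlier. FAN_OPEN_PERM requires a
 * FAN_CLASS_CONTENT or FAN_CLASS_PRE_CONTENT group, per the FS_PRIO_0
 * check in sys_fanotify_mark():
 *
 *	int fan_fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_CONTENT,
 *				   O_RDONLY | O_LARGEFILE);
 *	fanotify_mark(fan_fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
 *		      FAN_OPEN_PERM, AT_FDCWD, "/mnt/data");
 *	// read() events, reply with struct fanotify_response ...
 */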