/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2016 Joyent, Inc.
 */

#include <sys/types.h>
#include <sys/devops.h>
#include <sys/conf.h>
#include <sys/modctl.h>
#include <sys/sunddi.h>
#include <sys/stat.h>
#include <sys/poll_impl.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/mkdev.h>
#include <sys/debug.h>
#include <sys/file.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/bitmap.h>
#include <sys/devpoll.h>
#include <sys/rctl.h>
#include <sys/resource.h>
#include <sys/schedctl.h>
#include <sys/epoll.h>

#define	RESERVED	1

/* local data struct */
static	dp_entry_t	**devpolltbl;	/* dev poll entries */
static	size_t		dptblsize;

static	kmutex_t	devpoll_lock;	/* lock protecting dev tbl */
int			devpoll_init;	/* is /dev/poll initialized already */

/* device local functions */

static int dpopen(dev_t *devp, int flag, int otyp, cred_t *credp);
static int dpwrite(dev_t dev, struct uio *uiop, cred_t *credp);
static int dpioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
    int *rvalp);
static int dppoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **phpp);
static int dpclose(dev_t dev, int flag, int otyp, cred_t *credp);
static dev_info_t *dpdevi;


static struct cb_ops dp_cb_ops = {
	dpopen,			/* open */
	dpclose,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	dpwrite,		/* write */
	dpioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	dppoll,			/* poll */
	ddi_prop_op,		/* prop_op */
	(struct streamtab *)0,	/* streamtab */
	D_MP,			/* flags */
	CB_REV,			/* cb_ops revision */
	nodev,			/* aread */
	nodev			/* awrite */
};

static int dpattach(dev_info_t *, ddi_attach_cmd_t);
static int dpdetach(dev_info_t *, ddi_detach_cmd_t);
static int dpinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);

static struct dev_ops dp_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	dpinfo,			/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	dpattach,		/* attach */
	dpdetach,		/* detach */
	nodev,			/* reset */
	&dp_cb_ops,		/* driver operations */
	(struct bus_ops *)NULL,	/* bus operations */
	nulldev,		/* power */
	ddi_quiesce_not_needed,	/* quiesce */
};

static struct modldrv modldrv = {
	&mod_driverops,		/* type of module - a driver */
	"/dev/poll driver",
	&dp_ops,
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};

static void pcachelink_assoc(pollcache_t *, pollcache_t *);
static void pcachelink_mark_stale(pollcache_t *);
static void pcachelink_purge_stale(pollcache_t *);
static void pcachelink_purge_all(pollcache_t *);


/*
 * Locking Design
 *
 * The /dev/poll driver shares most of its code with the poll system call,
 * whose code lives in common/syscall/poll.c.  In the poll(2) design, the
 * pollcache structure is per lwp.  An implicit assumption is made there
 * that some portion of the pollcache will never be touched by other lwps.
 * E.g., in the poll(2) design, no lwp will ever need to grow the bitmap of
 * another lwp.  This assumption does not hold for /dev/poll; hence the
 * need for extra locking.
 *
 * To allow more parallelism, each /dev/poll file descriptor (indexed by
 * minor number) has its own lock.  Since read (dpioctl) is a much more
 * frequent operation than write, we want to allow multiple reads on the
 * same /dev/poll fd.  However, we prevent writes from being starved by
 * giving priority to write operations.  Theoretically writes can starve
 * reads as well.  But in a practical sense this is not important because
 * (1) writes happen less often than reads, and (2) the write operation
 * defines the contents of the cached fd set.  If writes happen so often
 * that they can starve reads, that means the cached set is very unstable.
 * It may not make sense to read an unstable cache set anyway.  Therefore,
 * the writers-starving-readers case is not handled in this design.
 */

int
_init()
{
	int	error;

	dptblsize = DEVPOLLSIZE;
	devpolltbl = kmem_zalloc(sizeof (caddr_t) * dptblsize, KM_SLEEP);
	mutex_init(&devpoll_lock, NULL, MUTEX_DEFAULT, NULL);
	devpoll_init = 1;
	if ((error = mod_install(&modlinkage)) != 0) {
		kmem_free(devpolltbl, sizeof (caddr_t) * dptblsize);
		devpoll_init = 0;
	}
	return (error);
}

int
_fini()
{
	int error;

	if ((error = mod_remove(&modlinkage)) != 0) {
		return (error);
	}
	mutex_destroy(&devpoll_lock);
	kmem_free(devpolltbl, sizeof (caddr_t) * dptblsize);
	return (0);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*ARGSUSED*/
static int
dpattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	if (ddi_create_minor_node(devi, "poll", S_IFCHR, 0, DDI_PSEUDO, NULL)
	    == DDI_FAILURE) {
		ddi_remove_minor_node(devi, NULL);
		return (DDI_FAILURE);
	}
	dpdevi = devi;
	return (DDI_SUCCESS);
}

static int
dpdetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	ddi_remove_minor_node(devi, NULL);
	return (DDI_SUCCESS);
}

/* ARGSUSED */
static int
dpinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int error;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*result = (void *)dpdevi;
		error = DDI_SUCCESS;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
	}
	return (error);
}
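
/*
 * To make the Locking Design above concrete, a minimal sketch of the
 * writer-priority gate on a dp_entry_t.  (This is not a separate API;
 * the real sequences live inline in dpioctl() and dpwrite() below.)
 *
 *	reader (dpioctl):
 *		mutex_enter(&dpep->dpe_lock);
 *		while ((dpep->dpe_flag & DP_WRITER_PRESENT) != 0 ||
 *		    dpep->dpe_writerwait != 0)
 *			cv_wait_sig_swap(&dpep->dpe_cv, &dpep->dpe_lock);
 *		dpep->dpe_refcnt++;
 *		mutex_exit(&dpep->dpe_lock);
 *		... read the cached set ...
 *		DP_REFRELE(dpep);
 *
 *	writer (dpwrite):
 *		mutex_enter(&dpep->dpe_lock);
 *		dpep->dpe_writerwait++;
 *		while ((dpep->dpe_flag & DP_WRITER_PRESENT) != 0)
 *			cv_wait_sig_swap(&dpep->dpe_cv, &dpep->dpe_lock);
 *		dpep->dpe_writerwait--;
 *		dpep->dpe_flag |= DP_WRITER_PRESENT;
 *		dpep->dpe_refcnt++;
 *		...
 *
 * A non-zero dpe_writerwait stalls new readers, which is what gives
 * writers priority over readers.
 */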
/*
 * dp_pcache_poll has similar logic to pcache_poll() in poll.c.  The major
 * differences are: (1) /dev/poll requires scanning the bitmap starting at
 * where it was stopped last time, instead of always starting from 0;
 * (2) since the user may not have cleaned up the cached fds when they are
 * closed, some polldats in the cache may refer to closed or reused fds.
 * We need to check for those cases.
 *
 * NOTE: Upon closing an fd, automatic poll cache cleanup is done for
 *	 poll(2) caches but NOT for /dev/poll caches.  So expect some
 *	 stale entries!
 */
static int
dp_pcache_poll(dp_entry_t *dpep, void *dpbuf,
    pollcache_t *pcp, nfds_t nfds, int *fdcntp)
{
	int		start, ostart, end;
	int		fdcnt, fd;
	boolean_t	done;
	file_t		*fp;
	short		revent;
	boolean_t	no_wrap;
	pollhead_t	*php;
	polldat_t	*pdp;
	pollfd_t	*pfdp;
	epoll_event_t	*epoll;
	int		error = 0;
	short		mask = POLLRDHUP | POLLWRBAND;
	boolean_t	is_epoll = (dpep->dpe_flag & DP_ISEPOLLCOMPAT) != 0;

	ASSERT(MUTEX_HELD(&pcp->pc_lock));
	if (pcp->pc_bitmap == NULL) {
		/*
		 * No need to search because no poll fd
		 * has been cached.
		 */
		return (error);
	}

	if (is_epoll) {
		pfdp = NULL;
		epoll = (epoll_event_t *)dpbuf;
	} else {
		pfdp = (pollfd_t *)dpbuf;
		epoll = NULL;
	}
retry:
	start = ostart = pcp->pc_mapstart;
	end = pcp->pc_mapend;
	php = NULL;

	if (start == 0) {
		/*
		 * Started from the very beginning; no need to wrap around.
		 */
		no_wrap = B_TRUE;
	} else {
		no_wrap = B_FALSE;
	}
	done = B_FALSE;
	fdcnt = 0;
	while ((fdcnt < nfds) && !done) {
		php = NULL;
		revent = 0;
		/*
		 * Examine the bitmap in a circular fashion
		 * to avoid starvation.  Always resume from
		 * the last stop.  Scan till the end of the
		 * map, then wrap around.
		 */
		fd = bt_getlowbit(pcp->pc_bitmap, start, end);
		ASSERT(fd <= end);
		if (fd >= 0) {
			if (fd == end) {
				if (no_wrap) {
					done = B_TRUE;
				} else {
					start = 0;
					end = ostart - 1;
					no_wrap = B_TRUE;
				}
			} else {
				start = fd + 1;
			}
			pdp = pcache_lookup_fd(pcp, fd);
repoll:
			ASSERT(pdp != NULL);
			ASSERT(pdp->pd_fd == fd);
			if (pdp->pd_fp == NULL) {
				/*
				 * The fd is POLLREMOVEd.  This fd is
				 * logically no longer cached.  So move
				 * on to the next one.
				 */
				continue;
			}
			if ((fp = getf(fd)) == NULL) {
				/*
				 * The fd has been closed, but the user has
				 * not done a POLLREMOVE on this fd yet.
				 * Instead of cleaning it up here implicitly,
				 * we return POLLNVAL.  This is consistent
				 * with poll(2) polling a closed fd.  We hope
				 * this will remind the user to do a
				 * POLLREMOVE.
				 */
				if (!is_epoll && pfdp != NULL) {
					pfdp[fdcnt].fd = fd;
					pfdp[fdcnt].revents = POLLNVAL;
					fdcnt++;
					continue;
				}

				/*
				 * In the epoll compatibility case, we
				 * actually perform the implicit removal
				 * to remain closer to the epoll semantics.
				 */
				if (is_epoll) {
					pdp->pd_fp = NULL;
					pdp->pd_events = 0;

					if (php != NULL) {
						pollhead_delete(php, pdp);
						pdp->pd_php = NULL;
					}

					BT_CLEAR(pcp->pc_bitmap, fd);
					continue;
				}
			}

			if (fp != pdp->pd_fp) {
				/*
				 * The user is polling on a cached fd which
				 * was closed and then reused.  Unfortunately
				 * there is no good way to inform the user.
				 * If the file struct is also reused, we may
				 * not be able to detect the fd reuse at all.
				 * As long as this does not cause a system
				 * failure and/or a memory leak, we will play
				 * along.  The man page states that if the
				 * user does not clean up closed fds, polling
				 * results will be indeterministic.
				 *
				 * XXX - perhaps log the detection of fd
				 * reuse?
				 */
				pdp->pd_fp = fp;
			}
			/*
			 * XXX - pollrelock() logic needs to know which
			 * pollcache lock to grab.  It'd be a cleaner
			 * solution if we could pass pcp as an argument
			 * in the VOP_POLL interface instead of implicitly
			 * passing it via the thread_t struct.  On the
			 * other hand, changing the VOP_POLL interface would
			 * require every driver/file system poll routine to
			 * change.  May want to revisit the tradeoff later.
			 */
			curthread->t_pollcache = pcp;
			error = VOP_POLL(fp->f_vnode, pdp->pd_events, 0,
			    &revent, &php, NULL);
			curthread->t_pollcache = NULL;
			releasef(fd);
			if (error != 0) {
				break;
			}

			/*
			 * Layered devices (e.g. console driver)
			 * may change the vnode and thus the pollhead
			 * pointer out from underneath us.
			 */
			if (php != NULL && pdp->pd_php != NULL &&
			    php != pdp->pd_php) {
				pollhead_delete(pdp->pd_php, pdp);
				pdp->pd_php = php;
				pollhead_insert(php, pdp);
				/*
				 * The bit should still be set.
				 */
				ASSERT(BT_TEST(pcp->pc_bitmap, fd));
				goto retry;
			}

			if (revent != 0) {
				if (pfdp != NULL) {
					pfdp[fdcnt].fd = fd;
					pfdp[fdcnt].events = pdp->pd_events;
					pfdp[fdcnt].revents = revent;
				} else if (epoll != NULL) {
					epoll_event_t *ep = &epoll[fdcnt];

					ASSERT(epoll != NULL);
					ep->data.u64 = pdp->pd_epolldata;

					/*
					 * If any of the event bits are set for
					 * which poll and epoll representations
					 * differ, swizzle in the native epoll
					 * values.
					 */
					if (revent & mask) {
						ep->events = (revent & ~mask) |
						    ((revent & POLLRDHUP) ?
						    EPOLLRDHUP : 0) |
						    ((revent & POLLWRBAND) ?
						    EPOLLWRBAND : 0);
					} else {
						ep->events = revent;
					}

					/*
					 * We define POLLWRNORM to be POLLOUT,
					 * but epoll has separate definitions
					 * for them; if POLLOUT is set and the
					 * user has asked for EPOLLWRNORM, set
					 * that as well.
					 */
					if ((revent & POLLOUT) &&
					    (pdp->pd_events & EPOLLWRNORM)) {
						ep->events |= EPOLLWRNORM;
					}
				} else {
					pollstate_t *ps =
					    curthread->t_pollstate;
					/*
					 * The devpoll handle itself is being
					 * polled.  Notify the caller of any
					 * readable event(s), leaving as much
					 * state as possible untouched.
					 */
					VERIFY(fdcnt == 0);
					VERIFY(ps != NULL);

					/*
					 * If a call to pollunlock() fails
					 * during VOP_POLL, skip over the fd
					 * and continue polling.
					 *
					 * Otherwise, report that there is an
					 * event pending.
					 */
					if ((ps->ps_flags & POLLSTATE_ULFAIL)
					    != 0) {
						ps->ps_flags &=
						    ~POLLSTATE_ULFAIL;
						continue;
					} else {
						fdcnt++;
						break;
					}
				}

				/*
				 * If POLLET is set, clear the bit in the
				 * bitmap -- which effectively latches the
				 * edge on a pollwakeup() from the driver.
				 */
				if (pdp->pd_events & POLLET)
					BT_CLEAR(pcp->pc_bitmap, fd);

				/*
				 * If POLLONESHOT is set, perform the implicit
				 * POLLREMOVE.
				 */
				if (pdp->pd_events & POLLONESHOT) {
					pdp->pd_fp = NULL;
					pdp->pd_events = 0;

					if (php != NULL) {
						pollhead_delete(php, pdp);
						pdp->pd_php = NULL;
					}

					BT_CLEAR(pcp->pc_bitmap, fd);
				}

				fdcnt++;
			} else if (php != NULL) {
				/*
				 * We clear a bit or cache a poll fd if
				 * the driver returns a poll head ptr,
				 * which is expected in the case of 0
				 * revents.  Some buggy drivers may return
				 * a NULL php pointer with 0 revents.  In
				 * this case, we just treat the driver as
				 * "noncachable" and do not clear the bit
				 * in the bitmap.
				 */
				if ((pdp->pd_php != NULL) &&
				    ((pcp->pc_flag & PC_POLLWAKE) == 0)) {
					BT_CLEAR(pcp->pc_bitmap, fd);
				}
				if (pdp->pd_php == NULL) {
					pollhead_insert(php, pdp);
					pdp->pd_php = php;
					/*
					 * An event of interest may have
					 * arrived between the VOP_POLL() and
					 * the pollhead_insert(); check again.
					 */
					goto repoll;
				}
			}
		} else {
			/*
			 * No bit set in the range.  Check for wrap around.
			 */
			if (!no_wrap) {
				start = 0;
				end = ostart - 1;
				no_wrap = B_TRUE;
			} else {
				done = B_TRUE;
			}
		}
	}

	if (!done) {
		pcp->pc_mapstart = start;
	}
	ASSERT(*fdcntp == 0);
	*fdcntp = fdcnt;
	return (error);
}

/*ARGSUSED*/
static int
dpopen(dev_t *devp, int flag, int otyp, cred_t *credp)
{
	minor_t		minordev;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;

	ASSERT(devpoll_init);
	ASSERT(dptblsize <= MAXMIN);
	mutex_enter(&devpoll_lock);
	for (minordev = 0; minordev < dptblsize; minordev++) {
		if (devpolltbl[minordev] == NULL) {
			devpolltbl[minordev] = (dp_entry_t *)RESERVED;
			break;
		}
	}
	if (minordev == dptblsize) {
		dp_entry_t	**newtbl;
		size_t		oldsize;

		/*
		 * Used up every entry in the existing devpoll table.
		 * Grow the table by DEVPOLLSIZE.
		 */
		if ((oldsize = dptblsize) >= MAXMIN) {
			mutex_exit(&devpoll_lock);
			return (ENXIO);
		}
		dptblsize += DEVPOLLSIZE;
		if (dptblsize > MAXMIN) {
			dptblsize = MAXMIN;
		}
		newtbl = kmem_zalloc(sizeof (caddr_t) * dptblsize, KM_SLEEP);
		bcopy(devpolltbl, newtbl, sizeof (caddr_t) * oldsize);
		kmem_free(devpolltbl, sizeof (caddr_t) * oldsize);
		devpolltbl = newtbl;
		devpolltbl[minordev] = (dp_entry_t *)RESERVED;
	}
	mutex_exit(&devpoll_lock);

	dpep = kmem_zalloc(sizeof (dp_entry_t), KM_SLEEP);
	/*
	 * Allocate a pollcache skeleton here.  Delay allocating bitmap
	 * structures until dpwrite() time, since we don't know the
	 * optimal size yet.  We also delay setting the pid until either
	 * dpwrite() or an attempt to poll on the instance, allowing parents
	 * to create instances of /dev/poll for their children.  (In the
	 * epoll compatibility case, this check isn't performed to maintain
	 * semantic compatibility.)
	 */
	pcp = pcache_alloc();
	dpep->dpe_pcache = pcp;
	pcp->pc_pid = -1;
	*devp = makedevice(getmajor(*devp), minordev);	/* clone the driver */
	mutex_enter(&devpoll_lock);
	ASSERT(minordev < dptblsize);
	ASSERT(devpolltbl[minordev] == (dp_entry_t *)RESERVED);
	devpolltbl[minordev] = dpep;
	mutex_exit(&devpoll_lock);
	return (0);
}
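
/*
 * For orientation, a rough sketch of a user-level consumer of the write()
 * and DP_POLL interfaces implemented below.  (NRESULTS and `sock' are
 * hypothetical names; see poll(7d) for authoritative usage.)
 *
 *	int wfd = open("/dev/poll", O_RDWR);
 *	pollfd_t pfd;
 *	pollfd_t results[NRESULTS];
 *	struct dvpoll dvp;
 *
 *	pfd.fd = sock;				// fd to watch
 *	pfd.events = POLLIN;
 *	(void) write(wfd, &pfd, sizeof (pfd));	// add to the cached set
 *
 *	dvp.dp_fds = results;
 *	dvp.dp_nfds = NRESULTS;
 *	dvp.dp_timeout = -1;			// block indefinitely
 *	n = ioctl(wfd, DP_POLL, &dvp);		// n = count of ready fds
 *
 * Writing an entry whose events field is POLLREMOVE drops the fd from the
 * cached set, as handled in dpwrite() below.
 */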
/*
 * Write to /dev/poll to add/remove fds to/from a cached poll fd set,
 * or to change the poll events for a watched fd.
 */
/*ARGSUSED*/
static int
dpwrite(dev_t dev, struct uio *uiop, cred_t *credp)
{
	minor_t		minor;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;
	pollfd_t	*pollfdp, *pfdp;
	dvpoll_epollfd_t *epfdp;
	uintptr_t	limit;
	int		error, size;
	ssize_t		uiosize;
	nfds_t		pollfdnum;
	struct pollhead	*php = NULL;
	polldat_t	*pdp;
	int		fd;
	file_t		*fp;
	boolean_t	is_epoll, fds_added = B_FALSE;

	minor = getminor(dev);

	mutex_enter(&devpoll_lock);
	ASSERT(minor < dptblsize);
	dpep = devpolltbl[minor];
	ASSERT(dpep != NULL);
	mutex_exit(&devpoll_lock);

	mutex_enter(&dpep->dpe_lock);
	pcp = dpep->dpe_pcache;
	is_epoll = (dpep->dpe_flag & DP_ISEPOLLCOMPAT) != 0;
	size = (is_epoll) ? sizeof (dvpoll_epollfd_t) : sizeof (pollfd_t);
	mutex_exit(&dpep->dpe_lock);

	if (!is_epoll && curproc->p_pid != pcp->pc_pid) {
		if (pcp->pc_pid != -1) {
			return (EACCES);
		}

		pcp->pc_pid = curproc->p_pid;
	}

	uiosize = uiop->uio_resid;
	pollfdnum = uiosize / size;

	/*
	 * We want to make sure that pollfdnum isn't large enough to DoS us,
	 * but we also don't want to grab p_lock unnecessarily -- so we
	 * perform the full check against our resource limits if and only if
	 * pollfdnum is larger than the known-to-be-sane value of UINT8_MAX.
	 */
	if (pollfdnum > UINT8_MAX) {
		mutex_enter(&curproc->p_lock);
		if (pollfdnum >
		    (uint_t)rctl_enforced_value(rctlproc_legacy[RLIMIT_NOFILE],
		    curproc->p_rctls, curproc)) {
			(void) rctl_action(rctlproc_legacy[RLIMIT_NOFILE],
			    curproc->p_rctls, curproc, RCA_SAFE);
			mutex_exit(&curproc->p_lock);
			return (EINVAL);
		}
		mutex_exit(&curproc->p_lock);
	}

	/*
	 * Copy in the pollfd array.  Walk through the array and add
	 * each polled fd to the cached set.
	 */
	pollfdp = kmem_alloc(uiosize, KM_SLEEP);
	limit = (uintptr_t)pollfdp + (pollfdnum * size);

	/*
	 * Although /dev/poll uses the write(2) interface to cache fds, it's
	 * not supposed to function as a seekable device.  To prevent the
	 * offset from growing and eventually exceeding the maximum, reset
	 * the offset here for every call.
	 */
	uiop->uio_loffset = 0;
	if ((error = uiomove((caddr_t)pollfdp, uiosize, UIO_WRITE, uiop))
	    != 0) {
		kmem_free(pollfdp, uiosize);
		return (error);
	}
	/*
	 * We are about to enter the core portion of dpwrite().  Make sure
	 * this write has exclusive access in this portion of the code,
	 * i.e., no other writers in this code.
	 *
	 * Waiting for all readers to drop their references to the dpe is
	 * unnecessary since the pollcache itself is protected by pc_lock.
	 */
	mutex_enter(&dpep->dpe_lock);
	dpep->dpe_writerwait++;
	while ((dpep->dpe_flag & DP_WRITER_PRESENT) != 0) {
		ASSERT(dpep->dpe_refcnt != 0);

		if (!cv_wait_sig_swap(&dpep->dpe_cv, &dpep->dpe_lock)) {
			dpep->dpe_writerwait--;
			mutex_exit(&dpep->dpe_lock);
			kmem_free(pollfdp, uiosize);
			return (EINTR);
		}
	}
	dpep->dpe_writerwait--;
	dpep->dpe_flag |= DP_WRITER_PRESENT;
	dpep->dpe_refcnt++;

	if (!is_epoll && (dpep->dpe_flag & DP_ISEPOLLCOMPAT) != 0) {
		/*
		 * The epoll compat mode was enabled while we were waiting to
		 * establish write access.  It is not safe to continue since
		 * state was prepared for non-epoll operation.
		 */
		error = EBUSY;
		goto bypass;
	}
	mutex_exit(&dpep->dpe_lock);

	/*
	 * Since dpwrite() may recursively walk an added /dev/poll handle,
	 * pollstate_enter() deadlock and loop detection must be used.
	 */
	(void) pollstate_create();
	VERIFY(pollstate_enter(pcp) == PSE_SUCCESS);

	if (pcp->pc_bitmap == NULL) {
		pcache_create(pcp, pollfdnum);
	}
	for (pfdp = pollfdp; (uintptr_t)pfdp < limit;
	    pfdp = (pollfd_t *)((uintptr_t)pfdp + size)) {
		fd = pfdp->fd;
		if ((uint_t)fd >= P_FINFO(curproc)->fi_nfiles) {
			/*
			 * epoll semantics demand that we return EBADF if our
			 * specified fd is invalid.
			 */
			if (is_epoll) {
				error = EBADF;
				break;
			}

			continue;
		}

		pdp = pcache_lookup_fd(pcp, fd);
		if (pfdp->events != POLLREMOVE) {

			fp = NULL;

			if (pdp == NULL) {
				/*
				 * If we're in epoll compatibility mode, check
				 * that the fd is valid before allocating
				 * anything for it; epoll semantics demand
				 * that we return EBADF if our specified fd
				 * is invalid.
				 */
				if (is_epoll) {
					if ((fp = getf(fd)) == NULL) {
						error = EBADF;
						break;
					}
				}

				pdp = pcache_alloc_fd(0);
				pdp->pd_fd = fd;
				pdp->pd_pcache = pcp;
				pcache_insert_fd(pcp, pdp, pollfdnum);
			} else {
				/*
				 * epoll semantics demand that we error out if
				 * a file descriptor is added twice, which we
				 * check (imperfectly) by checking if we both
				 * have the file descriptor cached and the
				 * file pointer that corresponds to the file
				 * descriptor matches our cached value.  If
				 * there is a pointer mismatch, the file
				 * descriptor was closed without being
				 * removed.  The converse is clearly not true,
				 * however, so to narrow the window by which
				 * a spurious EEXIST may be returned, we also
				 * check if this fp has been added to an
				 * epoll control descriptor in the past; if
				 * it hasn't, we know that this is due to fp
				 * reuse -- it's not a true EEXIST case.  (By
				 * performing this additional check, we limit
				 * the window of spurious EEXIST to situations
				 * where a single file descriptor is being
				 * used across two or more epoll control
				 * descriptors -- and even then, the file
				 * descriptor must be closed and reused in a
				 * relatively tight time span.)
				 */
				if (is_epoll) {
					if (pdp->pd_fp != NULL &&
					    (fp = getf(fd)) != NULL &&
					    fp == pdp->pd_fp &&
					    (fp->f_flag2 & FEPOLLED)) {
						error = EEXIST;
						releasef(fd);
						break;
					}

					/*
					 * We have decided that the cached
					 * information was stale: it either
					 * didn't match, or the fp had never
					 * actually been epoll()'d on before.
					 * We need to now clear our pd_events
					 * to assure that we don't mistakenly
					 * operate on cached event disposition.
					 */
					pdp->pd_events = 0;
				}
			}

			if (is_epoll) {
				epfdp = (dvpoll_epollfd_t *)pfdp;
				pdp->pd_epolldata = epfdp->dpep_data;
			}

			ASSERT(pdp->pd_fd == fd);
			ASSERT(pdp->pd_pcache == pcp);
			if (fd >= pcp->pc_mapsize) {
				mutex_exit(&pcp->pc_lock);
				pcache_grow_map(pcp, fd);
				mutex_enter(&pcp->pc_lock);
			}
			if (fd > pcp->pc_mapend) {
				pcp->pc_mapend = fd;
			}
			if (fp == NULL && (fp = getf(fd)) == NULL) {
				/*
				 * The fd is not valid.  Since we can't pass
				 * this error back in the write() call, set
				 * the bit in the bitmap to force the DP_POLL
				 * ioctl to examine it.
				 */
				BT_SET(pcp->pc_bitmap, fd);
				pdp->pd_events |= pfdp->events;
				continue;
			}

			/*
			 * To (greatly) reduce EEXIST false positives, we
			 * denote that this fp has been epoll()'d.  We do this
			 * regardless of epoll compatibility mode, as the flag
			 * is harmless if not in epoll compatibility mode.
			 */
			fp->f_flag2 |= FEPOLLED;

			/*
			 * Don't do VOP_POLL for an already cached fd with
			 * the same poll events.
			 */
			if ((pdp->pd_events == pfdp->events) &&
			    (pdp->pd_fp == fp)) {
				/*
				 * The events are already cached.
				 */
				releasef(fd);
				continue;
			}

			/*
			 * Do VOP_POLL and cache this poll fd.
			 *
			 * XXX - pollrelock() logic needs to know which
			 * pollcache lock to grab.  It'd be a cleaner
			 * solution if we could pass pcp as an argument
			 * in the VOP_POLL interface instead of implicitly
			 * passing it via the thread_t struct.  On the
			 * other hand, changing the VOP_POLL interface would
			 * require every driver/file system poll routine to
			 * change.  May want to revisit the tradeoff later.
			 */
			curthread->t_pollcache = pcp;
			error = VOP_POLL(fp->f_vnode, pfdp->events, 0,
			    &pfdp->revents, &php, NULL);
			curthread->t_pollcache = NULL;
			/*
			 * We always set the bit when this fd is cached;
			 * this forces the first DP_POLL to poll this fd.
			 * Real performance gain comes from subsequent
			 * DP_POLL.  We also attempt a pollhead_insert();
			 * if it's not possible, we'll do it in dpioctl().
			 */
			BT_SET(pcp->pc_bitmap, fd);
			if (error != 0) {
				releasef(fd);
				break;
			}
			pdp->pd_fp = fp;
			pdp->pd_events |= pfdp->events;
			if (php != NULL) {
				if (pdp->pd_php == NULL) {
					pollhead_insert(php, pdp);
					pdp->pd_php = php;
				} else {
					if (pdp->pd_php != php) {
						pollhead_delete(pdp->pd_php,
						    pdp);
						pollhead_insert(php, pdp);
						pdp->pd_php = php;
					}
				}
			}
			fds_added = B_TRUE;
			releasef(fd);
		} else {
			if (pdp == NULL || pdp->pd_fp == NULL) {
				if (is_epoll) {
					/*
					 * As with the add case (above), epoll
					 * semantics demand that we error out
					 * in this case.
					 */
					error = ENOENT;
					break;
				}

				continue;
			}
			ASSERT(pdp->pd_fd == fd);
			pdp->pd_fp = NULL;
			pdp->pd_events = 0;
			ASSERT(pdp->pd_thread == NULL);
			if (pdp->pd_php != NULL) {
				pollhead_delete(pdp->pd_php, pdp);
				pdp->pd_php = NULL;
			}
			BT_CLEAR(pcp->pc_bitmap, fd);
		}
	}
	/*
	 * Wake any pollcache waiters so they can check the new descriptors.
	 *
	 * Any fds added to a recursion-capable pollcache could themselves be
	 * /dev/poll handles.  To ensure that proper event propagation occurs,
	 * parent pollcaches are woken too, so that they can create any needed
	 * pollcache links.
	 */
	if (fds_added) {
		cv_broadcast(&pcp->pc_cv);
		pcache_wake_parents(pcp);
	}
	pollstate_exit(pcp);
	mutex_enter(&dpep->dpe_lock);
bypass:
	dpep->dpe_flag &= ~DP_WRITER_PRESENT;
	dpep->dpe_refcnt--;
	cv_broadcast(&dpep->dpe_cv);
	mutex_exit(&dpep->dpe_lock);
	kmem_free(pollfdp, uiosize);
	return (error);
}

#define	DP_SIGMASK_RESTORE(ksetp) {					\
	if (ksetp != NULL) {						\
		mutex_enter(&p->p_lock);				\
		if (lwp->lwp_cursig == 0) {				\
			t->t_hold = lwp->lwp_sigoldmask;		\
			t->t_flag &= ~T_TOMASK;				\
		}							\
		mutex_exit(&p->p_lock);					\
	}								\
}
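
/*
 * A hedged sketch of how an epoll emulation layer might drive the
 * DP_EPOLLCOMPAT mode handled in dpioctl() below, assuming the
 * dvpoll_epollfd_t layout from <sys/devpoll.h> (a pollfd_t followed by
 * 64 bits of user data); `sock' and `cookie' are hypothetical:
 *
 *	int wfd = open("/dev/poll", O_RDWR);
 *	dvpoll_epollfd_t epfd;
 *
 *	(void) ioctl(wfd, DP_EPOLLCOMPAT, 0);	// one-way mode switch
 *
 *	epfd.dpep_pollfd.fd = sock;
 *	epfd.dpep_pollfd.events = POLLIN | POLLET;
 *	epfd.dpep_data = (uint64_t)(uintptr_t)cookie;
 *	(void) write(wfd, &epfd, sizeof (epfd));
 *
 * In this mode DP_POLL copies out epoll_event_t records rather than
 * pollfd_t, with dpep_data returned via ep->data.u64 (see the epoll
 * branch of dp_pcache_poll() above).
 */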
/*ARGSUSED*/
static int
dpioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
{
	minor_t		minor;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;
	hrtime_t	now;
	int		error = 0;
	boolean_t	is_epoll;
	STRUCT_DECL(dvpoll, dvpoll);

	if (cmd == DP_POLL || cmd == DP_PPOLL) {
		/* do this now, before we sleep on DP_WRITER_PRESENT */
		now = gethrtime();
	}

	minor = getminor(dev);
	mutex_enter(&devpoll_lock);
	ASSERT(minor < dptblsize);
	dpep = devpolltbl[minor];
	mutex_exit(&devpoll_lock);
	ASSERT(dpep != NULL);
	pcp = dpep->dpe_pcache;

	mutex_enter(&dpep->dpe_lock);
	is_epoll = (dpep->dpe_flag & DP_ISEPOLLCOMPAT) != 0;

	if (cmd == DP_EPOLLCOMPAT) {
		if (dpep->dpe_refcnt != 0) {
			/*
			 * We can't turn on epoll compatibility while there
			 * are outstanding operations.
			 */
			mutex_exit(&dpep->dpe_lock);
			return (EBUSY);
		}

		/*
		 * epoll compatibility is a one-way street: there's no way
		 * to turn it off for a particular open.
		 */
		dpep->dpe_flag |= DP_ISEPOLLCOMPAT;
		mutex_exit(&dpep->dpe_lock);

		return (0);
	}

	if (!is_epoll && curproc->p_pid != pcp->pc_pid) {
		if (pcp->pc_pid != -1) {
			mutex_exit(&dpep->dpe_lock);
			return (EACCES);
		}

		pcp->pc_pid = curproc->p_pid;
	}

	/* Wait until all writers have cleared the handle before continuing */
	while ((dpep->dpe_flag & DP_WRITER_PRESENT) != 0 ||
	    (dpep->dpe_writerwait != 0)) {
		if (!cv_wait_sig_swap(&dpep->dpe_cv, &dpep->dpe_lock)) {
			mutex_exit(&dpep->dpe_lock);
			return (EINTR);
		}
	}
	dpep->dpe_refcnt++;
	mutex_exit(&dpep->dpe_lock);

	switch (cmd) {
	case DP_POLL:
	case DP_PPOLL:
	{
		pollstate_t	*ps;
		nfds_t		nfds;
		int		fdcnt = 0;
		size_t		size, fdsize, dpsize;
		hrtime_t	deadline = 0;
		k_sigset_t	*ksetp = NULL;
		k_sigset_t	kset;
		sigset_t	set;
		kthread_t	*t = curthread;
		klwp_t		*lwp = ttolwp(t);
		struct proc	*p = ttoproc(curthread);

		STRUCT_INIT(dvpoll, mode);

		/*
		 * The dp_setp member is only required/consumed for DP_PPOLL,
		 * which otherwise uses the same structure as DP_POLL.
		 */
		if (cmd == DP_POLL) {
			dpsize = (uintptr_t)STRUCT_FADDR(dvpoll, dp_setp) -
			    (uintptr_t)STRUCT_FADDR(dvpoll, dp_fds);
		} else {
			ASSERT(cmd == DP_PPOLL);
			dpsize = STRUCT_SIZE(dvpoll);
		}

		if ((mode & FKIOCTL) != 0) {
			/* Kernel-internal ioctl call */
			bcopy((caddr_t)arg, STRUCT_BUF(dvpoll), dpsize);
			error = 0;
		} else {
			error = copyin((caddr_t)arg, STRUCT_BUF(dvpoll),
			    dpsize);
		}

		if (error) {
			DP_REFRELE(dpep);
			return (EFAULT);
		}

		deadline = STRUCT_FGET(dvpoll, dp_timeout);
		if (deadline > 0) {
			/*
			 * Convert the deadline from relative milliseconds
			 * to absolute nanoseconds.  The caller must wait
			 * for at least a tick.
			 */
			deadline = MSEC2NSEC(deadline);
			deadline = MAX(deadline, nsec_per_tick);
			deadline += now;
		}

		if (cmd == DP_PPOLL) {
			void *setp = STRUCT_FGETP(dvpoll, dp_setp);

			if (setp != NULL) {
				if (copyin(setp, &set, sizeof (set))) {
					DP_REFRELE(dpep);
					return (EFAULT);
				}

				sigutok(&set, &kset);
				ksetp = &kset;

				mutex_enter(&p->p_lock);
				schedctl_finish_sigblock(t);
				lwp->lwp_sigoldmask = t->t_hold;
				t->t_hold = *ksetp;
				t->t_flag |= T_TOMASK;

				/*
				 * Like ppoll() with a non-NULL sigset, we'll
				 * call cv_reltimedwait_sig() just to check
				 * for signals.  This call will return
				 * immediately with either 0 (signalled) or
				 * -1 (no signal).  There are some conditions
				 * whereby we can get 0 from
				 * cv_reltimedwait_sig() without a true
				 * signal (e.g., a directed stop), so we
				 * restore our signal mask in the unlikely
				 * event that lwp_cursig is 0.
				 */
				if (!cv_reltimedwait_sig(&t->t_delay_cv,
				    &p->p_lock, 0, TR_CLOCK_TICK)) {
					if (lwp->lwp_cursig == 0) {
						t->t_hold =
						    lwp->lwp_sigoldmask;
						t->t_flag &= ~T_TOMASK;
					}

					mutex_exit(&p->p_lock);

					DP_REFRELE(dpep);
					return (EINTR);
				}

				mutex_exit(&p->p_lock);
			}
		}

		if ((nfds = STRUCT_FGET(dvpoll, dp_nfds)) == 0) {
			/*
			 * We are just using DP_POLL to sleep, so we don't
			 * need any of the devpoll apparatus.  Do not check
			 * for signals if we have a zero timeout.
			 */
			DP_REFRELE(dpep);
			if (deadline == 0) {
				DP_SIGMASK_RESTORE(ksetp);
				return (0);
			}

			mutex_enter(&curthread->t_delay_lock);
			while ((error =
			    cv_timedwait_sig_hrtime(&curthread->t_delay_cv,
			    &curthread->t_delay_lock, deadline)) > 0)
				continue;
			mutex_exit(&curthread->t_delay_lock);

			DP_SIGMASK_RESTORE(ksetp);

			return (error == 0 ? EINTR : 0);
		}

		if (is_epoll) {
			size = nfds * (fdsize = sizeof (epoll_event_t));
		} else {
			size = nfds * (fdsize = sizeof (pollfd_t));
		}

		/*
		 * XXX It would be nice not to have to alloc each time, but it
		 * requires another per thread structure hook.  This can be
		 * implemented later if data suggests that it's necessary.
		 */
		ps = pollstate_create();

		if (ps->ps_dpbufsize < size) {
			/*
			 * If nfds is larger than twice the current maximum
			 * open file count, we'll silently clamp it.  This
			 * only limits our exposure to allocating an
			 * inordinate amount of kernel memory; it doesn't
			 * otherwise affect the semantics.  (We have this
			 * check at twice the maximum instead of merely the
			 * maximum because some applications pass an nfds
			 * that is only slightly larger than their limit.)
			 */
			mutex_enter(&p->p_lock);
			if ((nfds >> 1) > p->p_fno_ctl) {
				nfds = p->p_fno_ctl;
				size = nfds * fdsize;
			}
			mutex_exit(&p->p_lock);

			if (ps->ps_dpbufsize < size) {
				kmem_free(ps->ps_dpbuf, ps->ps_dpbufsize);
				ps->ps_dpbuf = kmem_zalloc(size, KM_SLEEP);
				ps->ps_dpbufsize = size;
			}
		}

		VERIFY(pollstate_enter(pcp) == PSE_SUCCESS);
		for (;;) {
			pcp->pc_flag &= ~PC_POLLWAKE;

			/*
			 * Mark all child pcachelinks as stale.
			 * Those which are still part of the tree will be
			 * marked as valid during the poll.
			 */
			pcachelink_mark_stale(pcp);

			error = dp_pcache_poll(dpep, ps->ps_dpbuf,
			    pcp, nfds, &fdcnt);
			if (fdcnt > 0 || error != 0)
				break;

			/* Purge still-stale child pcachelinks */
			pcachelink_purge_stale(pcp);

			/*
			 * A pollwake has happened since we polled the cache.
			 */
			if (pcp->pc_flag & PC_POLLWAKE)
				continue;

			/*
			 * Sleep until we are notified, signaled, or timed
			 * out.
			 */
			if (deadline == 0) {
				/* immediate timeout; do not check signals */
				break;
			}

			error = cv_timedwait_sig_hrtime(&pcp->pc_cv,
			    &pcp->pc_lock, deadline);

			/*
			 * If we were awakened by a signal or timeout then
			 * break the loop, else poll again.
			 */
			if (error <= 0) {
				error = (error == 0) ? EINTR : 0;
				break;
			} else {
				error = 0;
			}
		}
		pollstate_exit(pcp);

		DP_SIGMASK_RESTORE(ksetp);

		if (error == 0 && fdcnt > 0) {
			if (copyout(ps->ps_dpbuf,
			    STRUCT_FGETP(dvpoll, dp_fds), fdcnt * fdsize)) {
				DP_REFRELE(dpep);
				return (EFAULT);
			}
			*rvalp = fdcnt;
		}
		break;
	}

	case DP_ISPOLLED:
	{
		pollfd_t	pollfd;
		polldat_t	*pdp;

		STRUCT_INIT(dvpoll, mode);
		error = copyin((caddr_t)arg, &pollfd, sizeof (pollfd_t));
		if (error) {
			DP_REFRELE(dpep);
			return (EFAULT);
		}
		mutex_enter(&pcp->pc_lock);
		if (pcp->pc_hash == NULL) {
			/*
			 * No need to search because no poll fd
			 * has been cached.
			 */
			mutex_exit(&pcp->pc_lock);
			DP_REFRELE(dpep);
			return (0);
		}
		if (pollfd.fd < 0) {
			mutex_exit(&pcp->pc_lock);
			break;
		}
		pdp = pcache_lookup_fd(pcp, pollfd.fd);
		if ((pdp != NULL) && (pdp->pd_fd == pollfd.fd) &&
		    (pdp->pd_fp != NULL)) {
			pollfd.revents = pdp->pd_events;
			if (copyout(&pollfd, (caddr_t)arg,
			    sizeof (pollfd_t))) {
				mutex_exit(&pcp->pc_lock);
				DP_REFRELE(dpep);
				return (EFAULT);
			}
			*rvalp = 1;
		}
		mutex_exit(&pcp->pc_lock);
		break;
	}

	default:
		DP_REFRELE(dpep);
		return (EINVAL);
	}
	DP_REFRELE(dpep);
	return (error);
}
/*
 * Overview of Recursive Polling
 *
 * It is possible for /dev/poll to poll for events on file descriptors which
 * themselves are /dev/poll handles.  Pending events in the child handle are
 * represented as readable data via the POLLIN flag.  To limit surface area,
 * this recursion is presently allowed on only /dev/poll handles which have
 * been placed in epoll mode via the DP_EPOLLCOMPAT ioctl.  Recursion depth
 * is limited to 5 in order to be consistent with Linux epoll.
 *
 * Extending dppoll() for VOP_POLL:
 *
 * The recursive /dev/poll implementation begins by extending dppoll() to
 * report when resources contained in the pollcache have relevant event
 * state.  At the highest level, it means calling dp_pcache_poll() so it
 * indicates if fd events are present without consuming them or altering
 * the pollcache bitmap.  This ensures that a subsequent DP_POLL operation
 * on the bitmap will yield the initiating event.  Additionally, the
 * VOP_POLL should return in such a way that dp_pcache_poll() does not
 * clear the parent bitmap entry which corresponds to the child /dev/poll
 * fd.  This means that child pollcaches will be checked during every poll,
 * which facilitates the wake-up behavior detailed below.
 *
 * Pollcache Links and Wake Events:
 *
 * Recursive /dev/poll avoids complicated pollcache locking constraints
 * during pollwakeup events by eschewing the traditional pollhead mechanism
 * in favor of a different approach.  For each pollcache at the root of a
 * recursive /dev/poll "tree", pcachelink_t structures are established to
 * all child /dev/poll pollcaches.  During pollnotify() in a child
 * pollcache, the linked list of pcachelink_t entries is walked, where
 * those marked as valid incur a cv_broadcast to their parent pollcache.
 * Most notably, these pcachelink_t cv wakeups are performed without
 * acquiring pc_lock on the parent pollcache (which would require careful
 * deadlock avoidance).  This still allows the woken poll on the parent to
 * discover the pertinent events due to the fact that bitmap entries for
 * the child pollcache are always maintained by the dppoll() logic above.
 *
 * Depth Limiting and Loop Prevention:
 *
 * As each pollcache is encountered (either via DP_POLL or dppoll()), depth
 * and loop constraints are enforced via pollstate_enter().  The pollcache_t
 * pointer is compared against any existing entries in ps_pc_stack and is
 * added to the end if no match (and therefore no loop) is found.  Once poll
 * operations for a given pollcache_t are complete, pollstate_exit() clears
 * the pointer from the list.  The pollstate_enter() and pollstate_exit()
 * functions are responsible for acquiring and releasing pc_lock,
 * respectively.
 *
 * Deadlock Safety:
 *
 * Descending through a tree of recursive /dev/poll handles involves the
 * tricky business of sequentially entering multiple pollcache locks.  This
 * tree topology cannot define a lock acquisition order in such a way that
 * it is immune to deadlocks between threads.  The pollstate_enter() and
 * pollstate_exit() functions provide an interface for recursive /dev/poll
 * operations to safely lock pollcaches while failing gracefully in the
 * face of deadlocking topologies.  (See pollstate_contend() for more
 * detail about how deadlocks are detected and resolved.)
 */
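
/*
 * To make the recursion overview concrete, a hedged user-level sketch of
 * one /dev/poll handle polling another (both must already be in epoll
 * compat mode, per the restriction noted above):
 *
 *	int inner = open("/dev/poll", O_RDWR);
 *	int outer = open("/dev/poll", O_RDWR);
 *
 *	(void) ioctl(inner, DP_EPOLLCOMPAT, 0);
 *	(void) ioctl(outer, DP_EPOLLCOMPAT, 0);
 *	... cache some fd in `inner' via write() ...
 *	... cache `inner' itself in `outer' with events = POLLIN ...
 *
 * When the fd cached in `inner' sees an event, pollnotify() on the inner
 * pollcache walks its pcachelink_t list and broadcasts to the outer
 * pollcache, whose next DP_POLL reports POLLIN on `inner' via dppoll()
 * below.
 */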

/*ARGSUSED*/
static int
dppoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **phpp)
{
	minor_t		minor;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;
	int		res, rc = 0;

	minor = getminor(dev);
	mutex_enter(&devpoll_lock);
	ASSERT(minor < dptblsize);
	dpep = devpolltbl[minor];
	ASSERT(dpep != NULL);
	mutex_exit(&devpoll_lock);

	mutex_enter(&dpep->dpe_lock);
	if ((dpep->dpe_flag & DP_ISEPOLLCOMPAT) == 0) {
		/* Poll recursion is not yet supported for non-epoll handles */
		*reventsp = POLLERR;
		mutex_exit(&dpep->dpe_lock);
		return (0);
	} else {
		dpep->dpe_refcnt++;
		pcp = dpep->dpe_pcache;
		mutex_exit(&dpep->dpe_lock);
	}

	res = pollstate_enter(pcp);
	if (res == PSE_SUCCESS) {
		nfds_t		nfds = 1;
		int		fdcnt = 0;
		pollstate_t	*ps = curthread->t_pollstate;

		rc = dp_pcache_poll(dpep, NULL, pcp, nfds, &fdcnt);
		if (rc == 0) {
			*reventsp = (fdcnt > 0) ? POLLIN : 0;
		}
		pcachelink_assoc(pcp, ps->ps_pc_stack[0]);
		pollstate_exit(pcp);
	} else {
		switch (res) {
		case PSE_FAIL_DEPTH:
			rc = EINVAL;
			break;
		case PSE_FAIL_LOOP:
		case PSE_FAIL_DEADLOCK:
			rc = ELOOP;
			break;
		default:
			/*
			 * If anything else has gone awry, such as being
			 * polled from an unexpected context, fall back to
			 * the recursion-intolerant response.
			 */
			*reventsp = POLLERR;
			rc = 0;
			break;
		}
	}

	DP_REFRELE(dpep);
	return (rc);
}

/*
 * devpoll close should do enough clean-up before the pollcache is deleted,
 * i.e., it should ensure that no one still references the pollcache later.
 * There is no "permission" check in here.  Any process holding the last
 * reference to this /dev/poll fd can close it.
 */
/*ARGSUSED*/
static int
dpclose(dev_t dev, int flag, int otyp, cred_t *credp)
{
	minor_t		minor;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;
	int		i;
	polldat_t	**hashtbl;
	polldat_t	*pdp;

	minor = getminor(dev);

	mutex_enter(&devpoll_lock);
	dpep = devpolltbl[minor];
	ASSERT(dpep != NULL);
	devpolltbl[minor] = NULL;
	mutex_exit(&devpoll_lock);
	pcp = dpep->dpe_pcache;
	ASSERT(pcp != NULL);
	/*
	 * At this point, no other lwp can access this pollcache via the
	 * /dev/poll fd.  This pollcache is going away, so do the clean-up
	 * without the pc_lock.
	 */
	hashtbl = pcp->pc_hash;
	for (i = 0; i < pcp->pc_hashsize; i++) {
		for (pdp = hashtbl[i]; pdp; pdp = pdp->pd_hashnext) {
			if (pdp->pd_php != NULL) {
				pollhead_delete(pdp->pd_php, pdp);
				pdp->pd_php = NULL;
				pdp->pd_fp = NULL;
			}
		}
	}
	/*
	 * pollwakeup() may still interact with this pollcache.  Wait until
	 * it is done.
	 */
	mutex_enter(&pcp->pc_no_exit);
	ASSERT(pcp->pc_busy >= 0);
	while (pcp->pc_busy > 0)
		cv_wait(&pcp->pc_busy_cv, &pcp->pc_no_exit);
	mutex_exit(&pcp->pc_no_exit);

	/* Clean up any pollcache links created via recursive /dev/poll */
	if (pcp->pc_parents != NULL || pcp->pc_children != NULL) {
		/*
		 * Because of the locking rules for pcachelink manipulation,
		 * acquiring pc_lock is required for this step.
		 */
		mutex_enter(&pcp->pc_lock);
		pcachelink_purge_all(pcp);
		mutex_exit(&pcp->pc_lock);
	}

	pcache_destroy(pcp);
	ASSERT(dpep->dpe_refcnt == 0);
	kmem_free(dpep, sizeof (dp_entry_t));
	return (0);
}

static void
pcachelink_locked_rele(pcachelink_t *pl)
{
	ASSERT(MUTEX_HELD(&pl->pcl_lock));
	VERIFY(pl->pcl_refcnt >= 1);

	pl->pcl_refcnt--;
	if (pl->pcl_refcnt == 0) {
		VERIFY(pl->pcl_state == PCL_INVALID);
		ASSERT(pl->pcl_parent_pc == NULL);
		ASSERT(pl->pcl_child_pc == NULL);
		ASSERT(pl->pcl_parent_next == NULL);
		ASSERT(pl->pcl_child_next == NULL);

		pl->pcl_state = PCL_FREE;
		mutex_destroy(&pl->pcl_lock);
		kmem_free(pl, sizeof (pcachelink_t));
	} else {
		mutex_exit(&pl->pcl_lock);
	}
}

/*
 * Associate parent and child pollcaches via a pcachelink_t.  If an existing
 * link (stale or valid) between the two is found, it will be reused.  If a
 * suitable link is not found for reuse, a new one will be allocated.
 */
static void
pcachelink_assoc(pollcache_t *child, pollcache_t *parent)
{
	pcachelink_t	*pl, **plpn;

	ASSERT(MUTEX_HELD(&child->pc_lock));
	ASSERT(MUTEX_HELD(&parent->pc_lock));

	/* Search for an existing link we can reuse. */
	plpn = &child->pc_parents;
	for (pl = child->pc_parents; pl != NULL; pl = *plpn) {
		mutex_enter(&pl->pcl_lock);
		if (pl->pcl_state == PCL_INVALID) {
			/* Clean any invalid links while walking the list */
			*plpn = pl->pcl_parent_next;
			pl->pcl_child_pc = NULL;
			pl->pcl_parent_next = NULL;
			pcachelink_locked_rele(pl);
		} else if (pl->pcl_parent_pc == parent) {
			/* Successfully found parent link */
			ASSERT(pl->pcl_state == PCL_VALID ||
			    pl->pcl_state == PCL_STALE);
			pl->pcl_state = PCL_VALID;
			mutex_exit(&pl->pcl_lock);
			return;
		} else {
			plpn = &pl->pcl_parent_next;
			mutex_exit(&pl->pcl_lock);
		}
	}

	/* No existing link to the parent was found.  Create a fresh one. */
	pl = kmem_zalloc(sizeof (pcachelink_t), KM_SLEEP);
	mutex_init(&pl->pcl_lock, NULL, MUTEX_DEFAULT, NULL);

	pl->pcl_parent_pc = parent;
	pl->pcl_child_next = parent->pc_children;
	parent->pc_children = pl;
	pl->pcl_refcnt++;

	pl->pcl_child_pc = child;
	pl->pcl_parent_next = child->pc_parents;
	child->pc_parents = pl;
	pl->pcl_refcnt++;

	pl->pcl_state = PCL_VALID;
}

/*
 * Mark all child links in a pollcache as stale.  Any invalid child links
 * found during iteration are purged.
 */
static void
pcachelink_mark_stale(pollcache_t *pcp)
{
	pcachelink_t	*pl, **plpn;

	ASSERT(MUTEX_HELD(&pcp->pc_lock));

	plpn = &pcp->pc_children;
	for (pl = pcp->pc_children; pl != NULL; pl = *plpn) {
		mutex_enter(&pl->pcl_lock);
		if (pl->pcl_state == PCL_INVALID) {
			/*
			 * Remove any invalid links while we are going to the
			 * trouble of walking the list.
			 */
			*plpn = pl->pcl_child_next;
			pl->pcl_parent_pc = NULL;
			pl->pcl_child_next = NULL;
			pcachelink_locked_rele(pl);
		} else {
			pl->pcl_state = PCL_STALE;
			plpn = &pl->pcl_child_next;
			mutex_exit(&pl->pcl_lock);
		}
	}
}
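
/*
 * Taken together with pcachelink_assoc() above and the purge functions
 * below, the link state machine over a single DP_POLL cycle is:
 *
 *	PCL_VALID   -> PCL_STALE	pcachelink_mark_stale(), before
 *					dp_pcache_poll() scans the cache
 *	PCL_STALE   -> PCL_VALID	pcachelink_assoc(), if dppoll()
 *					revisits the child during the scan
 *	PCL_STALE   -> PCL_INVALID	pcachelink_purge_stale(), after the
 *					scan completes
 *	PCL_INVALID -> PCL_FREE		pcachelink_locked_rele(), once the
 *					last reference is dropped
 */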
/*
 * Purge all stale (or invalid) child links from a pollcache.
 */
static void
pcachelink_purge_stale(pollcache_t *pcp)
{
	pcachelink_t	*pl, **plpn;

	ASSERT(MUTEX_HELD(&pcp->pc_lock));

	plpn = &pcp->pc_children;
	for (pl = pcp->pc_children; pl != NULL; pl = *plpn) {
		mutex_enter(&pl->pcl_lock);
		switch (pl->pcl_state) {
		case PCL_STALE:
			pl->pcl_state = PCL_INVALID;
			/* FALLTHROUGH */
		case PCL_INVALID:
			*plpn = pl->pcl_child_next;
			pl->pcl_parent_pc = NULL;
			pl->pcl_child_next = NULL;
			pcachelink_locked_rele(pl);
			break;
		default:
			plpn = &pl->pcl_child_next;
			mutex_exit(&pl->pcl_lock);
		}
	}
}

/*
 * Purge all child and parent links from a pollcache, regardless of status.
 */
static void
pcachelink_purge_all(pollcache_t *pcp)
{
	pcachelink_t	*pl, **plpn;

	ASSERT(MUTEX_HELD(&pcp->pc_lock));

	plpn = &pcp->pc_parents;
	for (pl = pcp->pc_parents; pl != NULL; pl = *plpn) {
		mutex_enter(&pl->pcl_lock);
		pl->pcl_state = PCL_INVALID;
		*plpn = pl->pcl_parent_next;
		pl->pcl_child_pc = NULL;
		pl->pcl_parent_next = NULL;
		pcachelink_locked_rele(pl);
	}

	plpn = &pcp->pc_children;
	for (pl = pcp->pc_children; pl != NULL; pl = *plpn) {
		mutex_enter(&pl->pcl_lock);
		pl->pcl_state = PCL_INVALID;
		*plpn = pl->pcl_child_next;
		pl->pcl_parent_pc = NULL;
		pl->pcl_child_next = NULL;
		pcachelink_locked_rele(pl);
	}

	ASSERT(pcp->pc_parents == NULL);
	ASSERT(pcp->pc_children == NULL);
}