diff --git a/external/cddl/osnet/dev/dtrace/dtrace_modevent.c b/external/cddl/osnet/dev/dtrace/dtrace_modevent.c index cc0f8103fb98..f1338840125a 100644 --- a/external/cddl/osnet/dev/dtrace/dtrace_modevent.c +++ b/external/cddl/osnet/dev/dtrace/dtrace_modevent.c @@ -42,9 +42,7 @@ dtrace_modcmd(modcmd_t cmd, void *data) return error; case MODULE_CMD_FINI: - error = devsw_detach(NULL, &dtrace_cdevsw); - if (error != 0) - return error; + devsw_detach(NULL, &dtrace_cdevsw); error = dtrace_unload(); if (error != 0) { diff --git a/external/cddl/osnet/dev/fbt/fbt.c b/external/cddl/osnet/dev/fbt/fbt.c index b367c2155292..46dd7c1f7f06 100644 --- a/external/cddl/osnet/dev/fbt/fbt.c +++ b/external/cddl/osnet/dev/fbt/fbt.c @@ -1329,7 +1329,8 @@ dtrace_fbt_modcmd(modcmd_t cmd, void *data) error = fbt_unload(); if (error != 0) return error; - return devsw_detach(NULL, &fbt_cdevsw); + devsw_detach(NULL, &fbt_cdevsw); + return 0; case MODULE_CMD_AUTOUNLOAD: return EBUSY; default: diff --git a/external/cddl/osnet/dev/sdt/sdt.c b/external/cddl/osnet/dev/sdt/sdt.c index c3ad129f8284..5a41270a2917 100644 --- a/external/cddl/osnet/dev/sdt/sdt.c +++ b/external/cddl/osnet/dev/sdt/sdt.c @@ -562,7 +562,8 @@ dtrace_sdt_modcmd(modcmd_t cmd, void *data) error = sdt_unload(); if (error != 0) return error; - return devsw_detach(NULL, &sdt_cdevsw); + devsw_detach(NULL, &sdt_cdevsw); + return 0; case MODULE_CMD_AUTOUNLOAD: return EBUSY; default: diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_ioctl.c b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_ioctl.c index 9e19cd1dc0c3..d74d8c71e54d 100644 --- a/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_ioctl.c +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_ioctl.c @@ -7231,7 +7231,7 @@ zfs_modcmd(modcmd_t cmd, void *arg) if (error) return error; - (void) devsw_detach(&zfs_bdevsw, &zfs_cdevsw); + devsw_detach(&zfs_bdevsw, &zfs_cdevsw); attacherr: zfs_sysctl_fini(); diff --git a/share/man/man9/devsw_attach.9 
b/share/man/man9/devsw_attach.9 index cf862be5846a..6ffc51957a3f 100644 --- a/share/man/man9/devsw_attach.9 +++ b/share/man/man9/devsw_attach.9 @@ -27,7 +27,7 @@ .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE .\" POSSIBILITY OF SUCH DAMAGE. .\" -.Dd April 30, 2017 +.Dd January 11, 2022 .Dt DEVSW 9 .Os .Sh NAME @@ -49,7 +49,7 @@ .Fa "const struct cdevsw *cdev" .Fa "devmajor_t *cmajor" .Fc -.Ft int +.Ft void .Fo devsw_detach .Fa "const struct bdevsw *bdev" .Fa "const struct cdevsw *cdev" @@ -130,6 +130,11 @@ and structures. .Fn devsw_detach should be called before a loaded device driver is unloaded. +The caller must ensure that there are no open instances of the device, +and that the device's +.Fn d_open +function will fail, before calling +.Fn devsw_detach . +.Pp The .Fn bdevsw_lookup @@ -155,10 +160,8 @@ or .Sh RETURN VALUES Upon successful completion, .Fn devsw_attach -and -.Fn devsw_detach -return 0. -Otherwise it returns an error value. 
.Pp In case of failure, .Fn bdevsw_lookup diff --git a/sys/coda/coda_psdev.c b/sys/coda/coda_psdev.c index cede16da3f53..7f531f03fe56 100644 --- a/sys/coda/coda_psdev.c +++ b/sys/coda/coda_psdev.c @@ -758,7 +758,7 @@ vcoda_modcmd(modcmd_t cmd, void *arg) if (VC_OPEN(vcp)) return EBUSY; } - return devsw_detach(NULL, &vcoda_cdevsw); + devsw_detach(NULL, &vcoda_cdevsw); } #endif break; diff --git a/sys/dev/ata/wd.c b/sys/dev/ata/wd.c index b5d7476d1e6c..672684a3b4aa 100644 --- a/sys/dev/ata/wd.c +++ b/sys/dev/ata/wd.c @@ -152,6 +152,8 @@ const struct bdevsw wd_bdevsw = { .d_dump = wddump, .d_psize = wdsize, .d_discard = wddiscard, + .d_cfdriver = &wd_cd, + .d_devtounit = disklabel_dev_unit, .d_flag = D_DISK }; @@ -167,6 +169,8 @@ const struct cdevsw wd_cdevsw = { .d_mmap = nommap, .d_kqfilter = nokqfilter, .d_discard = wddiscard, + .d_cfdriver = &wd_cd, + .d_devtounit = disklabel_dev_unit, .d_flag = D_DISK }; diff --git a/sys/dev/audio/audio.c b/sys/dev/audio/audio.c index d22ea989a180..176749ac449f 100644 --- a/sys/dev/audio/audio.c +++ b/sys/dev/audio/audio.c @@ -521,7 +521,6 @@ static int audio_exlock_mutex_enter(struct audio_softc *); static void audio_exlock_mutex_exit(struct audio_softc *); static int audio_exlock_enter(struct audio_softc *); static void audio_exlock_exit(struct audio_softc *); -static void audio_sc_acquire_foropen(struct audio_softc *, struct psref *); static struct audio_softc *audio_sc_acquire_fromfile(audio_file_t *, struct psref *); static void audio_sc_release(struct audio_softc *, struct psref *); @@ -732,6 +731,8 @@ const struct cdevsw audio_cdevsw = { .d_mmap = nommap, .d_kqfilter = nokqfilter, .d_discard = nodiscard, + .d_cfdriver = &audio_cd, + .d_devtounit = dev_minor_unit, .d_flag = D_OTHER | D_MPSAFE }; @@ -1543,31 +1544,6 @@ audio_exlock_exit(struct audio_softc *sc) audio_exlock_mutex_exit(sc); } -/* - * Increment reference counter for this sc. - * This is intended to be used for open. 
- */ -void -audio_sc_acquire_foropen(struct audio_softc *sc, struct psref *refp) -{ - int s; - - /* Block audiodetach while we acquire a reference */ - s = pserialize_read_enter(); - - /* - * We don't examine sc_dying here. However, all open methods - * call audio_exlock_enter() right after this, so we can examine - * sc_dying in it. - */ - - /* Acquire a reference */ - psref_acquire(refp, &sc->sc_psref, audio_psref_class); - - /* Now sc won't go away until we drop the reference count */ - pserialize_read_exit(s); -} - /* * Get sc from file, and increment reference counter for this sc. * This is intended to be used for methods other than open. @@ -1680,21 +1656,20 @@ static int audioopen(dev_t dev, int flags, int ifmt, struct lwp *l) { struct audio_softc *sc; - struct psref sc_ref; - int bound; int error; - /* Find the device */ + /* + * Find the device. Because we wired the cdevsw to the audio + * autoconf instance, the system ensures it will not go away + * until after we return. + */ sc = device_lookup_private(&audio_cd, AUDIOUNIT(dev)); if (sc == NULL || sc->hw_if == NULL) return ENXIO; - bound = curlwp_bind(); - audio_sc_acquire_foropen(sc, &sc_ref); - error = audio_exlock_enter(sc); if (error) - goto done; + return error; device_active(sc->sc_dev, DVA_SYSTEM); switch (AUDIODEV(dev)) { @@ -1714,9 +1689,6 @@ audioopen(dev_t dev, int flags, int ifmt, struct lwp *l) } audio_exlock_exit(sc); -done: - audio_sc_release(sc, &sc_ref); - curlwp_bindx(bound); return error; } @@ -2098,30 +2070,41 @@ done: int audiobellopen(dev_t dev, audio_file_t **filep) { + device_t audiodev = NULL; struct audio_softc *sc; - struct psref sc_ref; - int bound; + bool exlock = false; int error; - /* Find the device */ - sc = device_lookup_private(&audio_cd, AUDIOUNIT(dev)); - if (sc == NULL || sc->hw_if == NULL) - return ENXIO; + /* + * Find the autoconf instance and make sure it doesn't go away + * while we are opening it. 
+ */ + audiodev = device_lookup_acquire(&audio_cd, AUDIOUNIT(dev)); + if (audiodev == NULL) { + error = ENXIO; + goto out; + } - bound = curlwp_bind(); - audio_sc_acquire_foropen(sc, &sc_ref); + /* If attach failed, it's hopeless -- give up. */ + sc = device_private(audiodev); + if (sc->hw_if == NULL) { + error = ENXIO; + goto out; + } + /* Take the exclusive configuration lock. */ error = audio_exlock_enter(sc); if (error) - goto done; + goto out; + /* Open the audio device. */ device_active(sc->sc_dev, DVA_SYSTEM); error = audio_open(dev, sc, FWRITE, 0, curlwp, filep); - audio_exlock_exit(sc); -done: - audio_sc_release(sc, &sc_ref); - curlwp_bindx(bound); +out: if (exlock) + audio_exlock_exit(sc); + if (audiodev) + device_release(audiodev); return error; } diff --git a/sys/dev/ccd.c b/sys/dev/ccd.c index 05945f9a67ba..2283bc0346da 100644 --- a/sys/dev/ccd.c +++ b/sys/dev/ccd.c @@ -1710,7 +1710,7 @@ ccd_modcmd(modcmd_t cmd, void *arg) error = EBUSY; } else { mutex_exit(&ccd_lock); - error = devsw_detach(&ccd_bdevsw, &ccd_cdevsw); + devsw_detach(&ccd_bdevsw, &ccd_cdevsw); ccddetach(); } #endif diff --git a/sys/dev/clockctl.c b/sys/dev/clockctl.c index 0da5e7765fe8..9685c0f129f6 100644 --- a/sys/dev/clockctl.c +++ b/sys/dev/clockctl.c @@ -182,14 +182,12 @@ clockctl_modcmd(modcmd_t cmd, void *data) return EBUSY; } #ifdef _MODULE - error = devsw_detach(NULL, &clockctl_cdevsw); + devsw_detach(NULL, &clockctl_cdevsw); #endif mutex_exit(&clockctl_mtx); - if (error == 0) { - kauth_unlisten_scope(clockctl_listener); - mutex_destroy(&clockctl_mtx); - } + kauth_unlisten_scope(clockctl_listener); + mutex_destroy(&clockctl_mtx); break; default: diff --git a/sys/dev/hdaudio/hdaudio.c b/sys/dev/hdaudio/hdaudio.c index d39ff2db6cde..5c7874778e22 100644 --- a/sys/dev/hdaudio/hdaudio.c +++ b/sys/dev/hdaudio/hdaudio.c @@ -1636,11 +1636,7 @@ hdaudio_modcmd(modcmd_t cmd, void *opaque) error = config_cfdriver_detach(&hdaudio_cd); if (error) break; - error = devsw_detach(NULL, 
&hdaudio_cdevsw); - if (error) { - config_cfdriver_attach(&hdaudio_cd); - break; - } + devsw_detach(NULL, &hdaudio_cdevsw); #endif break; default: diff --git a/sys/dev/i2c/i2c.c b/sys/dev/i2c/i2c.c index 322f6ad3f199..b56b3e235112 100644 --- a/sys/dev/i2c/i2c.c +++ b/sys/dev/i2c/i2c.c @@ -904,7 +904,7 @@ iic_modcmd(modcmd_t cmd, void *opaque) if (error) { aprint_error("%s: unable to init component\n", iic_cd.cd_name); - (void)devsw_detach(NULL, &iic_cdevsw); + devsw_detach(NULL, &iic_cdevsw); } mutex_exit(&iic_mtx); #endif @@ -922,10 +922,7 @@ iic_modcmd(modcmd_t cmd, void *opaque) mutex_exit(&iic_mtx); break; } - error = devsw_detach(NULL, &iic_cdevsw); - if (error != 0) - config_init_component(cfdriver_ioconf_iic, - cfattach_ioconf_iic, cfdata_ioconf_iic); + devsw_detach(NULL, &iic_cdevsw); #endif mutex_exit(&iic_mtx); break; diff --git a/sys/dev/pad/pad.c b/sys/dev/pad/pad.c index fe0b429cf386..a779f1f71b8d 100644 --- a/sys/dev/pad/pad.c +++ b/sys/dev/pad/pad.c @@ -777,9 +777,7 @@ pad_modcmd(modcmd_t cmd, void *arg) case MODULE_CMD_FINI: #ifdef _MODULE - error = devsw_detach(NULL, &pad_cdevsw); - if (error) - break; + devsw_detach(NULL, &pad_cdevsw); error = config_fini_component(cfdriver_ioconf_pad, pad_cfattach, cfdata_ioconf_pad); diff --git a/sys/dev/raidframe/rf_netbsdkintf.c b/sys/dev/raidframe/rf_netbsdkintf.c index d1bda3553e03..87439aa70bfb 100644 --- a/sys/dev/raidframe/rf_netbsdkintf.c +++ b/sys/dev/raidframe/rf_netbsdkintf.c @@ -4088,16 +4088,7 @@ raid_modcmd_fini(void) return error; } #endif - error = devsw_detach(&raid_bdevsw, &raid_cdevsw); - if (error != 0) { - aprint_error("%s: cannot detach devsw\n",__func__); -#ifdef _MODULE - config_cfdriver_attach(&raid_cd); -#endif - config_cfattach_attach(raid_cd.cd_name, &raid_ca); - mutex_exit(&raid_lock); - return error; - } + devsw_detach(&raid_bdevsw, &raid_cdevsw); rf_BootRaidframe(false); #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0) rf_destroy_mutex2(rf_sparet_wait_mutex); diff --git 
a/sys/dev/scsipi/sd.c b/sys/dev/scsipi/sd.c index 7bd0d12c070b..24b3b6c1aa9c 100644 --- a/sys/dev/scsipi/sd.c +++ b/sys/dev/scsipi/sd.c @@ -167,6 +167,8 @@ const struct bdevsw sd_bdevsw = { .d_dump = sddump, .d_psize = sdsize, .d_discard = nodiscard, + .d_cfdriver = &sd_cd, + .d_devtounit = disklabel_dev_unit, .d_flag = D_DISK | D_MPSAFE }; @@ -182,6 +184,8 @@ const struct cdevsw sd_cdevsw = { .d_mmap = nommap, .d_kqfilter = nokqfilter, .d_discard = nodiscard, + .d_cfdriver = &sd_cd, + .d_devtounit = disklabel_dev_unit, .d_flag = D_DISK | D_MPSAFE }; diff --git a/sys/dev/sysmon/sysmon.c b/sys/dev/sysmon/sysmon.c index 46aedaad7337..fd2993f0c180 100644 --- a/sys/dev/sysmon/sysmon.c +++ b/sys/dev/sysmon/sysmon.c @@ -356,7 +356,7 @@ sysmon_fini(void) if (error == 0) { mutex_enter(&sysmon_minor_mtx); sm_is_attached = false; - error = devsw_detach(NULL, &sysmon_cdevsw); + devsw_detach(NULL, &sysmon_cdevsw); mutex_exit(&sysmon_minor_mtx); } #endif diff --git a/sys/dev/tprof/tprof.c b/sys/dev/tprof/tprof.c index b069a5b7df5d..136fd190ad14 100644 --- a/sys/dev/tprof/tprof.c +++ b/sys/dev/tprof/tprof.c @@ -768,13 +768,7 @@ tprof_modcmd(modcmd_t cmd, void *arg) case MODULE_CMD_FINI: #if defined(_MODULE) - { - int error; - error = devsw_detach(NULL, &tprof_cdevsw); - if (error) { - return error; - } - } + devsw_detach(NULL, &tprof_cdevsw); #endif /* defined(_MODULE) */ tprof_driver_fini(); return 0; diff --git a/sys/dev/usb/uhid.c b/sys/dev/usb/uhid.c index 81292fd5130e..506f23fcda09 100644 --- a/sys/dev/usb/uhid.c +++ b/sys/dev/usb/uhid.c @@ -89,7 +89,6 @@ struct uhid_softc { kmutex_t sc_lock; kcondvar_t sc_cv; - kcondvar_t sc_detach_cv; int sc_isize; int sc_osize; @@ -104,7 +103,6 @@ struct uhid_softc { volatile uint32_t sc_state; /* driver state */ #define UHID_IMMED 0x02 /* return read data immediately */ - int sc_refcnt; int sc_raw; u_char sc_open; u_char sc_dying; @@ -134,6 +132,8 @@ const struct cdevsw uhid_cdevsw = { .d_mmap = nommap, .d_kqfilter = uhidkqfilter, 
.d_discard = nodiscard, + .d_cfdriver = &uhid_cd, + .d_devtounit = dev_minor_unit, .d_flag = D_OTHER }; @@ -194,7 +194,6 @@ uhid_attach(device_t parent, device_t self, void *aux) mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB); cv_init(&sc->sc_cv, "uhidrea"); - cv_init(&sc->sc_detach_cv, "uhiddet"); if (!pmf_device_register(self, NULL, NULL)) aprint_error_dev(self, "couldn't establish power handler\n"); @@ -233,15 +232,6 @@ uhid_detach(device_t self, int flags) /* Interrupt any pending uhidev_write. */ uhidev_stop(&sc->sc_hdev); - /* Wait for I/O operations to complete. */ - mutex_enter(&sc->sc_lock); - while (sc->sc_refcnt) { - DPRINTF(("%s: open=%d refcnt=%d\n", __func__, - sc->sc_open, sc->sc_refcnt)); - cv_wait(&sc->sc_detach_cv, &sc->sc_lock); - } - mutex_exit(&sc->sc_lock); - pmf_device_deregister(self); /* locate the major number */ @@ -251,28 +241,9 @@ uhid_detach(device_t self, int flags) mn = device_unit(self); vdevgone(maj, mn, mn, VCHR); - /* - * Wait for close to finish. - * - * XXX I assumed that vdevgone would synchronously call close, - * and not return before it has completed, but empirically the - * assertion of sc->sc_open == 0 below fires if we don't wait - * here. Someone^TM should carefully examine vdevgone to - * ascertain what it guarantees, and audit all other users of - * it accordingly. 
- */ - mutex_enter(&sc->sc_lock); - while (sc->sc_open) { - DPRINTF(("%s: open=%d\n", __func__, sc->sc_open)); - cv_wait(&sc->sc_detach_cv, &sc->sc_lock); - } - mutex_exit(&sc->sc_lock); - KASSERT(sc->sc_open == 0); - KASSERT(sc->sc_refcnt == 0); cv_destroy(&sc->sc_cv); - cv_destroy(&sc->sc_detach_cv); mutex_destroy(&sc->sc_lock); seldestroy(&sc->sc_rsel); @@ -389,7 +360,6 @@ fail1: selnotify(&sc->sc_rsel, POLLHUP, 0); fail0: mutex_enter(&sc->sc_lock); KASSERT(sc->sc_open == 1); sc->sc_open = 0; - cv_broadcast(&sc->sc_detach_cv); atomic_store_relaxed(&sc->sc_state, 0); mutex_exit(&sc->sc_lock); return error; @@ -432,7 +402,6 @@ uhidclose(dev_t dev, int flag, int mode, struct lwp *l) mutex_enter(&sc->sc_lock); KASSERT(sc->sc_open == 1); sc->sc_open = 0; - cv_broadcast(&sc->sc_detach_cv); atomic_store_relaxed(&sc->sc_state, 0); mutex_exit(&sc->sc_lock); @@ -454,11 +423,8 @@ uhid_enter(dev_t dev, struct uhid_softc **scp) KASSERT(sc->sc_open == 2); if (sc->sc_dying) { error = ENXIO; - } else if (sc->sc_refcnt == INT_MAX) { - error = EBUSY; } else { *scp = sc; - sc->sc_refcnt++; error = 0; } mutex_exit(&sc->sc_lock); @@ -472,9 +438,6 @@ uhid_exit(struct uhid_softc *sc) mutex_enter(&sc->sc_lock); KASSERT(sc->sc_open == 2); - KASSERT(sc->sc_refcnt > 0); - if (--sc->sc_refcnt == 0) - cv_broadcast(&sc->sc_detach_cv); mutex_exit(&sc->sc_lock); } diff --git a/sys/dist/pf/net/pf_ioctl.c b/sys/dist/pf/net/pf_ioctl.c index 94bfb70a411d..e4c13be698f8 100644 --- a/sys/dist/pf/net/pf_ioctl.c +++ b/sys/dist/pf/net/pf_ioctl.c @@ -3459,7 +3459,8 @@ pf_modcmd(modcmd_t cmd, void *opaque) } else { pfdetach(); pflogdetach(); - return devsw_detach(NULL, &pf_cdevsw); + devsw_detach(NULL, &pf_cdevsw); + return 0; } default: return ENOTTY; diff --git a/sys/external/bsd/ipf/netinet/ip_fil_netbsd.c b/sys/external/bsd/ipf/netinet/ip_fil_netbsd.c index d0c4ca95097c..bb4e0706cc9b 100644 --- a/sys/external/bsd/ipf/netinet/ip_fil_netbsd.c +++ b/sys/external/bsd/ipf/netinet/ip_fil_netbsd.c @@ -2256,7 
+2256,7 @@ ipl_fini(void *opaque) { #ifdef _MODULE - (void)devsw_detach(NULL, &ipl_cdevsw); + devsw_detach(NULL, &ipl_cdevsw); #endif /* diff --git a/sys/fs/autofs/autofs_vfsops.c b/sys/fs/autofs/autofs_vfsops.c index fbd6eafe6532..1204d1f9b6d3 100644 --- a/sys/fs/autofs/autofs_vfsops.c +++ b/sys/fs/autofs/autofs_vfsops.c @@ -496,9 +496,7 @@ autofs_modcmd(modcmd_t cmd, void *arg) } mutex_exit(&autofs_softc->sc_lock); - error = devsw_detach(NULL, &autofs_cdevsw); - if (error) - break; + devsw_detach(NULL, &autofs_cdevsw); #endif error = vfs_detach(&autofs_vfsops); if (error) diff --git a/sys/kern/kern_drvctl.c b/sys/kern/kern_drvctl.c index 37f4730b2512..8a4156f8a0aa 100644 --- a/sys/kern/kern_drvctl.c +++ b/sys/kern/kern_drvctl.c @@ -665,15 +665,10 @@ drvctl_modcmd(modcmd_t cmd, void *arg) devmon_insert_vec = saved_insert_vec; saved_insert_vec = NULL; #ifdef _MODULE - error = devsw_detach(NULL, &drvctl_cdevsw); - if (error != 0) { - saved_insert_vec = devmon_insert_vec; - devmon_insert_vec = devmon_insert; - } + devsw_detach(NULL, &drvctl_cdevsw); #endif mutex_exit(&drvctl_lock); - if (error == 0) - drvctl_fini(); + drvctl_fini(); break; default: diff --git a/sys/kern/subr_autoconf.c b/sys/kern/subr_autoconf.c index 8439ae8c6a00..073ce6f52059 100644 --- a/sys/kern/subr_autoconf.c +++ b/sys/kern/subr_autoconf.c @@ -108,6 +108,7 @@ __KERNEL_RCSID(0, "$NetBSD: subr_autoconf.c,v 1.291 2021/12/31 14:19:57 riastrad #include #include #include +#include #include @@ -1453,6 +1454,9 @@ config_devdelete(device_t dev) if (dg->dg_devs != NULL) kmem_free(dg->dg_devs, sizeof(device_t) * dg->dg_ndevs); + localcount_fini(dev->dv_localcount); + kmem_free(dev->dv_localcount, sizeof(*dev->dv_localcount)); + cv_destroy(&dvl->dvl_cv); mutex_destroy(&dvl->dvl_mtx); @@ -1556,6 +1560,7 @@ config_devalloc(const device_t parent, const cfdata_t cf, dev->dv_activity_handlers = NULL; dev->dv_private = dev_private; dev->dv_flags = ca->ca_flags; /* inherit flags from class */ + dev->dv_attaching 
= curlwp; myunit = config_unit_alloc(dev, cd, cf); if (myunit == -1) { @@ -1604,6 +1609,10 @@ config_devalloc(const device_t parent, const cfdata_t cf, "device-parent", device_xname(parent)); } + dev->dv_localcount = kmem_zalloc(sizeof(*dev->dv_localcount), + KM_SLEEP); + localcount_init(dev->dv_localcount); + if (dev->dv_cfdriver->cd_attrs != NULL) config_add_attrib_dict(dev); @@ -1755,8 +1764,29 @@ config_attach_internal(device_t parent, cfdata_t cf, void *aux, cfprint_t print, /* Let userland know */ devmon_report_device(dev, true); + /* + * Prevent detach until the driver's attach function, and all + * deferred actions, have finished. + */ config_pending_incr(dev); + + /* Call the driver's attach function. */ (*dev->dv_cfattach->ca_attach)(parent, dev, aux); + + /* + * Allow other threads to acquire references to the device now + * that the driver's attach function is done. + */ + mutex_enter(&config_misc_lock); + KASSERT(dev->dv_attaching == curlwp); + dev->dv_attaching = NULL; + cv_broadcast(&config_misc_cv); + mutex_exit(&config_misc_lock); + + /* + * Synchronous parts of attach are done. Allow detach, unless + * the driver's attach function scheduled deferred actions. + */ config_pending_decr(dev); mutex_enter(&config_misc_lock); @@ -1822,8 +1852,29 @@ config_attach_pseudo(cfdata_t cf) /* Let userland know */ devmon_report_device(dev, true); + /* + * Prevent detach until the driver's attach function, and all + * deferred actions, have finished. + */ config_pending_incr(dev); + + /* Call the driver's attach function. */ (*dev->dv_cfattach->ca_attach)(ROOT, dev, NULL); + + /* + * Allow other threads to acquire references to the device now + * that the driver's attach function is done. + */ + mutex_enter(&config_misc_lock); + KASSERT(dev->dv_attaching == curlwp); + dev->dv_attaching = NULL; + cv_broadcast(&config_misc_cv); + mutex_exit(&config_misc_lock); + + /* + * Synchronous parts of attach are done. 
Allow detach, unless + * the driver's attach function scheduled deferred actions. + */ config_pending_decr(dev); config_process_deferred(&deferred_config_queue, dev); @@ -1872,24 +1923,49 @@ config_dump_garbage(struct devicelist *garbage) static int config_detach_enter(device_t dev) { - int error; + int error = 0; mutex_enter(&config_misc_lock); - for (;;) { - if (dev->dv_pending == 0 && dev->dv_detaching == NULL) { - dev->dv_detaching = curlwp; - error = 0; - break; - } + + /* + * Wait until attach has fully completed, and until any + * concurrent detach (e.g., drvctl racing with USB event + * thread) has completed. + * + * Caller must hold alldevs_nread or alldevs_nwrite (e.g., via + * deviter) to ensure the winner of the race doesn't free the + * device leading the loser of the race into use-after-free. + * + * XXX Not all callers do this! + */ + while (dev->dv_pending || dev->dv_detaching) { KASSERTMSG(dev->dv_detaching != curlwp, "recursively detaching %s", device_xname(dev)); error = cv_wait_sig(&config_misc_cv, &config_misc_lock); if (error) - break; + goto out; } - KASSERT(error || dev->dv_detaching == curlwp); - mutex_exit(&config_misc_lock); + /* + * Attach has completed, and no other concurrent detach is + * running. Claim the device for detaching. This will cause + * all new attempts to acquire references to block. + */ + KASSERT(dev->dv_attaching == NULL); + KASSERT(dev->dv_detaching == NULL); + dev->dv_detaching = curlwp; + + /* + * Wait for all device_lookup_acquire references -- mostly, for + * all attempts to open the device -- to drain. There may + * still be open instances of the device after this point, but + * all new attempts to acquire references will block until + * dv_detaching clears. 
+ */ + localcount_drain(dev->dv_localcount, + &config_misc_cv, &config_misc_lock); + +out: mutex_exit(&config_misc_lock); return error; } @@ -1980,9 +2056,18 @@ config_detach(device_t dev, int flags) */ if (rv == 0) dev->dv_flags &= ~DVF_ACTIVE; - else if ((flags & DETACH_FORCE) == 0) + else if ((flags & DETACH_FORCE) == 0) { + /* + * Detach failed -- likely EBUSY. Reset the + * localcount. At this point there can be no + * references held, and no new references acquired -- + * calls to device_lookup_acquire are held up on + * dv_detaching until config_detach_exit. + */ + localcount_fini(dev->dv_localcount); + localcount_init(dev->dv_localcount); goto out; - else { + } else { panic("config_detach: forced detach of %s failed (%d)", device_xname(dev), rv); } @@ -2498,6 +2583,14 @@ config_alldevs_exit(struct alldevs_foray *af) * device_lookup: * * Look up a device instance for a given driver. + * + * Caller is responsible for ensuring the device's state is + * stable, either by holding a reference already obtained with + * device_lookup_acquire or by otherwise ensuring the device is + * attached and can't be detached (e.g., holding an open device + * node and ensuring *_detach calls vdevgone). + * + * XXX Find a way to assert this. */ device_t device_lookup(cfdriver_t cd, int unit) @@ -2526,6 +2619,69 @@ device_lookup_private(cfdriver_t cd, int unit) return device_private(device_lookup(cd, unit)); } +/* + * device_lookup_acquire: + * + * Look up a device instance for a given driver, and return a + * reference to it that must be released by device_release. + * + * => If the device is still attaching, blocks until *_attach has + * returned. + * + * => If the device is detaching, blocks until *_detach has + * returned. May succeed or fail in that case, depending on + * whether *_detach has backed out (EBUSY) or committed to + * detaching. 
+ */ +device_t +device_lookup_acquire(cfdriver_t cd, int unit) +{ + device_t dv; + + /* XXX This should have a pserialized fast path -- TBD. */ + mutex_enter(&config_misc_lock); + mutex_enter(&alldevs_lock); +retry: if (unit < 0 || unit >= cd->cd_ndevs || + (dv = cd->cd_devs[unit]) == NULL || + dv->dv_del_gen != 0) { + dv = NULL; + } else { + /* + * Wait for the device to stabilize, if attaching or + * detaching. Either way we must wait for *_attach or + * *_detach to complete, and either way we must retry: + * even if detaching, *_detach might fail (EBUSY) so + * the device may still be there. + */ + if ((dv->dv_attaching != NULL && dv->dv_attaching != curlwp) || + dv->dv_detaching != NULL) { + mutex_exit(&alldevs_lock); + cv_wait(&config_misc_cv, &config_misc_lock); + mutex_enter(&alldevs_lock); + goto retry; + } + localcount_acquire(dv->dv_localcount); + } + mutex_exit(&alldevs_lock); + mutex_exit(&config_misc_lock); + + return dv; +} + +/* + * device_release: + * + * Release a reference to a device acquired with + * device_lookup_acquire. 
+ */ +void +device_release(device_t dv) +{ + + localcount_release(dv->dv_localcount, + &config_misc_cv, &config_misc_lock); +} + /* * device_find_by_xname: * diff --git a/sys/kern/subr_devsw.c b/sys/kern/subr_devsw.c index 1a0f721fdd65..42bf25c4e354 100644 --- a/sys/kern/subr_devsw.c +++ b/sys/kern/subr_devsw.c @@ -85,6 +85,11 @@ __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.38 2017/11/07 18:35:57 christos Exp #include #include #include +#include +#include +#include +#include +#include #ifdef DEVSW_DEBUG #define DPRINTF(x) printf x @@ -97,12 +102,22 @@ __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.38 2017/11/07 18:35:57 christos Exp #define CDEVSW_SIZE (sizeof(struct cdevsw *)) #define DEVSWCONV_SIZE (sizeof(struct devsw_conv)) +struct devswref { + struct localcount *dr_lc; + bool dr_dynamic; +}; + +/* XXX bdevsw, cdevsw, max_bdevsws, and max_cdevsws should be volatile */ extern const struct bdevsw **bdevsw, *bdevsw0[]; extern const struct cdevsw **cdevsw, *cdevsw0[]; extern struct devsw_conv *devsw_conv, devsw_conv0[]; extern const int sys_bdevsws, sys_cdevsws; extern int max_bdevsws, max_cdevsws, max_devsw_convs; +static struct devswref *cdevswref; +static struct devswref *bdevswref; +static kcondvar_t devsw_cv; + static int bdevsw_attach(const struct bdevsw *, devmajor_t *); static int cdevsw_attach(const struct cdevsw *, devmajor_t *); static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *); @@ -118,6 +133,8 @@ devsw_init(void) KASSERT(sys_bdevsws < MAXDEVSW - 1); KASSERT(sys_cdevsws < MAXDEVSW - 1); mutex_init(&device_lock, MUTEX_DEFAULT, IPL_NONE); + + cv_init(&devsw_cv, "devsw"); } int @@ -158,15 +175,12 @@ devsw_attach(const char *devname, error = EEXIST; goto fail; } - - if (bdev != NULL) - bdevsw[*bmajor] = bdev; - cdevsw[*cmajor] = cdev; - - mutex_exit(&device_lock); - return (0); } + /* + * XXX This should allocate what it needs up front so we never + * need to flail around trying to unwind. 
+ */ error = bdevsw_attach(bdev, bmajor); if (error != 0) goto fail; @@ -176,6 +190,13 @@ devsw_attach(const char *devname, goto fail; } + /* + * If we already found a conv, we're done. Otherwise, find an + * empty slot or extend the table. + */ + if (i == max_devsw_convs) + goto fail; + for (i = 0 ; i < max_devsw_convs ; i++) { if (devsw_conv[i].d_name == NULL) break; @@ -224,7 +245,9 @@ devsw_attach(const char *devname, static int bdevsw_attach(const struct bdevsw *devsw, devmajor_t *devmajor) { - const struct bdevsw **newptr; + const struct bdevsw **newbdevsw = NULL; + struct devswref *newbdevswref = NULL; + struct localcount *lc; devmajor_t bmajor; int i; @@ -253,20 +276,35 @@ bdevsw_attach(const struct bdevsw *devsw, devmajor_t *devmajor) return (ENOMEM); } + if (bdevswref == NULL) { + newbdevswref = kmem_zalloc(MAXDEVSW * sizeof(newbdevswref[0]), + KM_NOSLEEP); + if (newbdevswref == NULL) + return ENOMEM; + atomic_store_release(&bdevswref, newbdevswref); + } + if (*devmajor >= max_bdevsws) { KASSERT(bdevsw == bdevsw0); - newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP); - if (newptr == NULL) - return (ENOMEM); - memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE); - bdevsw = newptr; - max_bdevsws = MAXDEVSW; + newbdevsw = kmem_zalloc(MAXDEVSW * sizeof(newbdevsw[0]), + KM_NOSLEEP); + if (newbdevsw == NULL) + return ENOMEM; + memcpy(newbdevsw, bdevsw, max_bdevsws * sizeof(bdevsw[0])); + atomic_store_release(&bdevsw, newbdevsw); + atomic_store_release(&max_bdevsws, MAXDEVSW); } if (bdevsw[*devmajor] != NULL) return (EEXIST); - bdevsw[*devmajor] = devsw; + KASSERT(bdevswref[*devmajor].dr_lc == NULL); + lc = kmem_zalloc(sizeof(*lc), KM_SLEEP); + localcount_init(lc); + bdevswref[*devmajor].dr_lc = lc; + bdevswref[*devmajor].dr_dynamic = true; + + atomic_store_release(&bdevsw[*devmajor], devsw); return (0); } @@ -274,7 +312,9 @@ bdevsw_attach(const struct bdevsw *devsw, devmajor_t *devmajor) static int cdevsw_attach(const struct cdevsw *devsw, devmajor_t 
*devmajor) { - const struct cdevsw **newptr; + const struct cdevsw **newcdevsw = NULL; + struct devswref *newcdevswref = NULL; + struct localcount *lc; devmajor_t cmajor; int i; @@ -300,20 +340,35 @@ cdevsw_attach(const struct cdevsw *devsw, devmajor_t *devmajor) return (ENOMEM); } + if (cdevswref == NULL) { + newcdevswref = kmem_zalloc(MAXDEVSW * sizeof(newcdevswref[0]), + KM_NOSLEEP); + if (newcdevswref == NULL) + return ENOMEM; + atomic_store_release(&cdevswref, newcdevswref); + } + if (*devmajor >= max_cdevsws) { KASSERT(cdevsw == cdevsw0); - newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP); - if (newptr == NULL) - return (ENOMEM); - memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE); - cdevsw = newptr; - max_cdevsws = MAXDEVSW; + newcdevsw = kmem_zalloc(MAXDEVSW * sizeof(newcdevsw[0]), + KM_NOSLEEP); + if (newcdevsw == NULL) + return ENOMEM; + memcpy(newcdevsw, cdevsw, max_cdevsws * sizeof(cdevsw[0])); + atomic_store_release(&cdevsw, newcdevsw); + atomic_store_release(&max_cdevsws, MAXDEVSW); } if (cdevsw[*devmajor] != NULL) return (EEXIST); - cdevsw[*devmajor] = devsw; + KASSERT(cdevswref[*devmajor].dr_lc == NULL); + lc = kmem_zalloc(sizeof(*lc), KM_SLEEP); + localcount_init(lc); + cdevswref[*devmajor].dr_lc = lc; + cdevswref[*devmajor].dr_dynamic = true; + + atomic_store_release(&cdevsw[*devmajor], devsw); return (0); } @@ -321,36 +376,75 @@ cdevsw_attach(const struct cdevsw *devsw, devmajor_t *devmajor) static void devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev) { - int i; + int bi, ci; KASSERT(mutex_owned(&device_lock)); + /* Prevent new references. 
*/ if (bdev != NULL) { - for (i = 0 ; i < max_bdevsws ; i++) { - if (bdevsw[i] != bdev) + for (bi = 0; bi < max_bdevsws; bi++) { + if (bdevsw[bi] != bdev) continue; - bdevsw[i] = NULL; + atomic_store_relaxed(&bdevsw[bi], NULL); break; } + KASSERT(bi < max_bdevsws); } if (cdev != NULL) { - for (i = 0 ; i < max_cdevsws ; i++) { - if (cdevsw[i] != cdev) + for (ci = 0; ci < max_cdevsws; ci++) { + if (cdevsw[ci] != cdev) continue; - cdevsw[i] = NULL; + atomic_store_relaxed(&cdevsw[ci], NULL); break; } + KASSERT(ci < max_cdevsws); + } + + if (bdev == NULL && cdev == NULL) /* XXX possible? */ + return; + + /* + * Wait for all bdevsw_lookup_acquire, cdevsw_lookup_acquire + * calls to notice that the devsw is gone. + * + * XXX Can't use pserialize_perform here because devsw_init is + * too early for pserialize_create(). + */ + xc_barrier(0); + + /* + * Wait for all references to drain. It is the caller's + * responsibility to ensure that at this point, there are no + * extant open instances and all new d_open calls will fail. + * + * Note that localcount_drain may release and reacquire + * device_lock. 
+ */ + if (bdev != NULL) { + localcount_drain(bdevswref[bi].dr_lc, + &devsw_cv, &device_lock); + localcount_fini(bdevswref[bi].dr_lc); + kmem_free(bdevswref[bi].dr_lc, sizeof(*bdevswref[bi].dr_lc)); + bdevswref[bi].dr_lc = NULL; + bdevswref[bi].dr_dynamic = false; + } + if (cdev != NULL) { + localcount_drain(cdevswref[ci].dr_lc, + &devsw_cv, &device_lock); + localcount_fini(cdevswref[ci].dr_lc); + kmem_free(cdevswref[ci].dr_lc, sizeof(*cdevswref[ci].dr_lc)); + cdevswref[ci].dr_lc = NULL; + cdevswref[ci].dr_dynamic = false; } } -int +void devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev) { mutex_enter(&device_lock); devsw_detach_locked(bdev, cdev); mutex_exit(&device_lock); - return 0; } /* @@ -366,10 +460,60 @@ bdevsw_lookup(dev_t dev) if (dev == NODEV) return (NULL); bmajor = major(dev); - if (bmajor < 0 || bmajor >= max_bdevsws) + if (bmajor < 0 || bmajor >= atomic_load_relaxed(&max_bdevsws)) return (NULL); - return (bdevsw[bmajor]); + return atomic_load_consume(&bdevsw)[bmajor]; +} + +static const struct bdevsw * +bdevsw_lookup_acquire(dev_t dev, struct localcount **lcp) +{ + devmajor_t bmajor; + const struct bdevsw *bdev = NULL, *const *curbdevsw; + struct devswref *curbdevswref; + int s; + + if (dev == NODEV) + return NULL; + bmajor = major(dev); + if (bmajor < 0) + return NULL; + + s = pserialize_read_enter(); + + /* + * max_bdevsws never goes down, so it is safe to rely on this + * condition without any locking for the array access below. + * Test sys_bdevsws first so we can avoid the memory barrier in + * that case. 
+ */ + if (bmajor >= sys_bdevsws && + bmajor >= atomic_load_acquire(&max_bdevsws)) + goto out; + curbdevsw = atomic_load_consume(&bdevsw); + if ((bdev = atomic_load_consume(&curbdevsw[bmajor])) == NULL) + goto out; + + curbdevswref = atomic_load_consume(&bdevswref); + if (curbdevswref == NULL || !curbdevswref[bmajor].dr_dynamic) { + *lcp = NULL; + } else { + *lcp = curbdevswref[bmajor].dr_lc; + localcount_acquire(*lcp); + } + +out: pserialize_read_exit(s); + return bdev; +} + +static void +bdevsw_release(const struct bdevsw *bdev, struct localcount *lc) +{ + + if (lc == NULL) + return; + localcount_release(lc, &devsw_cv, &device_lock); } /* @@ -385,10 +529,60 @@ cdevsw_lookup(dev_t dev) if (dev == NODEV) return (NULL); cmajor = major(dev); - if (cmajor < 0 || cmajor >= max_cdevsws) + if (cmajor < 0 || cmajor >= atomic_load_relaxed(&max_cdevsws)) return (NULL); - return (cdevsw[cmajor]); + return atomic_load_consume(&cdevsw)[cmajor]; +} + +static const struct cdevsw * +cdevsw_lookup_acquire(dev_t dev, struct localcount **lcp) +{ + devmajor_t cmajor; + const struct cdevsw *cdev = NULL, *const *curcdevsw; + struct devswref *curcdevswref; + int s; + + if (dev == NODEV) + return NULL; + cmajor = major(dev); + if (cmajor < 0) + return NULL; + + s = pserialize_read_enter(); + + /* + * max_cdevsws never goes down, so it is safe to rely on this + * condition without any locking for the array access below. + * Test sys_cdevsws first so we can avoid the memory barrier in + * that case. 
+ */ + if (cmajor >= sys_cdevsws && + cmajor >= atomic_load_acquire(&max_cdevsws)) + goto out; + curcdevsw = atomic_load_consume(&cdevsw); + if ((cdev = atomic_load_consume(&curcdevsw[cmajor])) == NULL) + goto out; + + curcdevswref = atomic_load_consume(&cdevswref); + if (curcdevswref == NULL || !curcdevswref[cmajor].dr_dynamic) { + *lcp = NULL; + } else { + *lcp = curcdevswref[cmajor].dr_lc; + localcount_acquire(*lcp); + } + +out: pserialize_read_exit(s); + return cdev; +} + +static void +cdevsw_release(const struct cdevsw *cdev, struct localcount *lc) +{ + + if (lc == NULL) + return; + localcount_release(lc, &devsw_cv, &device_lock); } /* @@ -400,10 +594,13 @@ cdevsw_lookup(dev_t dev) devmajor_t bdevsw_lookup_major(const struct bdevsw *bdev) { - devmajor_t bmajor; + const struct bdevsw *const *curbdevsw; + devmajor_t bmajor, bmax; - for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) { - if (bdevsw[bmajor] == bdev) + bmax = atomic_load_acquire(&max_bdevsws); + curbdevsw = atomic_load_consume(&bdevsw); + for (bmajor = 0; bmajor < bmax; bmajor++) { + if (atomic_load_relaxed(&curbdevsw[bmajor]) == bdev) return (bmajor); } @@ -419,10 +616,13 @@ bdevsw_lookup_major(const struct bdevsw *bdev) devmajor_t cdevsw_lookup_major(const struct cdevsw *cdev) { - devmajor_t cmajor; + const struct cdevsw *const *curcdevsw; + devmajor_t cmajor, cmax; - for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) { - if (cdevsw[cmajor] == cdev) + cmax = atomic_load_acquire(&max_cdevsws); + curcdevsw = atomic_load_consume(&cdevsw); + for (cmajor = 0; cmajor < cmax; cmajor++) { + if (atomic_load_relaxed(&curcdevsw[cmajor]) == cdev) return (cmajor); } @@ -697,22 +897,41 @@ int bdev_open(dev_t dev, int flag, int devtype, lwp_t *l) { const struct bdevsw *d; - int rv, mpflag; + struct localcount *lc; + device_t dv = NULL/*XXXGCC*/; + int unit, rv, mpflag; - /* - * For open we need to lock, in order to synchronize - * with attach/detach. 
- */ - mutex_enter(&device_lock); - d = bdevsw_lookup(dev); - mutex_exit(&device_lock); + d = bdevsw_lookup_acquire(dev, &lc); if (d == NULL) return ENXIO; + if (d->d_devtounit) { + /* + * If the device node corresponds to an autoconf device + * instance, acquire a reference to it so that during + * d_open, device_lookup is stable. + * + * XXX This should also arrange to instantiate cloning + * pseudo-devices if appropriate, but that requires + * reviewing them all to find and verify a common + * pattern. + */ + if ((unit = (*d->d_devtounit)(dev)) == -1) + return ENXIO; + if ((dv = device_lookup_acquire(d->d_cfdriver, unit)) == NULL) + return ENXIO; + } + DEV_LOCK(d); rv = (*d->d_open)(dev, flag, devtype, l); DEV_UNLOCK(d); + if (d->d_devtounit) { + device_release(dv); + } + + bdevsw_release(d, lc); + return rv; } @@ -855,22 +1074,41 @@ int cdev_open(dev_t dev, int flag, int devtype, lwp_t *l) { const struct cdevsw *d; - int rv, mpflag; + struct localcount *lc; + device_t dv = NULL/*XXXGCC*/; + int unit, rv, mpflag; - /* - * For open we need to lock, in order to synchronize - * with attach/detach. - */ - mutex_enter(&device_lock); - d = cdevsw_lookup(dev); - mutex_exit(&device_lock); + d = cdevsw_lookup_acquire(dev, &lc); if (d == NULL) return ENXIO; + if (d->d_devtounit) { + /* + * If the device node corresponds to an autoconf device + * instance, acquire a reference to it so that during + * d_open, device_lookup is stable. + * + * XXX This should also arrange to instantiate cloning + * pseudo-devices if appropriate, but that requires + * reviewing them all to find and verify a common + * pattern. 
+ */ + if ((unit = (*d->d_devtounit)(dev)) == -1) + return ENXIO; + if ((dv = device_lookup_acquire(d->d_cfdriver, unit)) == NULL) + return ENXIO; + } + DEV_LOCK(d); rv = (*d->d_open)(dev, flag, devtype, l); DEV_UNLOCK(d); + if (d->d_devtounit) { + device_release(dv); + } + + cdevsw_release(d, lc); + return rv; } @@ -1063,3 +1301,18 @@ nommap(dev_t dev, off_t off, int prot) return (paddr_t)-1; } + +/* + * dev_minor_unit(dev) + * + * Returns minor(dev) as an int. Intended for use with struct + * bdevsw, cdevsw::d_devtounit for drivers whose /dev nodes are + * implemented by reference to an autoconf instance with the minor + * number. + */ +int +dev_minor_unit(dev_t dev) +{ + + return minor(dev); +} diff --git a/sys/kern/subr_disk.c b/sys/kern/subr_disk.c index da664f920382..41218421db57 100644 --- a/sys/kern/subr_disk.c +++ b/sys/kern/subr_disk.c @@ -728,3 +728,10 @@ disk_set_info(device_t dev, struct disk *dk, const char *type) if (odisk_info) prop_object_release(odisk_info); } + +int +disklabel_dev_unit(dev_t dev) +{ + + return DISKUNIT(dev); +} diff --git a/sys/miscfs/specfs/spec_vnops.c b/sys/miscfs/specfs/spec_vnops.c index b4bc4c34ab03..15217b15a4ae 100644 --- a/sys/miscfs/specfs/spec_vnops.c +++ b/sys/miscfs/specfs/spec_vnops.c @@ -81,10 +81,18 @@ __KERNEL_RCSID(0, "$NetBSD: spec_vnops.c,v 1.183 2021/07/18 23:57:14 dholland Ex #include #include #include +#include +#include #include #include +/* + * Lock order: + * + * vnode lock -> struct specdev::sd_opencloselock -> device_lock + */ + /* symbolic sleep message strings for devices */ const char devopn[] = "devopn"; const char devio[] = "devio"; @@ -250,6 +258,7 @@ spec_node_init(vnode_t *vp, dev_t rdev) sd->sd_refcnt = 1; sd->sd_opencnt = 0; sd->sd_bdevvp = NULL; + rw_init(&sd->sd_opencloselock); sn->sn_dev = sd; sd = NULL; } else { @@ -463,6 +472,7 @@ spec_node_destroy(vnode_t *vp) if (refcnt == 1) { KASSERT(sd->sd_opencnt == 0); KASSERT(sd->sd_bdevvp == NULL); + rw_destroy(&sd->sd_opencloselock); 
kmem_free(sd, sizeof(*sd)); } kmem_free(sn, sizeof(*sn)); @@ -517,7 +527,10 @@ spec_open(void *v) sd = sn->sn_dev; name = NULL; gen = 0; - + + KASSERTMSG(vp->v_type == VBLK || vp->v_type == VCHR, "type=%d", + vp->v_type); + /* * Don't allow open if fs is mounted -nodev. */ @@ -535,28 +548,102 @@ spec_open(void *v) req = KAUTH_REQ_DEVICE_RAWIO_SPEC_READ; break; } + error = kauth_authorize_device_spec(ap->a_cred, req, vp); + if (error != 0) + return (error); + + /* + * Prevent concurrent .d_close while we are trying to open the + * device with the vnode unlocked later on. Concurrent opens + * by other treads are OK -- it's up to the device driver to + * reject those if need be. Because concurrent .d_close is + * blocked, once we acquire an open reference, the vnode cannot + * be revoked until we allow .d_close again -- spec_node_revoke + * will wait for this via VOP_CLOSE. + */ + rw_enter(&sd->sd_opencloselock, RW_READER); + /* + * Acquire an open reference -- as long as we hold onto it, and + * the vnode isn't revoked, it can't be closed. + * + * But first check whether it has been revoked -- if so, we + * can't acquire more open references and we must fail + * immediately with EBADF. + */ + mutex_enter(&device_lock); + KASSERT(!sn->sn_gone); switch (vp->v_type) { case VCHR: - error = kauth_authorize_device_spec(ap->a_cred, req, vp); - if (error != 0) - return (error); - /* * Character devices can accept opens from multiple * vnodes. */ - mutex_enter(&device_lock); - if (sn->sn_gone) { - mutex_exit(&device_lock); - return (EBADF); - } sd->sd_opencnt++; sn->sn_opencnt++; - mutex_exit(&device_lock); + break; + case VBLK: + /* + * For block devices, permit only one open. The buffer + * cache cannot remain self-consistent with multiple + * vnodes holding a block device open. + * + * Treat zero opencnt with non-NULL mountpoint as open. + * This may happen after forced detach of a mounted device. 
+ */ + if (sd->sd_opencnt != 0 || sd->sd_mountpoint != NULL) { + error = EBUSY; + break; + } + KASSERTMSG(sn->sn_opencnt == 0, "%u", sn->sn_opencnt); + sn->sn_opencnt = 1; + sd->sd_opencnt = 1; + sd->sd_bdevvp = vp; + break; + default: + panic("invalid specfs vnode type: %d", vp->v_type); + } + mutex_exit(&device_lock); + if (error) { + rw_exit(&sd->sd_opencloselock); + return error; + } + + /* + * Set VV_ISTTY if this is a tty cdev. + * + * XXX This does the wrong thing if the module has to be + * autoloaded. We should maybe set this after autoloading + * modules and calling .d_open successfully, except (a) we need + * the vnode lock to touch it, and (b) once we acquire the + * vnode lock again, the vnode may have been revoked, and + * deadfs's dead_read needs VV_ISTTY to be already set in order + * to return the right answer. So this needs some additional + * synchronization to be made to work correctly with tty driver + * module autoload. For now, let's just hope it doesn't cause + * too much trouble for a tty from an autoloaded driver module + * to fail with EIO instead of returning EOF. + */ + if (vp->v_type == VCHR) { if (cdev_type(dev) == D_TTY) vp->v_vflag |= VV_ISTTY; - VOP_UNLOCK(vp); + } + + /* + * Open the device. If .d_open returns ENXIO (device not + * configured), the driver may not be loaded, so try + * autoloading a module and then try .d_open again if anything + * got loaded. + * + * Because the opening the device may block indefinitely, + * e.g. when opening a tty, and loading a module may cross into + * many other subsystems, we must not hold the vnode lock while + * calling .d_open, so release it now and reacquire it when + * done. 
+ */ + VOP_UNLOCK(vp); + switch (vp->v_type) { + case VCHR: do { const struct cdevsw *cdev; @@ -579,36 +666,9 @@ spec_open(void *v) /* Try to autoload device module */ (void) module_autoload(name, MODULE_CLASS_DRIVER); } while (gen != module_gen); - - vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); break; case VBLK: - error = kauth_authorize_device_spec(ap->a_cred, req, vp); - if (error != 0) - return (error); - - /* - * For block devices, permit only one open. The buffer - * cache cannot remain self-consistent with multiple - * vnodes holding a block device open. - * - * Treat zero opencnt with non-NULL mountpoint as open. - * This may happen after forced detach of a mounted device. - */ - mutex_enter(&device_lock); - if (sn->sn_gone) { - mutex_exit(&device_lock); - return (EBADF); - } - if (sd->sd_opencnt != 0 || sd->sd_mountpoint != NULL) { - mutex_exit(&device_lock); - return EBUSY; - } - sn->sn_opencnt = 1; - sd->sd_opencnt = 1; - sd->sd_bdevvp = vp; - mutex_exit(&device_lock); do { const struct bdevsw *bdev; @@ -628,28 +688,30 @@ spec_open(void *v) if ((name = bdevsw_getname(major(dev))) == NULL) break; - VOP_UNLOCK(vp); - /* Try to autoload device module */ (void) module_autoload(name, MODULE_CLASS_DRIVER); - - vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); } while (gen != module_gen); - break; - case VNON: - case VLNK: - case VDIR: - case VREG: - case VBAD: - case VFIFO: - case VSOCK: default: - return 0; + __unreachable(); } + /* + * If it has been revoked since we released the vnode lock and + * reacquired it, then spec_node_revoke will close it as soon + * as we release sd->sd_opencloselock, and we must fail with + * EBADF. + * + * Otherwise, if opening it failed, back out and release the + * open reference. This can only release the last reference if + * it was not previously open at all, because during the whole + * time we held an open reference we blocked out .d_close with + * sd->sd_opencloselock, so we need not close it even if + * sd->sd_opencnt drops to zero. 
+ */ mutex_enter(&device_lock); + KASSERT(vp->v_specnode == sn); if (sn->sn_gone) { if (error == 0) error = EBADF; @@ -662,15 +724,50 @@ spec_open(void *v) } mutex_exit(&device_lock); - if (cdev_type(dev) != D_DISK || error != 0) + /* + * Allow .d_close again and reacquire the vnode lock before we + * return. At this point, the vnode may be revoked. Once we + * lock it again, it may be dead, so we must not touch sn or sd + * after this point. + */ + rw_exit(&sd->sd_opencloselock); + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); + + /* If anything went wrong, we're done. */ + if (error) return error; - - ioctl = vp->v_type == VCHR ? cdev_ioctl : bdev_ioctl; - error = (*ioctl)(vp->v_rdev, DIOCGPARTINFO, &pi, FREAD, curlwp); - if (error == 0) - uvm_vnp_setsize(vp, (voff_t)pi.pi_secsize * pi.pi_size); + /* + * The vnode may have been revoked between releasing + * sd_opencloselock and reacquiring the vnode lock, while we + * dropped the vnode lock. If that has happened, the device + * has been closed, so we must not call .d_ioctl; we must + * immediately fail with EBADF. + */ + mutex_enter(vp->v_interlock); + if (vdead_check(vp, VDEAD_NOWAIT) != 0) + error = EBADF; + mutex_exit(vp->v_interlock); + if (error) + return error; + + /* + * For disk devices, automagically set the vnode size to the + * partition size, if we can. This applies to block devices + * and character devices alike -- every block device must have + * a corresponding character device. And if the module is + * loaded it will remain loaded until we're done here (it is + * forbidden to devsw_detach until closed). So it is safe to + * query cdev_type unconditionally here. + */ + if (cdev_type(dev) == D_DISK) { + ioctl = vp->v_type == VCHR ? cdev_ioctl : bdev_ioctl; + if ((*ioctl)(dev, DIOCGPARTINFO, &pi, FREAD, curlwp) == 0) + uvm_vnp_setsize(vp, + (voff_t)pi.pi_secsize * pi.pi_size); + } + /* Success! 
*/ return 0; } @@ -870,17 +967,7 @@ spec_fdiscard(void *v) dev_t dev; vp = ap->a_vp; - dev = NODEV; - - mutex_enter(vp->v_interlock); - if (vdead_check(vp, VDEAD_NOWAIT) == 0 && vp->v_specnode != NULL) { - dev = vp->v_rdev; - } - mutex_exit(vp->v_interlock); - - if (dev == NODEV) { - return ENXIO; - } + dev = vp->v_rdev; switch (vp->v_type) { case VCHR: @@ -1269,15 +1356,39 @@ spec_close(void *v) panic("spec_close: not special"); } + /* + * Before we examine the open reference count and, if + * appropriate, call .d_close, wait for any concurrent .d_open + * calls to complete, and block new ones. That way, drivers + * need not worry about .d_open and .d_close running at the + * same time. + * + * This also avoids a race where .d_open fails in spec_open but + * spec_close concurrently thinks the device is still open + * before spec_open has had a chance to drop sd_opencnt back to + * 1, so nobody calls .d_close in the end. + */ + rw_enter(&sd->sd_opencloselock, RW_WRITER); + + /* + * Decrement the open reference count of this node and the + * device. For block devices, the open reference count must be + * 1 at this point. If the device's open reference count goes + * to zero, we're the last one out so get the lights. + */ mutex_enter(&device_lock); sn->sn_opencnt--; count = --sd->sd_opencnt; - if (vp->v_type == VBLK) + if (vp->v_type == VBLK) { + KASSERTMSG(count == 0, "block device with %u opens", + count + 1); sd->sd_bdevvp = NULL; + } mutex_exit(&device_lock); - - if (count != 0 && (vp->v_type != VCHR || !(cdev_flags(dev) & D_MCLOSE))) - return 0; + if (count != 0) { + error = 0; + goto out; + } /* * If we're able to block, release the vnode lock & reacquire. 
We @@ -1295,7 +1406,8 @@ spec_close(void *v) if (!(flags & FNONBLOCK)) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); - return (error); +out: rw_exit(&sd->sd_opencloselock); + return error; } /* diff --git a/sys/miscfs/specfs/specdev.h b/sys/miscfs/specfs/specdev.h index 621c103e80b1..7917c3aea594 100644 --- a/sys/miscfs/specfs/specdev.h +++ b/sys/miscfs/specfs/specdev.h @@ -60,7 +60,7 @@ #ifndef _MISCFS_SPECFS_SPECDEV_H_ #define _MISCFS_SPECFS_SPECDEV_H_ -#include +#include #include typedef struct specnode { @@ -78,6 +78,7 @@ typedef struct specdev { u_int sd_opencnt; u_int sd_refcnt; dev_t sd_rdev; + krwlock_t sd_opencloselock; } specdev_t; /* diff --git a/sys/modules/examples/pollpal/pollpal.c b/sys/modules/examples/pollpal/pollpal.c index b76e0733699b..d8ddc73450e0 100644 --- a/sys/modules/examples/pollpal/pollpal.c +++ b/sys/modules/examples/pollpal/pollpal.c @@ -311,7 +311,8 @@ pollpal_modcmd(modcmd_t cmd, void *arg __unused) case MODULE_CMD_FINI: if (pollpal_nopen != 0) return EBUSY; - return devsw_detach(NULL, &pollpal_cdevsw); + devsw_detach(NULL, &pollpal_cdevsw); + return 0; default: return ENOTTY; } diff --git a/sys/net/if_tap.c b/sys/net/if_tap.c index 0b57ad4a711c..314f4647707c 100644 --- a/sys/net/if_tap.c +++ b/sys/net/if_tap.c @@ -256,9 +256,7 @@ tapdetach(void) if_clone_detach(&tap_cloners); #ifdef _MODULE - error = devsw_detach(NULL, &tap_cdevsw); - if (error != 0) - goto out2; + devsw_detach(NULL, &tap_cdevsw); #endif if (tap_count != 0) { @@ -277,7 +275,6 @@ tapdetach(void) out1: #ifdef _MODULE devsw_attach("tap", NULL, &tap_bmajor, &tap_cdevsw, &tap_cmajor); - out2: #endif if_clone_attach(&tap_cloners); diff --git a/sys/net/if_tun.c b/sys/net/if_tun.c index 4f533a8f08d1..f4e5b6d86d43 100644 --- a/sys/net/if_tun.c +++ b/sys/net/if_tun.c @@ -142,17 +142,10 @@ tuninit(void) static int tundetach(void) { -#ifdef _MODULE - int error; -#endif if_clone_detach(&tun_cloner); #ifdef _MODULE - error = devsw_detach(NULL, &tun_cdevsw); - if (error != 0) { - 
if_clone_attach(&tun_cloner); - return error; - } + devsw_detach(NULL, &tun_cdevsw); #endif if (!LIST_EMPTY(&tun_softc_list) || !LIST_EMPTY(&tunz_softc_list)) { diff --git a/sys/rump/dev/lib/libbpf/bpf_component.c b/sys/rump/dev/lib/libbpf/bpf_component.c index 05807d371d40..d41d1987afe8 100644 --- a/sys/rump/dev/lib/libbpf/bpf_component.c +++ b/sys/rump/dev/lib/libbpf/bpf_component.c @@ -50,6 +50,5 @@ RUMP_COMPONENT(RUMP_COMPONENT_NET) panic("bpf devsw attach failed: %d", error); if ((error = rump_vfs_makeonedevnode(S_IFCHR, "/dev/bpf", cmaj, 0)) !=0) panic("cannot create bpf device nodes: %d", error); - if ((error = devsw_detach(NULL, &bpf_cdevsw)) != 0) - panic("cannot detach bpf devsw: %d", error); + devsw_detach(NULL, &bpf_cdevsw); } diff --git a/sys/rump/dev/lib/libdrvctl/drvctl_component.c b/sys/rump/dev/lib/libdrvctl/drvctl_component.c index e2e79f45f9de..ac4e103fdb9c 100644 --- a/sys/rump/dev/lib/libdrvctl/drvctl_component.c +++ b/sys/rump/dev/lib/libdrvctl/drvctl_component.c @@ -51,7 +51,5 @@ RUMP_COMPONENT(RUMP_COMPONENT_DEV) if ( error !=0) panic("cannot create drvctl device node: %d", error); - error = devsw_detach(NULL, &drvctl_cdevsw); - if (error != 0) - panic("cannot detach drvctl devsw: %d", error); + devsw_detach(NULL, &drvctl_cdevsw); } diff --git a/sys/sys/conf.h b/sys/sys/conf.h index 081631d2111f..16dd87e5480c 100644 --- a/sys/sys/conf.h +++ b/sys/sys/conf.h @@ -63,7 +63,7 @@ struct vnode; #define D_TYPEMASK 0x00ff #define D_MPSAFE 0x0100 #define D_NEGOFFSAFE 0x0200 -#define D_MCLOSE 0x0400 +#define D_UNUSED0 0x0400 /* was D_MCLOSE */ /* * Block device switch table @@ -76,6 +76,8 @@ struct bdevsw { int (*d_dump)(dev_t, daddr_t, void *, size_t); int (*d_psize)(dev_t); int (*d_discard)(dev_t, off_t, off_t); + int (*d_devtounit)(dev_t); + struct cfdriver *d_cfdriver; int d_flag; }; @@ -94,6 +96,8 @@ struct cdevsw { paddr_t (*d_mmap)(dev_t, off_t, int); int (*d_kqfilter)(dev_t, struct knote *); int (*d_discard)(dev_t, off_t, off_t); + int 
(*d_devtounit)(dev_t); + struct cfdriver *d_cfdriver; int d_flag; }; @@ -104,7 +108,7 @@ extern kmutex_t device_lock; int devsw_attach(const char *, const struct bdevsw *, devmajor_t *, const struct cdevsw *, devmajor_t *); -int devsw_detach(const struct bdevsw *, const struct cdevsw *); +void devsw_detach(const struct bdevsw *, const struct cdevsw *); const struct bdevsw *bdevsw_lookup(dev_t); const struct cdevsw *cdevsw_lookup(dev_t); devmajor_t bdevsw_lookup_major(const struct bdevsw *); @@ -276,6 +280,7 @@ devmajor_t devsw_name2blk(const char *, char *, size_t); devmajor_t devsw_name2chr(const char *, char *, size_t); dev_t devsw_chr2blk(dev_t); dev_t devsw_blk2chr(dev_t); +int dev_minor_unit(dev_t); void mm_init(void); #endif /* _KERNEL */ diff --git a/sys/sys/device.h b/sys/sys/device.h index 99940ab1563e..76d9f8be5ad6 100644 --- a/sys/sys/device.h +++ b/sys/sys/device.h @@ -274,10 +274,12 @@ struct device { void *dv_private; /* this device's private storage */ int *dv_locators; /* our actual locators (optional) */ prop_dictionary_t dv_properties;/* properties dictionary */ + struct localcount *dv_localcount;/* reference count */ int dv_pending; /* config_pending count */ TAILQ_ENTRY(device) dv_pending_list; + struct lwp *dv_attaching; /* thread not yet finished in attach */ struct lwp *dv_detaching; /* detach lock (config_misc_lock/cv) */ size_t dv_activity_count; @@ -651,6 +653,10 @@ void null_childdetached(device_t, device_t); device_t device_lookup(cfdriver_t, int); void *device_lookup_private(cfdriver_t, int); + +device_t device_lookup_acquire(cfdriver_t, int); +void device_release(device_t); + void device_register(device_t, void *); void device_register_post_config(device_t, void *); diff --git a/sys/sys/disklabel.h b/sys/sys/disklabel.h index 4e94b8671332..853cdbe668a3 100644 --- a/sys/sys/disklabel.h +++ b/sys/sys/disklabel.h @@ -509,6 +509,7 @@ const char *convertdisklabel(struct disklabel *, void (*)(struct buf *), int bounds_check_with_label(struct 
disk *, struct buf *, int); int bounds_check_with_mediasize(struct buf *, int, uint64_t); const char *getfstypename(int); +int disklabel_dev_unit(dev_t); #endif #endif /* _LOCORE */