style: use uppercase for enum constants.

Index: raidframevar.h
===================================================================
RCS file: /cvsroot/src/sys/dev/raidframe/raidframevar.h,v
retrieving revision 1.17
diff -u -p -u -r1.17 raidframevar.h
--- raidframevar.h	14 Nov 2014 14:29:16 -0000	1.17
+++ raidframevar.h	14 Jan 2017 20:09:47 -0000
@@ -383,17 +383,17 @@ struct RF_SparetWait_s {
  * IF YOU ADD A STATE, CHECK TO SEE IF YOU NEED TO MODIFY RF_DEAD_DISK().
  */
 enum RF_DiskStatus_e {
-	rf_ds_optimal,		/* no problems */
-	rf_ds_failed,		/* disk has failed */
-	rf_ds_reconstructing,	/* reconstruction ongoing */
-	rf_ds_dist_spared,	/* reconstruction complete to distributed
+	RF_DS_OPTIMAL,		/* no problems */
+	RF_DS_FAILED,		/* disk has failed */
+	RF_DS_RECONSTRUCTING,	/* reconstruction ongoing */
+	RF_DS_DIST_SPARED,	/* reconstruction complete to distributed
 				 * spare space, dead disk not yet replaced */
-	rf_ds_spared,		/* reconstruction complete, dead disk not
+	RF_DS_SPARED,		/* reconstruction complete, dead disk not
 				   yet replaced */
-	rf_ds_spare,		/* an available spare disk */
-	rf_ds_used_spare,	/* a spare which has been used, and hence is
+	RF_DS_SPARE,		/* an available spare disk */
+	RF_DS_USED_SPARE,	/* a spare which has been used, and hence is
 				 * not available */
-	rf_ds_rebuilding_spare	/* a spare which is being rebuilt to */
+	RF_DS_REBUILDING_SPARE	/* a spare which is being rebuilt to */
 };
 typedef enum RF_DiskStatus_e RF_DiskStatus_t;
@@ -434,7 +434,7 @@ typedef struct RF_ComponentLabel_s {
 	int num_rows;		/* number of rows in this RAID set */
 	int num_columns;	/* number of columns in this RAID set */
 	int clean;		/* 1 when clean, 0 when dirty */
-	int status;		/* rf_ds_optimal, rf_ds_dist_spared, whatever. */
+	int status;		/* RF_DS_OPTIMAL, RF_DS_DIST_SPARED, whatever. */
 	/* stuff that will be in version 2 of the label */
 	int sectPerSU;		/* Sectors per Stripe Unit */
 	int SUsPerPU;		/* Stripe Units per Parity Units */
Index: rf_chaindecluster.c
===================================================================
RCS file: /cvsroot/src/sys/dev/raidframe/rf_chaindecluster.c,v
retrieving revision 1.15
diff -u -p -u -r1.15 rf_chaindecluster.c
--- rf_chaindecluster.c	16 Nov 2006 01:33:23 -0000	1.15
+++ rf_chaindecluster.c	14 Jan 2017 20:09:47 -0000
@@ -254,7 +254,7 @@ rf_RAIDCDagSelect(RF_Raid_t *raidPtr, RF
 	*createFunc = (type == RF_IO_TYPE_READ) ?
 	    (RF_VoidFuncPtr) rf_CreateFaultFreeReadDAG : (RF_VoidFuncPtr) rf_CreateRaidOneWriteDAG;

 	if (type == RF_IO_TYPE_READ) {
-		if ((raidPtr->status == rf_rs_degraded) || (raidPtr->status == rf_rs_reconstructing))
+		if ((raidPtr->status == RF_RS_DEGRADED) || (raidPtr->status == RF_RS_RECONSTRUCTING))
 			*createFunc = (RF_VoidFuncPtr) rf_CreateRaidCDegradedReadDAG;	/* array status is
 											 * degraded, implement
 											 * workload shifting */
Index: rf_copyback.c
===================================================================
RCS file: /cvsroot/src/sys/dev/raidframe/rf_copyback.c,v
retrieving revision 1.50
diff -u -p -u -r1.50 rf_copyback.c
--- rf_copyback.c	14 Jun 2014 07:39:00 -0000	1.50
+++ rf_copyback.c	14 Jan 2017 20:09:47 -0000
@@ -104,8 +104,8 @@ rf_CopybackReconstructedData(RF_Raid_t *
 	fcol = 0;
 	found = 0;
 	for (fcol = 0; fcol < raidPtr->numCol; fcol++) {
-		if (raidPtr->Disks[fcol].status == rf_ds_dist_spared
-		    || raidPtr->Disks[fcol].status == rf_ds_spared) {
+		if (raidPtr->Disks[fcol].status == RF_DS_DIST_SPARED
+		    || raidPtr->Disks[fcol].status == RF_DS_SPARED) {
 			found = 1;
 			break;
 		}
@@ -205,8 +205,8 @@ rf_CopybackReconstructedData(RF_Raid_t *

 	/* adjust state of the array and of the disks */
 	rf_lock_mutex2(raidPtr->mutex);
-	raidPtr->Disks[desc->fcol].status = rf_ds_optimal;
-	raidPtr->status = rf_rs_optimal;
+	raidPtr->Disks[desc->fcol].status = RF_DS_OPTIMAL;
+	raidPtr->status = RF_RS_OPTIMAL;
 	rf_copyback_in_progress = 1;	/* debug only */
 	rf_unlock_mutex2(raidPtr->mutex);
@@ -408,7 +408,7 @@ rf_CopybackComplete(RF_CopybackDesc_t *d
 		RF_ASSERT(raidPtr->Layout.map->parityConfig == 'D');
 		rf_FreeSpareTable(raidPtr);
 	} else {
-		raidPtr->Disks[desc->spCol].status = rf_ds_spare;
+		raidPtr->Disks[desc->spCol].status = RF_DS_SPARE;
 	}
 	rf_unlock_mutex2(raidPtr->mutex);
Index: rf_dagutils.c
===================================================================
RCS file: /cvsroot/src/sys/dev/raidframe/rf_dagutils.c,v
retrieving revision 1.54
diff -u -p -u -r1.54 rf_dagutils.c
--- rf_dagutils.c	7 Jan 2016 21:57:00 -0000	1.54
+++ rf_dagutils.c	14 Jan 2017 20:09:47 -0000
@@ -931,7 +931,7 @@ rf_redirect_asm(RF_Raid_t *raidPtr, RF_A
 	int scol = raidPtr->reconControl->spareCol;
 	RF_PhysDiskAddr_t *pda;

-	RF_ASSERT(raidPtr->status == rf_rs_reconstructing);
+	RF_ASSERT(raidPtr->status == RF_RS_RECONSTRUCTING);
 	for (pda = asmap->physInfo; pda; pda = pda->next) {
 		if (pda->col == fcol) {
 #if RF_DEBUG_DAG
Index: rf_decluster.c
===================================================================
RCS file: /cvsroot/src/sys/dev/raidframe/rf_decluster.c,v
retrieving revision 1.24
diff -u -p -u -r1.24 rf_decluster.c
--- rf_decluster.c	23 Mar 2014 09:30:59 -0000	1.24
+++ rf_decluster.c	14 Jan 2017 20:09:47 -0000
@@ -350,8 +350,8 @@ rf_MapSectorDeclustered(RF_Raid_t *raidP

 	/* remap to distributed spare space if indicated */
 	if (remap) {
-		RF_ASSERT(raidPtr->Disks[*col].status == rf_ds_reconstructing || raidPtr->Disks[*col].status == rf_ds_dist_spared ||
-		    (rf_copyback_in_progress && raidPtr->Disks[*col].status == rf_ds_optimal));
+		RF_ASSERT(raidPtr->Disks[*col].status == RF_DS_RECONSTRUCTING || raidPtr->Disks[*col].status == RF_DS_DIST_SPARED ||
+		    (rf_copyback_in_progress && raidPtr->Disks[*col].status == RF_DS_OPTIMAL));
 		rf_remap_to_spare_space(layoutPtr, info, FullTableID, TableID, BlockID, (base_suid) ?
 		    1 : 0, SpareRegion, col, &outSU);
 	} else {
@@ -411,8 +411,8 @@ rf_MapParityDeclustered(RF_Raid_t *raidP
 	*col = info->LayoutTable[BlockID][RepIndex];

 	if (remap) {
-		RF_ASSERT(raidPtr->Disks[*col].status == rf_ds_reconstructing || raidPtr->Disks[*col].status == rf_ds_dist_spared ||
-		    (rf_copyback_in_progress && raidPtr->Disks[*col].status == rf_ds_optimal));
+		RF_ASSERT(raidPtr->Disks[*col].status == RF_DS_RECONSTRUCTING || raidPtr->Disks[*col].status == RF_DS_DIST_SPARED ||
+		    (rf_copyback_in_progress && raidPtr->Disks[*col].status == RF_DS_OPTIMAL));
 		rf_remap_to_spare_space(layoutPtr, info, FullTableID, TableID, BlockID, (base_suid) ?
 		    1 : 0, SpareRegion, col, &outSU);
 	} else {
Index: rf_disks.c
===================================================================
RCS file: /cvsroot/src/sys/dev/raidframe/rf_disks.c,v
retrieving revision 1.89
diff -u -p -u -r1.89 rf_disks.c
--- rf_disks.c	13 Jan 2017 13:01:13 -0000	1.89
+++ rf_disks.c	14 Jan 2017 20:09:47 -0000
@@ -133,18 +133,18 @@ rf_ConfigureDisks(RF_ShutdownList_t **li
 		if (ret)
 			goto fail;

-		if (disks[c].status == rf_ds_optimal) {
+		if (disks[c].status == RF_DS_OPTIMAL) {
 			ret = raidfetch_component_label(raidPtr, c);
 			if (ret)
 				goto fail;

 			/* mark it as failed if the label looks bogus... */
 			if (!rf_reasonable_label(&raidPtr->raid_cinfo[c].ci_label,0) && !force) {
-				disks[c].status = rf_ds_failed;
+				disks[c].status = RF_DS_FAILED;
 			}
 		}

-		if (disks[c].status != rf_ds_optimal) {
+		if (disks[c].status != RF_DS_OPTIMAL) {
 			numFailuresThisRow++;
 		} else {
 			if (disks[c].numBlocks < min_numblks)
@@ -161,14 +161,14 @@ rf_ConfigureDisks(RF_ShutdownList_t **li
 	/* XXX this should probably check to see how many failures
 	   we can handle for this configuration! */
 	if (numFailuresThisRow > 0)
-		raidPtr->status = rf_rs_degraded;
+		raidPtr->status = RF_RS_DEGRADED;

 	/* all disks must be the same size & have the same block size, bs must
 	 * be a power of 2 */
 	bs = 0;
 	foundone = 0;
 	for (c = 0; c < raidPtr->numCol; c++) {
-		if (disks[c].status == rf_ds_optimal) {
+		if (disks[c].status == RF_DS_OPTIMAL) {
 			bs = disks[c].blockSize;
 			foundone = 1;
 			break;
@@ -200,7 +200,7 @@ rf_ConfigureDisks(RF_ShutdownList_t **li
 	}

 	for (c = 0; c < raidPtr->numCol; c++) {
-		if (disks[c].status == rf_ds_optimal) {
+		if (disks[c].status == RF_DS_OPTIMAL) {
 			if (disks[c].blockSize != bs) {
 				RF_ERRORMSG1("Error: block size of disk at c %d different from disk at c 0\n", c);
 				ret = EINVAL;
@@ -253,11 +253,11 @@ rf_ConfigureSpareDisks(RF_ShutdownList_t
 		    &disks[i], raidPtr->numCol + i);
 		if (ret)
 			goto fail;
-		if (disks[i].status != rf_ds_optimal) {
+		if (disks[i].status != RF_DS_OPTIMAL) {
 			RF_ERRORMSG1("Warning: spare disk %s failed TUR\n",
 			    &cfgPtr->spare_names[i][0]);
 		} else {
-			disks[i].status = rf_ds_spare;	/* change status to
+			disks[i].status = RF_DS_SPARE;	/* change status to
 							 * spare */
 			DPRINTF6("Spare Disk %d: dev %s numBlocks %" PRIu64 " blockSize %d (%ld MB)\n", i,
 			    disks[i].devname,
@@ -510,7 +510,7 @@ rf_AutoConfigureDisks(RF_Raid_t *raidPtr
 				   counter is not in sync with the rest,
 				   and we really consider it to be failed.  */
-				disks[c].status = rf_ds_failed;
+				disks[c].status = RF_DS_FAILED;
 				numFailuresThisRow++;
 			} else {
 				if (ac->clabel->clean != RF_RAID_CLEAN) {
@@ -520,7 +520,7 @@ rf_AutoConfigureDisks(RF_Raid_t *raidPtr
 		} else {
 			/* Didn't find it at all!!
 			   Component must really be dead */
-			disks[c].status = rf_ds_failed;
+			disks[c].status = RF_DS_FAILED;
 			snprintf(disks[c].devname, sizeof(disks[c].devname),
 			    "component%d", c);
 			numFailuresThisRow++;
@@ -530,7 +530,7 @@ rf_AutoConfigureDisks(RF_Raid_t *raidPtr
 	/* XXX this should probably check to see how many failures
 	   we can handle for this configuration! */
 	if (numFailuresThisRow > 0) {
-		raidPtr->status = rf_rs_degraded;
+		raidPtr->status = RF_RS_DEGRADED;
 		raidPtr->numFailures = numFailuresThisRow;
 	}

@@ -587,7 +587,7 @@ rf_ConfigureDisk(RF_Raid_t *raidPtr, cha
 	(void) strcpy(diskPtr->devname, p);

 	/* Let's start by claiming the component is fine and well... */
-	diskPtr->status = rf_ds_optimal;
+	diskPtr->status = RF_DS_OPTIMAL;

 	raidPtr->raid_cinfo[col].ci_vp = NULL;
 	raidPtr->raid_cinfo[col].ci_dev = 0;
@@ -596,7 +596,7 @@ rf_ConfigureDisk(RF_Raid_t *raidPtr, cha
 		printf("Ignoring missing component at column %d\n", col);
 		snprintf(diskPtr->devname, sizeof(diskPtr->devname),
 		    "component%d", col);
-		diskPtr->status = rf_ds_failed;
+		diskPtr->status = RF_DS_FAILED;
 		return (0);
 	}

@@ -612,7 +612,7 @@ rf_ConfigureDisk(RF_Raid_t *raidPtr, cha
 		printf("dk_lookup on device: %s failed!\n", diskPtr->devname);
 		if (error == ENXIO) {
 			/* the component isn't there... must be dead :-( */
-			diskPtr->status = rf_ds_failed;
+			diskPtr->status = RF_DS_FAILED;
 			return 0;
 		} else {
 			return (error);
@@ -630,7 +630,7 @@ rf_ConfigureDisk(RF_Raid_t *raidPtr, cha
 	if (raidPtr->bytesPerSector == 0)
 		raidPtr->bytesPerSector = diskPtr->blockSize;

-	if (diskPtr->status == rf_ds_optimal) {
+	if (diskPtr->status == RF_DS_OPTIMAL) {
 		raidPtr->raid_cinfo[col].ci_vp = vp;
 		raidPtr->raid_cinfo[col].ci_dev = vp->v_rdev;

@@ -711,12 +711,12 @@ rf_handle_hosed(RF_Raid_t *raidPtr, RF_C
 	/* we'll fail this component, as if there are
 	   other major errors, we aren't forcing things
 	   and we'll abort the config anyways */
-	if (again && raidPtr->Disks[hosed_column].status == rf_ds_failed)
+	if (again && raidPtr->Disks[hosed_column].status == RF_DS_FAILED)
 		return;

-	raidPtr->Disks[hosed_column].status = rf_ds_failed;
+	raidPtr->Disks[hosed_column].status = RF_DS_FAILED;
 	raidPtr->numFailures++;
-	raidPtr->status = rf_rs_degraded;
+	raidPtr->status = RF_RS_DEGRADED;
 }

 /*
@@ -775,7 +775,7 @@ rf_CheckLabels(RF_Raid_t *raidPtr, RF_Co
 	mod_count[0] = mod_count[1] = mod_count[2] = mod_count[3] = 0;

 	for (c = 0; c < raidPtr->numCol; c++) {
-		if (raidPtr->Disks[c].status != rf_ds_optimal)
+		if (raidPtr->Disks[c].status != RF_DS_OPTIMAL)
 			continue;
 		ci_label = raidget_component_label(raidPtr, c);
 		found=0;
@@ -832,7 +832,7 @@ rf_CheckLabels(RF_Raid_t *raidPtr, RF_Co
 	}

 	for (c = 0; c < raidPtr->numCol; c++) {
-		if (raidPtr->Disks[c].status != rf_ds_optimal)
+		if (raidPtr->Disks[c].status != RF_DS_OPTIMAL)
 			continue;
 		ci_label = raidget_component_label(raidPtr, c);
 		if (serial_number != ci_label->serial_number) {
@@ -881,7 +881,7 @@ rf_CheckLabels(RF_Raid_t *raidPtr, RF_Co
 	}

 	for (c = 0; c < raidPtr->numCol; c++) {
-		if (raidPtr->Disks[c].status != rf_ds_optimal)
+		if (raidPtr->Disks[c].status != RF_DS_OPTIMAL)
 			continue;
 		ci_label = raidget_component_label(raidPtr, c);

@@ -935,7 +935,7 @@ rf_CheckLabels(RF_Raid_t *raidPtr, RF_Co
 	}

 	for (c = 0; c < raidPtr->numCol; c++) {
-		if (raidPtr->Disks[c].status != rf_ds_optimal) {
+		if (raidPtr->Disks[c].status != RF_DS_OPTIMAL) {
 			hosed_column = c;
 			break;
 		}
@@ -1011,14 +1011,14 @@ rf_add_hot_spare(RF_Raid_t *raidPtr, RF_
 	if (ret)
 		goto fail;

-	if (disks[spare_number].status != rf_ds_optimal) {
+	if (disks[spare_number].status != RF_DS_OPTIMAL) {
 		RF_ERRORMSG1("Warning: spare disk %s failed TUR\n",
 		    sparePtr->component_name);
 		rf_close_component(raidPtr, raidPtr->raid_cinfo[raidPtr->numCol+spare_number].ci_vp, 0);
 		ret=EINVAL;
 		goto fail;
 	} else {
-		disks[spare_number].status = rf_ds_spare;
+		disks[spare_number].status = RF_DS_SPARE;
 		DPRINTF6("Spare Disk %d: dev %s numBlocks %" PRIu64 " blockSize %d (%ld MB)\n",
 		    spare_number, disks[spare_number].devname,
Index: rf_disks.h
===================================================================
RCS file: /cvsroot/src/sys/dev/raidframe/rf_disks.h,v
retrieving revision 1.14
diff -u -p -u -r1.14 rf_disks.h
--- rf_disks.h	11 Dec 2005 12:23:37 -0000	1.14
+++ rf_disks.h	14 Jan 2017 20:09:47 -0000
@@ -40,9 +40,9 @@
 #include "rf_netbsd.h"

 /* if a disk is in any of these states, it is inaccessible */
-#define RF_DEAD_DISK(_dstat_) (((_dstat_) == rf_ds_spared) || \
-	((_dstat_) == rf_ds_reconstructing) || ((_dstat_) == rf_ds_failed) || \
-	((_dstat_) == rf_ds_dist_spared))
+#define RF_DEAD_DISK(_dstat_) (((_dstat_) == RF_DS_SPARED) || \
+	((_dstat_) == RF_DS_RECONSTRUCTING) || ((_dstat_) == RF_DS_FAILED) || \
+	((_dstat_) == RF_DS_DIST_SPARED))

 int rf_ConfigureDisks(RF_ShutdownList_t **, RF_Raid_t *, RF_Config_t *);
 int rf_ConfigureSpareDisks(RF_ShutdownList_t **, RF_Raid_t *, RF_Config_t *);
Index: rf_driver.c
===================================================================
RCS file: /cvsroot/src/sys/dev/raidframe/rf_driver.c,v
retrieving revision 1.133
diff -u -p -u -r1.133 rf_driver.c
--- rf_driver.c	10 Dec 2016 23:03:27 -0000	1.133
+++ rf_driver.c	14 Jan 2017 20:09:47 -0000
@@ -354,7 +354,7 @@ rf_Configure(RF_Raid_t *raidPtr, RF_Conf
 	raidPtr->numCol = cfgPtr->numCol;
 	raidPtr->numSpare = cfgPtr->numSpare;

-	raidPtr->status = rf_rs_optimal;
+	raidPtr->status = RF_RS_OPTIMAL;
 	raidPtr->reconControl = NULL;

 	DO_RAID_INIT_CONFIGURE(rf_ConfigureEngine);
@@ -703,8 +703,8 @@ rf_SetReconfiguredMode(RF_Raid_t *raidPt
 	}
 	rf_lock_mutex2(raidPtr->mutex);
 	raidPtr->numFailures++;
-	raidPtr->Disks[col].status = rf_ds_dist_spared;
-	raidPtr->status = rf_rs_reconfigured;
+	raidPtr->Disks[col].status = RF_DS_DIST_SPARED;
+	raidPtr->status = RF_RS_RECONFIGURED;
 	rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);
 	/* install spare table only if declustering + distributed sparing
	 * architecture. */
@@ -726,13 +726,13 @@ rf_FailDisk(RF_Raid_t *raidPtr, int fcol
 	rf_SuspendNewRequestsAndWait(raidPtr);

 	rf_lock_mutex2(raidPtr->mutex);
-	if (raidPtr->Disks[fcol].status != rf_ds_failed) {
+	if (raidPtr->Disks[fcol].status != RF_DS_FAILED) {
 		/* must be failing something that is valid, or else it's
 		   already marked as failed (in which case we don't
 		   want to mark it failed again!)
 		   */
 		raidPtr->numFailures++;
-		raidPtr->Disks[fcol].status = rf_ds_failed;
-		raidPtr->status = rf_rs_degraded;
+		raidPtr->Disks[fcol].status = RF_DS_FAILED;
+		raidPtr->status = RF_RS_DEGRADED;
 	}
 	rf_unlock_mutex2(raidPtr->mutex);
Index: rf_map.c
===================================================================
RCS file: /cvsroot/src/sys/dev/raidframe/rf_map.c,v
retrieving revision 1.47
diff -u -p -u -r1.47 rf_map.c
--- rf_map.c	15 Oct 2016 20:31:15 -0000	1.47
+++ rf_map.c	14 Jan 2017 20:09:47 -0000
@@ -649,7 +649,7 @@ rf_CheckStripeForFailures(RF_Raid_t *rai
 	for (i = 0; i < layoutPtr->numDataCol + layoutPtr->numParityCol; i++) {
 		if (diskids[i] != pcol) {
 			if (RF_DEAD_DISK(raidPtr->Disks[diskids[i]].status)) {
-				if (raidPtr->status != rf_rs_reconstructing)
+				if (raidPtr->status != RF_RS_RECONSTRUCTING)
 					return (1);
 				RF_ASSERT(raidPtr->reconControl->fcol == diskids[i]);
 				layoutPtr->map->MapSector(raidPtr,
@@ -858,12 +858,12 @@ rf_ASMCheckStatus(RF_Raid_t *raidPtr, RF

 	dstatus = disks[pda_p->col].status;

-	if (dstatus == rf_ds_spared) {
+	if (dstatus == RF_DS_SPARED) {
 		/* if the disk has been spared, redirect access to the spare */
 		fcol = pda_p->col;
 		pda_p->col = disks[fcol].spareCol;
 	} else
-		if (dstatus == rf_ds_dist_spared) {
+		if (dstatus == RF_DS_DIST_SPARED) {
 			/* ditto if disk has been spared to dist spare space */
 #if RF_DEBUG_MAP
 			RF_RowCol_t oc = pda_p->col;
Index: rf_netbsdkintf.c
===================================================================
RCS file: /cvsroot/src/sys/dev/raidframe/rf_netbsdkintf.c,v
retrieving revision 1.347
diff -u -p -u -r1.347 rf_netbsdkintf.c
--- rf_netbsdkintf.c	19 Sep 2016 23:37:10 -0000	1.347
+++ rf_netbsdkintf.c	14 Jan 2017 20:09:47 -0000
@@ -691,7 +691,7 @@ raid_dumpblocks(device_t dev, void *va,
 	dumpto = -1;
 	for (c = 0; c < raidPtr->numCol; c++) {
-		if (raidPtr->Disks[c].status == rf_ds_optimal) {
+		if (raidPtr->Disks[c].status == RF_DS_OPTIMAL) {
 			/* this might be the one */
 			dumpto = c;
 			break;
@@ -707,7 +707,7 @@ raid_dumpblocks(device_t dev, void *va,
 	for (c = 0; c < raidPtr->numSpare; c++) {
 		sparecol = raidPtr->numCol + c;
-		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
+		if (raidPtr->Disks[sparecol].status == RF_DS_USED_SPARE) {
 			/* How about this one? */
 			scol = -1;
 			for(j=0;j<raidPtr->numCol;j++) {
@@ -1411,7 +1411,7 @@ raidioctl(dev_t dev, u_long cmd, void *d
 		}

 		rf_lock_mutex2(raidPtr->mutex);
-		if ((raidPtr->Disks[column].status == rf_ds_optimal) &&
+		if ((raidPtr->Disks[column].status == RF_DS_OPTIMAL) &&
 		    (raidPtr->numFailures > 0)) {
 			/* XXX 0 above shouldn't be constant!!! */
 			/* some component other than this has failed.
@@ -1425,7 +1425,7 @@ raidioctl(dev_t dev, u_long cmd, void *d
 			return (EINVAL);
 		}
 		if (raidPtr->Disks[column].status ==
-		    rf_ds_reconstructing) {
+		    RF_DS_RECONSTRUCTING) {
 			printf("raid%d: Unable to reconstruct to disk at:\n", raidPtr->raidid);
 			printf("raid%d: Col: %d Reconstruction already occurring!\n", raidPtr->raidid, column);

 			rf_unlock_mutex2(raidPtr->mutex);
 			return (EINVAL);
 		}
-		if (raidPtr->Disks[column].status == rf_ds_spared) {
+		if (raidPtr->Disks[column].status == RF_DS_SPARED) {
 			rf_unlock_mutex2(raidPtr->mutex);
 			return (EINVAL);
 		}
@@ -1479,9 +1479,9 @@ raidioctl(dev_t dev, u_long cmd, void *d
 		}
 		for (j = d_cfg->cols, i = 0; i < d_cfg->nspares; i++, j++) {
 			d_cfg->spares[i] = raidPtr->Disks[j];
-			if (d_cfg->spares[i].status == rf_ds_rebuilding_spare) {
+			if (d_cfg->spares[i].status == RF_DS_REBUILDING_SPARE) {
 				/* XXX: raidctl(8) expects to see this as a used spare */
-				d_cfg->spares[i].status = rf_ds_used_spare;
+				d_cfg->spares[i].status = RF_DS_USED_SPARE;
 			}
 		}
 		retcode = copyout(d_cfg, *ucfgp, sizeof(RF_DeviceConfig_t));
@@ -1555,20 +1555,20 @@ raidioctl(dev_t dev, u_long cmd, void *d

 		rf_lock_mutex2(raidPtr->mutex);
-		if (raidPtr->status == rf_rs_reconstructing) {
+		if (raidPtr->status == RF_RS_RECONSTRUCTING) {
 			/* you can't fail a disk while we're reconstructing! */
 			/* XXX wrong for RAID6 */
 			rf_unlock_mutex2(raidPtr->mutex);
 			return (EINVAL);
 		}
 		if ((raidPtr->Disks[rr->col].status ==
-		     rf_ds_optimal) && (raidPtr->numFailures > 0)) {
+		     RF_DS_OPTIMAL) && (raidPtr->numFailures > 0)) {
 			/* some other component has failed.  Let's not make
 			   things worse. XXX wrong for RAID6 */
 			rf_unlock_mutex2(raidPtr->mutex);
 			return (EINVAL);
 		}
-		if (raidPtr->Disks[rr->col].status == rf_ds_spared) {
+		if (raidPtr->Disks[rr->col].status == RF_DS_SPARED) {
 			/* Can't fail a spared disk! */
 			rf_unlock_mutex2(raidPtr->mutex);
 			return (EINVAL);
@@ -1615,7 +1615,7 @@ raidioctl(dev_t dev, u_long cmd, void *d
 			*(int *) data = 100;
 			return(0);
 		}
-		if (raidPtr->status != rf_rs_reconstructing)
+		if (raidPtr->status != RF_RS_RECONSTRUCTING)
 			*(int *) data = 100;
 		else {
 			if (raidPtr->reconControl->numRUsTotal > 0) {
@@ -1627,7 +1627,7 @@ raidioctl(dev_t dev, u_long cmd, void *d
 		return (0);
 	case RAIDFRAME_CHECK_RECON_STATUS_EXT:
 		progressInfoPtr = (RF_ProgressInfo_t **) data;
-		if (raidPtr->status != rf_rs_reconstructing) {
+		if (raidPtr->status != RF_RS_RECONSTRUCTING) {
 			progressInfo.remaining = 0;
 			progressInfo.completed = 100;
 			progressInfo.total = 100;
@@ -1709,7 +1709,7 @@ raidioctl(dev_t dev, u_long cmd, void *d

 	case RAIDFRAME_SET_LAST_UNIT:
 		for (column = 0; column < raidPtr->numCol; column++)
-			if (raidPtr->Disks[column].status != rf_ds_optimal)
+			if (raidPtr->Disks[column].status != RF_DS_OPTIMAL)
 				return EBUSY;

 		for (column = 0; column < raidPtr->numCol; column++) {
@@ -2117,9 +2117,9 @@ KernelWakeupFunc(struct buf *bp)
 		/* and only if it wouldn't leave this RAID set
 		   completely broken */
 		if (((queue->raidPtr->Disks[queue->col].status ==
-		      rf_ds_optimal) ||
+		      RF_DS_OPTIMAL) ||
 		     (queue->raidPtr->Disks[queue->col].status ==
-		      rf_ds_used_spare)) &&
+		      RF_DS_USED_SPARE)) &&
 		     (queue->raidPtr->numFailures <
 		      queue->raidPtr->Layout.map->faultsTolerated)) {
 			printf("raid%d: IO Error (%d). Marking %s as failed.\n",
@@ -2127,8 +2127,8 @@ KernelWakeupFunc(struct buf *bp)
 			    bp->b_error,
 			    queue->raidPtr->Disks[queue->col].devname);
 			queue->raidPtr->Disks[queue->col].status =
-			    rf_ds_failed;
-			queue->raidPtr->status = rf_rs_degraded;
+			    RF_DS_FAILED;
+			queue->raidPtr->status = RF_RS_DEGRADED;
 			queue->raidPtr->numFailures++;
 			queue->raidPtr->numNewFailures++;
 		} else {	/* Disk is already dead... */
@@ -2480,7 +2480,7 @@ rf_markalldirty(RF_Raid_t *raidPtr)
 		   failed */
 		if (!RF_DEAD_DISK(raidPtr->Disks[c].status)) {
 			clabel = raidget_component_label(raidPtr, c);
-			if (clabel->status == rf_ds_spared) {
+			if (clabel->status == RF_DS_SPARED) {
 				/* XXX do something special...
 				   but whatever you do, don't
 				   try to access it!! */
@@ -2492,11 +2492,11 @@ rf_markalldirty(RF_Raid_t *raidPtr)

 	for( c = 0; c < raidPtr->numSpare ; c++) {
 		sparecol = raidPtr->numCol + c;
-		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
+		if (raidPtr->Disks[sparecol].status == RF_DS_USED_SPARE) {
 			/*
 			   we claim this disk is "optimal" if it's
-			   rf_ds_used_spare, as that means it should be
+			   RF_DS_USED_SPARE, as that means it should be
 			   directly substitutable for the disk it replaced.
 			   We note that too...
@@ -2516,9 +2516,9 @@ rf_markalldirty(RF_Raid_t *raidPtr)
 			clabel->row = 0;
 			clabel->column = scol;
-			/* Note: we *don't* change status from rf_ds_used_spare
-			   to rf_ds_optimal */
-			/* clabel.status = rf_ds_optimal; */
+			/* Note: we *don't* change status from RF_DS_USED_SPARE
+			   to RF_DS_OPTIMAL */
+			/* clabel.status = RF_DS_OPTIMAL; */

 			raidmarkdirty(raidPtr, sparecol);
 		}
@@ -2544,11 +2544,11 @@ rf_update_component_labels(RF_Raid_t *ra
 	raidPtr->mod_counter++;
 	for (c = 0; c < raidPtr->numCol; c++) {
-		if (raidPtr->Disks[c].status == rf_ds_optimal) {
+		if (raidPtr->Disks[c].status == RF_DS_OPTIMAL) {
 			clabel = raidget_component_label(raidPtr, c);
 			/* make sure status is noted */
-			clabel->status = rf_ds_optimal;
-			
+			clabel->status = RF_DS_OPTIMAL;
+
 			/* note what unit we are configured as */
 			if ((rs->sc_cflags & RAIDF_UNIT_CHANGED) == 0)
 				clabel->last_unit = raidPtr->raidid;
@@ -2566,11 +2566,11 @@ rf_update_component_labels(RF_Raid_t *ra
 	for( c = 0; c < raidPtr->numSpare ; c++) {
 		sparecol = raidPtr->numCol + c;
 		/* Need to ensure that the reconstruct actually completed! */
-		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
+		if (raidPtr->Disks[sparecol].status == RF_DS_USED_SPARE) {
 			/*
 			   we claim this disk is "optimal" if it's
-			   rf_ds_used_spare, as that means it should be
+			   RF_DS_USED_SPARE, as that means it should be
 			   directly substitutable for the disk it replaced.
 			   We note that too...
@@ -2590,7 +2590,7 @@ rf_update_component_labels(RF_Raid_t *ra

 			raid_init_component_label(raidPtr, clabel);
 			clabel->column = scol;
-			clabel->status = rf_ds_optimal;
+			clabel->status = RF_DS_OPTIMAL;
 			if ((rs->sc_cflags & RAIDF_UNIT_CHANGED) == 0)
 				clabel->last_unit = raidPtr->raidid;
@@ -3370,7 +3370,7 @@ rf_set_autoconfig(RF_Raid_t *raidPtr, in
 	raidPtr->autoconfigure = new_value;

 	for(column=0; column<raidPtr->numCol; column++) {
-		if (raidPtr->Disks[column].status == rf_ds_optimal) {
+		if (raidPtr->Disks[column].status == RF_DS_OPTIMAL) {
 			clabel = raidget_component_label(raidPtr, column);
 			clabel->autoconfigure = new_value;
 			raidflush_component_label(raidPtr, column);
@@ -3378,7 +3378,7 @@ rf_set_autoconfig(RF_Raid_t *raidPtr, in
 	}
 	for(column = 0; column < raidPtr->numSpare ; column++) {
 		sparecol = raidPtr->numCol + column;
-		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
+		if (raidPtr->Disks[sparecol].status == RF_DS_USED_SPARE) {
 			clabel = raidget_component_label(raidPtr, sparecol);
 			clabel->autoconfigure = new_value;
 			raidflush_component_label(raidPtr, sparecol);
@@ -3396,7 +3396,7 @@ rf_set_rootpartition(RF_Raid_t *raidPtr,
 	raidPtr->root_partition = new_value;
 	for(column=0; column<raidPtr->numCol; column++) {
-		if (raidPtr->Disks[column].status == rf_ds_optimal) {
+		if (raidPtr->Disks[column].status == RF_DS_OPTIMAL) {
 			clabel = raidget_component_label(raidPtr, column);
 			clabel->root_partition = new_value;
 			raidflush_component_label(raidPtr, column);
@@ -3404,7 +3404,7 @@ rf_set_rootpartition(RF_Raid_t *raidPtr,
 	}
 	for(column = 0; column < raidPtr->numSpare ; column++) {
 		sparecol = raidPtr->numCol + column;
-		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
+		if (raidPtr->Disks[sparecol].status == RF_DS_USED_SPARE) {
 			clabel = raidget_component_label(raidPtr, sparecol);
 			clabel->root_partition = new_value;
 			raidflush_component_label(raidPtr, sparecol);
@@ -3464,7 +3464,7 @@ raid_init_component_label(RF_Raid_t *rai
 	clabel->num_rows = 1;
 	clabel->num_columns = raidPtr->numCol;
 	clabel->clean = RF_RAID_DIRTY; /* not clean */
-	clabel->status = rf_ds_optimal; /* "It's good!" */
+	clabel->status = RF_DS_OPTIMAL; /* "It's good!" */

 	clabel->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
 	clabel->SUsPerPU = raidPtr->Layout.SUsPerPU;
@@ -3694,7 +3694,7 @@ rf_sync_component_caches(RF_Raid_t *raid
 	error = 0;
 	for (c = 0; c < raidPtr->numCol; c++) {
-		if (raidPtr->Disks[c].status == rf_ds_optimal) {
+		if (raidPtr->Disks[c].status == RF_DS_OPTIMAL) {
 			e = VOP_IOCTL(raidPtr->raid_cinfo[c].ci_vp,
 			    DIOCCACHESYNC, &force, FWRITE, NOCRED);
 			if (e) {
@@ -3711,7 +3711,7 @@ rf_sync_component_caches(RF_Raid_t *raid
 	for( c = 0; c < raidPtr->numSpare ; c++) {
 		sparecol = raidPtr->numCol + c;
 		/* Need to ensure that the reconstruct actually completed! */
-		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
+		if (raidPtr->Disks[sparecol].status == RF_DS_USED_SPARE) {
 			e = VOP_IOCTL(raidPtr->raid_cinfo[sparecol].ci_vp,
 			    DIOCCACHESYNC, &force, FWRITE, NOCRED);
 			if (e) {
Index: rf_paritylogging.c
===================================================================
RCS file: /cvsroot/src/sys/dev/raidframe/rf_paritylogging.c,v
retrieving revision 1.34
diff -u -p -u -r1.34 rf_paritylogging.c
--- rf_paritylogging.c	11 May 2011 06:20:33 -0000	1.34
+++ rf_paritylogging.c	14 Jan 2017 20:09:47 -0000
@@ -842,8 +842,8 @@ rf_ParityLoggingDagSelect(
 		failedPDA = asmp->failedPDAs[0];
 		fcol = failedPDA->col;
 		rstat = raidPtr->status;
-		prior_recon = (rstat == rf_rs_reconfigured) || (
-		    (rstat == rf_rs_reconstructing) ?
+		prior_recon = (rstat == RF_RS_RECONFIGURED) || (
+		    (rstat == RF_RS_RECONSTRUCTING) ?
 		    rf_CheckRUReconstructed(raidPtr->reconControl->reconMap, failedPDA->startSector) : 0
 		    );
 		if (prior_recon) {
Index: rf_paritymap.c
===================================================================
RCS file: /cvsroot/src/sys/dev/raidframe/rf_paritymap.c,v
retrieving revision 1.8
diff -u -p -u -r1.8 rf_paritymap.c
--- rf_paritymap.c	27 Apr 2011 07:55:15 -0000	1.8
+++ rf_paritymap.c	14 Jan 2017 20:09:47 -0000
@@ -371,7 +371,7 @@ rf_paritymap_set_params(struct rf_parity
 		clabel->parity_map_regions = regions;

 		/* Don't touch the disk if it's been spared */
-		if (clabel->status == rf_ds_spared)
+		if (clabel->status == RF_DS_SPARED)
 			continue;

 		raidflush_component_label(raidPtr, col);
@@ -379,7 +379,7 @@ rf_paritymap_set_params(struct rf_parity

 	/* handle the spares too... */
 	for (col = 0; col < raidPtr->numSpare; col++) {
-		if (raidPtr->Disks[raidPtr->numCol+col].status == rf_ds_used_spare) {
+		if (raidPtr->Disks[raidPtr->numCol+col].status == RF_DS_USED_SPARE) {
 			clabel = raidget_component_label(raidPtr, raidPtr->numCol+col);
 			clabel->parity_map_ntick = cooldown;
 			clabel->parity_map_tickms = tickms;
@@ -772,7 +772,7 @@ rf_paritymap_get_disable(RF_Raid_t *raid
 			dis = 1;
 	}
 	for (col = 0; col < raidPtr->numSpare; col++) {
-		if (raidPtr->Disks[raidPtr->numCol+col].status != rf_ds_used_spare)
+		if (raidPtr->Disks[raidPtr->numCol+col].status != RF_DS_USED_SPARE)
 			continue;
 		clabel = raidget_component_label(raidPtr, raidPtr->numCol+col);
 		if (clabel->parity_map_flags & RF_PMLABEL_DISABLE)
@@ -802,7 +802,7 @@ rf_paritymap_set_disable(RF_Raid_t *raid

 	/* update any used spares as well */
 	for (col = 0; col < raidPtr->numSpare; col++) {
-		if (raidPtr->Disks[raidPtr->numCol+col].status != rf_ds_used_spare)
+		if (raidPtr->Disks[raidPtr->numCol+col].status != RF_DS_USED_SPARE)
 			continue;

 		clabel = raidget_component_label(raidPtr, raidPtr->numCol+col);
Index: rf_parityscan.c
===================================================================
RCS file: /cvsroot/src/sys/dev/raidframe/rf_parityscan.c,v
retrieving revision 1.34
diff -u -p -u -r1.34 rf_parityscan.c
--- rf_parityscan.c	1 May 2011 01:09:05 -0000	1.34
+++ rf_parityscan.c	14 Jan 2017 20:09:47 -0000
@@ -88,7 +88,7 @@ rf_RewriteParityRange(RF_Raid_t *raidPtr
 		/* There isn't any parity. Call it "okay." */
 		return (RF_PARITY_OKAY);
 	}
-	if (raidPtr->status != rf_rs_optimal) {
+	if (raidPtr->status != RF_RS_OPTIMAL) {
 		/*
 		 * We're in degraded mode.  Don't try to verify parity now!
 		 * XXX: this should be a "we don't want to", not a
@@ -339,7 +339,7 @@ int
 rf_TryToRedirectPDA(RF_Raid_t *raidPtr, RF_PhysDiskAddr_t *pda, int parity)
 {
-	if (raidPtr->Disks[pda->col].status == rf_ds_reconstructing) {
+	if (raidPtr->Disks[pda->col].status == RF_DS_RECONSTRUCTING) {
 		if (rf_CheckRUReconstructed(raidPtr->reconControl->reconMap, pda->startSector)) {
 #if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
 			if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
Index: rf_raid.h
===================================================================
RCS file: /cvsroot/src/sys/dev/raidframe/rf_raid.h,v
retrieving revision 1.45
diff -u -p -u -r1.45 rf_raid.h
--- rf_raid.h	18 Oct 2014 08:33:28 -0000	1.45
+++ rf_raid.h	14 Jan 2017 20:09:47 -0000
@@ -69,10 +69,10 @@
  * each has its own status, which is one of the following.
  */
 typedef enum RF_RowStatus_e {
-	rf_rs_optimal,
-	rf_rs_degraded,
-	rf_rs_reconstructing,
-	rf_rs_reconfigured
+	RF_RS_OPTIMAL,
+	RF_RS_DEGRADED,
+	RF_RS_RECONSTRUCTING,
+	RF_RS_RECONFIGURED
 } RF_RowStatus_t;

 struct RF_CumulativeStats_s {
Index: rf_raid1.c
===================================================================
RCS file: /cvsroot/src/sys/dev/raidframe/rf_raid1.c,v
retrieving revision 1.35
diff -u -p -u -r1.35 rf_raid1.c
--- rf_raid1.c	15 Sep 2013 12:47:26 -0000	1.35
+++ rf_raid1.c	14 Jan 2017 20:09:47 -0000
@@ -200,8 +200,8 @@ rf_RAID1DagSelect(RF_Raid_t *raidPtr, RF
 		failedPDA = asmap->failedPDAs[0];
 		fcol = failedPDA->col;
 		rstat = raidPtr->status;
-		prior_recon = (rstat == rf_rs_reconfigured) || (
-		    (rstat == rf_rs_reconstructing) ?
+		prior_recon = (rstat == RF_RS_RECONFIGURED) || (
+		    (rstat == RF_RS_RECONSTRUCTING) ?
 		    rf_CheckRUReconstructed(raidPtr->reconControl->reconMap, failedPDA->startSector) : 0
 		    );
 		if (prior_recon) {
Index: rf_raid5.c
===================================================================
RCS file: /cvsroot/src/sys/dev/raidframe/rf_raid5.c,v
retrieving revision 1.19
diff -u -p -u -r1.19 rf_raid5.c
--- rf_raid5.c	16 Nov 2006 01:33:23 -0000	1.19
+++ rf_raid5.c	14 Jan 2017 20:09:47 -0000
@@ -190,8 +190,8 @@ rf_RaidFiveDagSelect(RF_Raid_t *raidPtr,
 		failedPDA = asmap->failedPDAs[0];
 		fcol = failedPDA->col;
 		rstat = raidPtr->status;
-		prior_recon = (rstat == rf_rs_reconfigured) || (
-		    (rstat == rf_rs_reconstructing) ?
+		prior_recon = (rstat == RF_RS_RECONFIGURED) || (
+		    (rstat == RF_RS_RECONSTRUCTING) ?
 		    rf_CheckRUReconstructed(raidPtr->reconControl->reconMap, failedPDA->startSector) : 0
 		    );
 		if (prior_recon) {
Index: rf_reconstruct.c
===================================================================
RCS file: /cvsroot/src/sys/dev/raidframe/rf_reconstruct.c,v
retrieving revision 1.121
diff -u -p -u -r1.121 rf_reconstruct.c
--- rf_reconstruct.c	14 Nov 2014 14:29:16 -0000	1.121
+++ rf_reconstruct.c	14 Jan 2017 20:09:48 -0000
@@ -249,10 +249,10 @@ rf_ReconstructFailedDiskBasic(RF_Raid_t
 	 * change eventually */

 	rf_lock_mutex2(raidPtr->mutex);
-	RF_ASSERT(raidPtr->Disks[col].status == rf_ds_failed);
+	RF_ASSERT(raidPtr->Disks[col].status == RF_DS_FAILED);
 #if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
 	if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
-		if (raidPtr->status != rf_rs_degraded) {
+		if (raidPtr->status != RF_RS_DEGRADED) {
 			RF_ERRORMSG1("Unable to reconstruct disk at col %d because status not degraded\n", col);
 			rf_unlock_mutex2(raidPtr->mutex);
 			return (EINVAL);
@@ -261,9 +261,9 @@ rf_ReconstructFailedDiskBasic(RF_Raid_t
 	} else {
 #endif
 		for (scol = raidPtr->numCol; scol < raidPtr->numCol + raidPtr->numSpare; scol++) {
-			if (raidPtr->Disks[scol].status == rf_ds_spare) {
+			if (raidPtr->Disks[scol].status == RF_DS_SPARE) {
 				spareDiskPtr = &raidPtr->Disks[scol];
-				spareDiskPtr->status = rf_ds_rebuilding_spare;
+				spareDiskPtr->status = RF_DS_REBUILDING_SPARE;
 				break;
 			}
 		}
@@ -299,7 +299,7 @@ rf_ReconstructFailedDiskBasic(RF_Raid_t
 		c_label->row = 0;
 		c_label->column = col;
 		c_label->clean = RF_RAID_DIRTY;
-		c_label->status = rf_ds_optimal;
+		c_label->status = RF_DS_OPTIMAL;
 		rf_component_label_set_partitionsize(c_label,
 		    raidPtr->Disks[scol].partitionSize);
@@ -310,13 +310,13 @@ rf_ReconstructFailedDiskBasic(RF_Raid_t
 		/* XXX doesn't hold for RAID 6!!*/

 		rf_lock_mutex2(raidPtr->mutex);
-		/* The failed disk has already been marked as rf_ds_spared
-		   (or rf_ds_dist_spared) in
+		/* The failed disk has already been marked as RF_DS_SPARED
+		   (or RF_DS_DIST_SPARED) in
 		   rf_ContinueReconstructFailedDisk() so we just update
 		   the spare disk as being a used spare
 		*/

-		spareDiskPtr->status = rf_ds_used_spare;
+		spareDiskPtr->status = RF_DS_USED_SPARE;

 		raidPtr->parity_good = RF_RAID_CLEAN;
 		rf_unlock_mutex2(raidPtr->mutex);
@@ -328,10 +328,10 @@ rf_ReconstructFailedDiskBasic(RF_Raid_t
 		rf_lock_mutex2(raidPtr->mutex);

 		/* Failed disk goes back to "failed" status */
-		raidPtr->Disks[col].status = rf_ds_failed;
+		raidPtr->Disks[col].status = RF_DS_FAILED;

 		/* Spare disk goes back to "spare" status. */
-		spareDiskPtr->status = rf_ds_spare;
+		spareDiskPtr->status = RF_DS_SPARE;
 		rf_unlock_mutex2(raidPtr->mutex);

 	}
@@ -380,11 +380,11 @@ rf_ReconstructInPlace(RF_Raid_t *raidPtr
 	 * disk at a time for each array. */

-	if (raidPtr->Disks[col].status != rf_ds_failed) {
+	if (raidPtr->Disks[col].status != RF_DS_FAILED) {
 		/* "It's gone..." */
 		raidPtr->numFailures++;
-		raidPtr->Disks[col].status = rf_ds_failed;
-		raidPtr->status = rf_rs_degraded;
+		raidPtr->Disks[col].status = RF_DS_FAILED;
+		raidPtr->status = RF_RS_DEGRADED;
 		rf_unlock_mutex2(raidPtr->mutex);
 		rf_update_component_labels(raidPtr,
 		    RF_NORMAL_COMPONENT_UPDATE);
@@ -404,7 +404,7 @@ rf_ReconstructInPlace(RF_Raid_t *raidPtr
 	/* Actually, we don't care if it's failed or not...  On a RAID
 	   set with correct parity, this function should be callable
 	   on any component without ill effects. */
-	/* RF_ASSERT(raidPtr->Disks[col].status == rf_ds_failed); */
+	/* RF_ASSERT(raidPtr->Disks[col].status == RF_DS_FAILED); */

 #if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
 	if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
@@ -490,7 +490,7 @@ rf_ReconstructInPlace(RF_Raid_t *raidPtr
 	rf_unlock_mutex2(raidPtr->mutex);

 	spareDiskPtr = &raidPtr->Disks[col];
-	spareDiskPtr->status = rf_ds_rebuilding_spare;
+	spareDiskPtr->status = RF_DS_REBUILDING_SPARE;

 	printf("raid%d: initiating in-place reconstruction on column %d\n",
 	       raidPtr->raidid, col);
@@ -511,10 +511,10 @@ rf_ReconstructInPlace(RF_Raid_t *raidPtr
 	if (!rc) {
 		rf_lock_mutex2(raidPtr->mutex);
 		/* Need to set these here, as at this point it'll be claiming
-		   that the disk is in rf_ds_spared!  But we know better :-) */
+		   that the disk is in RF_DS_SPARED!  But we know better :-) */

-		raidPtr->Disks[col].status = rf_ds_optimal;
-		raidPtr->status = rf_rs_optimal;
+		raidPtr->Disks[col].status = RF_DS_OPTIMAL;
+		raidPtr->status = RF_RS_OPTIMAL;
 		rf_unlock_mutex2(raidPtr->mutex);

 		/* fix up the component label */
@@ -541,7 +541,7 @@ rf_ReconstructInPlace(RF_Raid_t *raidPtr
 		/* Reconstruct-in-place failed.  Disk goes back to
 		   "failed" status, regardless of what it was before.  */
 		rf_lock_mutex2(raidPtr->mutex);
-		raidPtr->Disks[col].status = rf_ds_failed;
+		raidPtr->Disks[col].status = RF_DS_FAILED;
 		rf_unlock_mutex2(raidPtr->mutex);

 	}
@@ -601,8 +601,8 @@ rf_ContinueReconstructFailedDisk(RF_Raid
 	mapPtr = raidPtr->reconControl->reconMap;
 	raidPtr->reconControl->numRUsTotal = mapPtr->totalRUs;
 	raidPtr->reconControl->numRUsComplete = 0;
-	raidPtr->status = rf_rs_reconstructing;
-	raidPtr->Disks[col].status = rf_ds_reconstructing;
+	raidPtr->status = RF_RS_RECONSTRUCTING;
+	raidPtr->Disks[col].status = RF_DS_RECONSTRUCTING;
 	raidPtr->Disks[col].spareCol = scol;

 	rf_unlock_mutex2(raidPtr->mutex);
@@ -841,12 +841,12 @@ rf_ContinueReconstructFailedDisk(RF_Raid
 		rf_lock_mutex2(raidPtr->mutex);

 		/* mark set as being degraded, rather than
-		   rf_rs_reconstructing as we were before the problem.
+		   RF_RS_RECONSTRUCTING as we were before the problem.
 		   After this is done we can update status of the
 		   component disks without worrying about someone
 		   trying to read from a failed component.
 		*/
-		raidPtr->status = rf_rs_degraded;
+		raidPtr->status = RF_RS_DEGRADED;
 		rf_unlock_mutex2(raidPtr->mutex);

 		/* resume IO */
@@ -925,8 +925,8 @@ rf_ContinueReconstructFailedDisk(RF_Raid
 	rf_lock_mutex2(raidPtr->mutex);
 	raidPtr->numFailures--;
 	ds = (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE);
-	raidPtr->Disks[col].status = (ds) ? rf_ds_dist_spared : rf_ds_spared;
-	raidPtr->status = (ds) ? rf_rs_reconfigured : rf_rs_optimal;
+	raidPtr->Disks[col].status = (ds) ? RF_DS_DIST_SPARED : RF_DS_SPARED;
+	raidPtr->status = (ds) ? RF_RS_RECONFIGURED : RF_RS_OPTIMAL;
 	rf_unlock_mutex2(raidPtr->mutex);
 	RF_GETTIME(etime);
 	RF_TIMEVAL_DIFF(&(raidPtr->reconControl->starttime), &etime, &elpsd);
Index: rf_states.c
===================================================================
RCS file: /cvsroot/src/sys/dev/raidframe/rf_states.c,v
retrieving revision 1.50
diff -u -p -u -r1.50 rf_states.c
--- rf_states.c	3 Jan 2016 08:17:24 -0000	1.50
+++ rf_states.c	14 Jan 2017 20:09:48 -0000
@@ -403,7 +403,7 @@ rf_State_Lock(RF_RaidAccessDesc_t *desc)
 		}
 	}
 	if (desc->type == RF_IO_TYPE_WRITE &&
-	    raidPtr->status == rf_rs_reconstructing) {
+	    raidPtr->status == RF_RS_RECONSTRUCTING) {
 		if (!(asm_p->flags & RF_ASM_FLAGS_FORCE_TRIED)) {
 			int val;
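
A note on compatibility: since the enumerator values themselves don't change
(only the spelling does), on-disk component labels are unaffected. If
anything outside the tree still uses the old lowercase names, the transition
could be cushioned with aliases in raidframevar.h. A minimal sketch, assuming
we want a migration window at all; these defines are illustrative only and
are NOT part of the diff above:

/* Hypothetical transitional aliases (not in the patch): map the old
 * lowercase enumerator names onto the new uppercase ones, so that
 * out-of-tree RAIDframe consumers keep compiling. To be removed once
 * external users have caught up. */
#define rf_ds_optimal		RF_DS_OPTIMAL
#define rf_ds_failed		RF_DS_FAILED
#define rf_ds_reconstructing	RF_DS_RECONSTRUCTING
#define rf_ds_dist_spared	RF_DS_DIST_SPARED
#define rf_ds_spared		RF_DS_SPARED
#define rf_ds_spare		RF_DS_SPARE
#define rf_ds_used_spare	RF_DS_USED_SPARE
#define rf_ds_rebuilding_spare	RF_DS_REBUILDING_SPARE
#define rf_rs_optimal		RF_RS_OPTIMAL
#define rf_rs_degraded		RF_RS_DEGRADED
#define rf_rs_reconstructing	RF_RS_RECONSTRUCTING
#define rf_rs_reconfigured	RF_RS_RECONFIGURED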
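
To check that the rename is complete and no lowercase users are left behind,
a case-sensitive grep over the driver sources should come back empty with
the patch applied (apart from any compatibility aliases like the ones
sketched above):

	grep -rn -e rf_ds_ -e rf_rs_ sys/dev/raidframe/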