Implement parallel SMART data collection for faster execution
SMART queries are now run in parallel using background jobs:

1. First pass launches background jobs for all devices
2. Each job writes to a temp file in SMART_CACHE_DIR
3. Wait for all jobs to complete
4. Second pass reads cached data for display

This significantly reduces script runtime when multiple drives are present, as SMART queries can take 1-2 seconds each. The cache directory is automatically cleaned up after use.

Fixes: #15

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -855,12 +855,32 @@ done
|
|||||||
# Sort drives by bay position (numeric bays first, then m2 slots)
|
# Sort drives by bay position (numeric bays first, then m2 slots)
|
||||||
# Build the bay display order: numeric bays sorted numerically first,
# followed by m2 slots sorted alphanumerically.
all_bays=$(
    printf '%s\n' "${!DRIVE_MAP[@]}" | grep -E '^[0-9]+$' | sort -n
    printf '%s\n' "${!DRIVE_MAP[@]}" | grep -E '^m2-' | sort
)
|
|
||||||
|
# Parallel SMART data collection for faster execution.
# First pass: launch one background get_drive_smart_info query per mapped
# device, caching each result in "$SMART_CACHE_DIR/<device>". The display
# pass below reads the cached files instead of querying serially.
if [[ "$SKIP_SMART" != true ]]; then
    SMART_CACHE_DIR="$(mktemp -d)"
    log_info "Collecting SMART data in parallel..."

    smart_pids=()
    for bay in $all_bays; do
        device="${DRIVE_MAP[$bay]}"
        if [[ -n "$device" && "$device" != "EMPTY" && -b "/dev/$device" ]]; then
            # Launch background job for each device; record its PID so we
            # only barrier on the jobs started here.
            (get_drive_smart_info "$device" > "$SMART_CACHE_DIR/$device") &
            smart_pids+=("$!")
        fi
    done

    # Wait only for the SMART jobs launched above. A bare `wait` would also
    # block on any unrelated background jobs started earlier in the script.
    # A failed query simply leaves an empty cache file; the display pass
    # already falls back to defaults for missing/empty cache entries.
    for smart_pid in "${smart_pids[@]}"; do
        wait "$smart_pid" || true
    done
    log_info "SMART data collection complete"
fi
||||||
for bay in $all_bays; do
|
for bay in $all_bays; do
|
||||||
device="${DRIVE_MAP[$bay]}"
|
device="${DRIVE_MAP[$bay]}"
|
||||||
if [[ -n "$device" && "$device" != "EMPTY" && -b "/dev/$device" ]]; then
|
if [[ -n "$device" && "$device" != "EMPTY" && -b "/dev/$device" ]]; then
|
||||||
size="$(lsblk -d -n -o SIZE "/dev/$device" 2>/dev/null)"
|
size="$(lsblk -d -n -o SIZE "/dev/$device" 2>/dev/null)"
|
||||||
|
|
||||||
# Get SMART info (or defaults if skipped)
|
# Get SMART info from cache (or defaults if skipped)
|
||||||
if [[ "$SKIP_SMART" == true ]]; then
|
if [[ "$SKIP_SMART" == true ]]; then
|
||||||
type="-"
|
type="-"
|
||||||
temp="-"
|
temp="-"
|
||||||
@@ -869,7 +889,12 @@ for bay in $all_bays; do
|
|||||||
serial="-"
|
serial="-"
|
||||||
warnings=""
|
warnings=""
|
||||||
else
|
else
|
||||||
smart_info="$(get_drive_smart_info "$device")"
|
# Read from cached SMART data
|
||||||
|
if [[ -f "$SMART_CACHE_DIR/$device" ]]; then
|
||||||
|
smart_info="$(cat "$SMART_CACHE_DIR/$device")"
|
||||||
|
else
|
||||||
|
smart_info=""
|
||||||
|
fi
|
||||||
IFS='|' read -r type temp health model serial warnings <<< "$smart_info"
|
IFS='|' read -r type temp health model serial warnings <<< "$smart_info"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@@ -926,6 +951,11 @@ for bay in $all_bays; do
|
|||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
|
|
||||||
|
# Remove the temp directory used to cache parallel SMART query results.
# `-d` on an empty expansion is false, so this also covers the unset case.
if [[ -d "${SMART_CACHE_DIR:-}" ]]; then
    rm -rf "${SMART_CACHE_DIR}"
fi
||||||
|
|
||||||
# NVMe drives (only show unmapped ones - mapped NVMe drives appear in main table)
# Suppress lsblk's stderr, not grep's: the previous redirection was attached
# to grep, so lsblk errors (the command that can actually fail, e.g. when
# /sys is unavailable) still leaked to the terminal.
nvme_devices=$(lsblk -d -n -o NAME,SIZE 2>/dev/null | grep "^nvme")
if [[ -n "$nvme_devices" ]]; then
|
if [[ -n "$nvme_devices" ]]; then
|
||||||
|
|||||||
Reference in New Issue
Block a user