#!/usr/bin/env python3
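"""
System health monitor: checks physical drive SMART health, memory/ECC status,
CPU load, management/Ceph network reachability and LXC container storage, and
opens tickets for detected issues through an HTTP ticket API (dry-run supported).
"""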
import os, sys, json, requests, psutil, socket, subprocess, logging, argparse, urllib.request, re, glob, datetime

from typing import Dict, Any, List

# Create a logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

# Create a console handler and set its level to DEBUG
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)

# Create a formatter
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

# Add the formatter to the console handler
console_handler.setFormatter(formatter)

# Add the console handler to the logger
logger.addHandler(console_handler)

class SystemHealthMonitor:
|
|
STANDARD_WIDTH = 80
|
|
PRIORITIES = {
|
|
'CRITICAL': '1',
|
|
'HIGH': '2',
|
|
'MEDIUM': '3',
|
|
'LOW': '4'
|
|
}
|
|
ISSUE_PRIORITIES = {
|
|
'SMART_FAILURE': PRIORITIES['HIGH'],
|
|
'DISK_CRITICAL': PRIORITIES['HIGH'],
|
|
'DISK_WARNING': PRIORITIES['MEDIUM'],
|
|
'UNCORRECTABLE_ECC': PRIORITIES['HIGH'],
|
|
'CORRECTABLE_ECC': PRIORITIES['MEDIUM'],
|
|
'CPU_HIGH': PRIORITIES['LOW'],
|
|
'NETWORK_FAILURE': PRIORITIES['HIGH']
|
|
}
|
|
CONFIG = {
|
|
'TICKET_API_URL': 'http://10.10.10.45/create_ticket_api.php',
|
|
'THRESHOLDS': {
|
|
'DISK_CRITICAL': 90,
|
|
'DISK_WARNING': 80,
|
|
'LXC_CRITICAL': 90,
|
|
'LXC_WARNING': 80,
|
|
'CPU_WARNING': 80,
|
|
'TEMPERATURE_WARNING': 65
|
|
},
|
|
'NETWORKS': {
|
|
'MANAGEMENT': '10.10.10.1',
|
|
'CEPH': '10.10.90.1',
|
|
'PING_TIMEOUT': 1,
|
|
'PING_COUNT': 1
|
|
},
|
|
'EXCLUDED_MOUNTS': [
|
|
'/media',
|
|
'/mnt/pve/mediafs',
|
|
'/opt/metube_downloads'
|
|
],
|
|
'EXCLUDED_PATTERNS': [
|
|
r'/media.*',
|
|
r'/mnt/pve/mediafs.*',
|
|
r'.*/media$',
|
|
r'.*mediafs.*',
|
|
r'.*/downloads.*'
|
|
]
|
|
}
|
|
TICKET_TEMPLATES = {
|
|
'ACTION_TYPE': {
|
|
'AUTO': '[auto]',
|
|
'MANUAL': '[manual]'
|
|
},
|
|
'ENVIRONMENT': {
|
|
'PRODUCTION': '[production]'
|
|
},
|
|
'TICKET_TYPE': {
|
|
'MAINTENANCE': '[maintenance]'
|
|
},
|
|
'HARDWARE_TYPE': {
|
|
'HARDWARE': '[hardware]'
|
|
},
|
|
'SOFTWARE_TYPE': {
|
|
'SOFTWARE': '[software]'
|
|
},
|
|
'NETWORK_TYPE': {
|
|
'NETWORK': '[network]'
|
|
},
|
|
'SCOPE': {
|
|
'SINGLE_NODE': '[single-node]',
|
|
'CLUSTER_WIDE': '[cluster-wide]'
|
|
},
|
|
'DEFAULT_CATEGORY': 'Hardware',
|
|
'DEFAULT_ISSUE_TYPE': 'Problem'
|
|
}
|
|
PROBLEMATIC_FIRMWARE = {
|
|
'Samsung': {
|
|
'EVO860': ['RVT01B6Q', 'RVT02B6Q'], # Known issues with sudden performance drops
|
|
'EVO870': ['SVT01B6Q'],
|
|
'PM883': ['HXT7404Q'] # Known issues with TRIM
|
|
},
|
|
'Seagate': {
|
|
'ST8000NM': ['CC64'], # Known issues with NCQ
|
|
'ST12000NM': ['SN02']
|
|
},
|
|
'WDC': {
|
|
'WD121KRYZ': ['01.01A01'], # RAID rebuild issues
|
|
'WD141KRYZ': ['02.01A02']
|
|
}
|
|
}
|
|
MANUFACTURER_SMART_PROFILES = {
|
|
'Ridata': {
|
|
'aliases': ['Ridata', 'Ritek', 'RIDATA', 'RITEK', 'SSD 512GB'], # Add the generic model
|
|
'firmware_patterns': ['HT3618B7', 'HT36'], # Add firmware pattern matching
|
|
'wear_leveling_behavior': 'countup',
|
|
'wear_leveling_baseline': 0,
|
|
'wear_leveling_thresholds': {
|
|
'warning': 500000, # Much higher threshold for countup behavior
|
|
'critical': 1000000 # Very high threshold
|
|
},
|
|
'attributes': {
|
|
'Wear_Leveling_Count': {
|
|
'behavior': 'countup',
|
|
'baseline': 0,
|
|
'warning_threshold': 500000,
|
|
'critical_threshold': 1000000,
|
|
'description': 'Total wear leveling operations performed (countup from 0)',
|
|
'ignore_on_new_drive': True # Don't alert on new drives
|
|
}
|
|
}
|
|
},
|
|
'Samsung': {
|
|
'aliases': ['Samsung', 'SAMSUNG'],
|
|
'wear_leveling_behavior': 'countup',
|
|
'wear_leveling_baseline': 0,
|
|
'wear_leveling_thresholds': {
|
|
'warning': 2000,
|
|
'critical': 3000
|
|
},
|
|
'attributes': {
|
|
'Wear_Leveling_Count': {
|
|
'behavior': 'countup',
|
|
'baseline': 0,
|
|
'warning_threshold': 2000,
|
|
'critical_threshold': 3000,
|
|
'description': 'Total wear leveling operations performed'
|
|
}
|
|
}
|
|
},
|
|
'Intel': {
|
|
'aliases': ['Intel', 'INTEL'],
|
|
'wear_leveling_behavior': 'percentage',
|
|
'wear_leveling_baseline': 100,
|
|
'wear_leveling_thresholds': {
|
|
'warning': 30,
|
|
'critical': 10
|
|
},
|
|
'attributes': {
|
|
'Media_Wearout_Indicator': {
|
|
'behavior': 'countdown',
|
|
'baseline': 100,
|
|
'warning_threshold': 30,
|
|
'critical_threshold': 10,
|
|
'description': 'Percentage of rated life remaining'
|
|
}
|
|
}
|
|
},
|
|
'Micron': {
|
|
'aliases': ['Micron', 'MICRON', 'Crucial', 'CRUCIAL'],
|
|
'wear_leveling_behavior': 'percentage',
|
|
'wear_leveling_baseline': 100,
|
|
'wear_leveling_thresholds': {
|
|
'warning': 30,
|
|
'critical': 10
|
|
}
|
|
},
|
|
'Generic': { # Fallback for unknown manufacturers
|
|
'aliases': ['Unknown', 'Generic'],
|
|
'wear_leveling_behavior': 'unknown',
|
|
'wear_leveling_baseline': None,
|
|
'wear_leveling_thresholds': {
|
|
'warning': None, # Don't trigger on unknown
|
|
'critical': None
|
|
}
|
|
}
|
|
}
|
|
SEVERITY_INDICATORS = {
|
|
'CRITICAL': '🔴',
|
|
'WARNING': '🟡',
|
|
'HEALTHY': '🟢',
|
|
'UNKNOWN': '⚪'
|
|
}
|
|
SMART_DESCRIPTIONS = {
|
|
'Reported_Uncorrect': """
|
|
Number of errors that could not be recovered using hardware ECC.
|
|
Impact:
|
|
- Indicates permanent data loss in affected sectors
|
|
- High correlation with drive hardware failure
|
|
- Critical reliability indicator
|
|
|
|
Recommended Actions:
|
|
1. Backup critical data immediately
|
|
2. Check drive logs for related errors
|
|
3. Plan for drive replacement
|
|
4. Monitor for error count increases
|
|
""",
|
|
|
|
'Reallocated_Sector_Ct': """
|
|
Number of sectors that have been reallocated due to errors.
|
|
Impact:
|
|
- High counts indicate degrading media
|
|
- Each reallocation uses one of the drive's limited spare sectors
|
|
- Rapid increases suggest accelerating drive wear
|
|
|
|
Recommended Actions:
|
|
1. Monitor rate of increase
|
|
2. Check drive temperature
|
|
3. Plan replacement if count grows rapidly
|
|
""",
|
|
|
|
'Current_Pending_Sector': """
|
|
Sectors waiting to be reallocated due to read/write errors.
|
|
Impact:
|
|
- Indicates potentially unstable sectors
|
|
- May result in data loss if unrecoverable
|
|
- Should be monitored for increases
|
|
|
|
Recommended Actions:
|
|
1. Backup affected files
|
|
2. Run extended SMART tests
|
|
3. Monitor for conversion to reallocated sectors
|
|
""",
|
|
|
|
'Offline_Uncorrectable': """
|
|
Count of uncorrectable errors detected during offline data collection.
|
|
Impact:
|
|
- Direct indicator of media reliability issues
|
|
- May affect data integrity
|
|
- High values suggest drive replacement needed
|
|
|
|
Recommended Actions:
|
|
1. Run extended SMART tests
|
|
2. Check drive logs
|
|
3. Plan replacement if count is increasing
|
|
""",
|
|
|
|
'Spin_Retry_Count': """
|
|
Number of spin start retry attempts.
|
|
Impact:
|
|
- Indicates potential motor or bearing issues
|
|
- May predict imminent mechanical failure
|
|
- Increasing values suggest degrading drive health
|
|
|
|
Recommended Actions:
|
|
1. Monitor for rapid increases
|
|
2. Check drive temperature
|
|
3. Plan replacement if count grows rapidly
|
|
""",
|
|
|
|
'Power_On_Hours': """
|
|
Total number of hours the device has been powered on.
|
|
Impact:
|
|
- Normal aging metric
|
|
- Used to gauge overall drive lifetime
|
|
- Compare against manufacturer's MTBF rating
|
|
|
|
Recommended Actions:
|
|
1. Compare to warranty period
|
|
2. Plan replacement if approaching rated lifetime
|
|
""",
|
|
|
|
'Media_Wearout_Indicator': """
|
|
Percentage of drive's rated life remaining (SSDs).
|
|
Impact:
|
|
- 100 indicates new drive
|
|
- 0 indicates exceeded rated writes
|
|
- Critical for SSD lifecycle management
|
|
|
|
Recommended Actions:
|
|
1. Plan replacement below 20%
|
|
2. Monitor write workload
|
|
3. Consider workload redistribution
|
|
""",
|
|
|
|
'Temperature_Celsius': """
|
|
Current drive temperature.
|
|
Impact:
|
|
- High temperatures accelerate wear
|
|
- Optimal range: 20-45°C
|
|
- Sustained high temps reduce lifespan
|
|
|
|
Recommended Actions:
|
|
1. Check system cooling
|
|
2. Verify airflow
|
|
3. Monitor for sustained high temperatures
|
|
""",
|
|
|
|
'Available_Spare': """
|
|
Percentage of spare blocks remaining (SSDs).
|
|
Impact:
|
|
- Critical for SSD endurance
|
|
- Low values indicate approaching end-of-life
|
|
- Rapid decreases suggest excessive writes
|
|
|
|
Recommended Actions:
|
|
1. Plan replacement if below 20%
|
|
2. Monitor write patterns
|
|
3. Consider workload changes
|
|
""",
|
|
|
|
'Program_Fail_Count': """
|
|
Number of flash program operation failures.
|
|
Impact:
|
|
- Indicates NAND cell reliability
|
|
- Important for SSD health assessment
|
|
- Increasing values suggest flash degradation
|
|
|
|
Recommended Actions:
|
|
1. Monitor rate of increase
|
|
2. Check firmware updates
|
|
3. Plan replacement if rapidly increasing
|
|
""",
|
|
|
|
'Erase_Fail_Count': """
|
|
Number of flash erase operation failures.
|
|
Impact:
|
|
- Related to NAND block health
|
|
- Critical for SSD reliability
|
|
- High counts suggest failing flash blocks
|
|
|
|
Recommended Actions:
|
|
1. Monitor count increases
|
|
2. Check firmware version
|
|
3. Plan replacement if count is high
|
|
""",
|
|
|
|
'Load_Cycle_Count': """
|
|
Number of head load/unload cycles.
|
|
Impact:
|
|
- Normal operation metric
|
|
- High counts may indicate power management issues
|
|
- Compare against rated cycles (typically 600k-1M)
|
|
|
|
Recommended Actions:
|
|
1. Review power management settings
|
|
2. Monitor rate of increase
|
|
3. Plan replacement near rated limit
|
|
""",
|
|
|
|
'Wear_Leveling_Count': """
|
|
SSD block erase distribution metric.
|
|
Impact:
|
|
- Indicates wear pattern uniformity
|
|
- Interpretation varies by manufacturer
|
|
- Critical for SSD longevity
|
|
|
|
Recommended Actions:
|
|
1. Monitor trend over time
|
|
2. Compare with manufacturer baseline
|
|
3. Check workload distribution
|
|
|
|
Note: Different manufacturers use different counting methods:
|
|
- Some count up from 0 (Samsung, etc.)
|
|
- Others count down from a baseline (e.g. Intel's Media_Wearout_Indicator)
|
|
- Always check manufacturer specifications
|
|
"""
|
|
}
|
|
|
|
def __init__(self,
|
|
ticket_api_url: str = 'http://10.10.10.45/create_ticket_api.php',
|
|
dry_run: bool = False):
|
|
"""
|
|
Initialize the system health monitor.
|
|
|
|
:param ticket_api_url: URL for the ticket creation API.
|
|
:param dry_run: If True, simulate API calls without sending requests.
|
|
"""
|
|
self.ticket_api_url = ticket_api_url
|
|
self.dry_run = dry_run
|
|
|
|
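# Typical one-shot usage (a sketch; assumes the default ticket API URL):
#     SystemHealthMonitor(dry_run=True).run()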
def run(self):
|
|
"""
|
|
Perform a one-shot health check of the system.
|
|
"""
|
|
try:
|
|
# Perform health checks and gather the report
|
|
health_report = self.perform_health_checks()
|
|
|
|
# Create tickets for any detected critical issues
|
|
self._create_tickets_for_issues(health_report)
|
|
except Exception as e:
|
|
import traceback
|
|
logger.error(f"Unexpected error during health check: {e}")
|
|
logger.error(traceback.format_exc())
|
|
|
|
def perform_health_checks(self) -> Dict[str, Any]:
|
|
"""
|
|
Perform comprehensive system health checks and return a report.
|
|
"""
|
|
health_report = {
|
|
'drives_health': self._check_drives_health(),
|
|
'memory_health': self._check_memory_usage(),
|
|
'cpu_health': self._check_cpu_usage(),
|
|
'network_health': self._check_network_status(),
|
|
'lxc_health': self._check_lxc_storage()
|
|
}
|
|
|
|
if self.dry_run:
|
|
logger.info("\n=== System Health Summary ===")
|
|
logger.info(f"Overall Drive Health: {health_report['drives_health']['overall_status']}")
|
|
|
|
# Summarized drive information with usage
|
|
logger.info("\nDrive Status:")
|
|
for drive in health_report['drives_health']['drives']:
|
|
issues = drive.get('smart_issues', [])
|
|
temp = f", {drive.get('temperature')}°C" if drive.get('temperature') else ""
|
|
status = "⚠️ " if issues else "✓ "
|
|
|
|
# Disk usage information
|
|
usage_info = ""
|
|
if drive.get('partitions'):
|
|
for partition in drive['partitions']:
|
|
usage_info += f"\n └─ {partition['mountpoint']}: {partition['used_space']}/{partition['total_space']} ({partition['usage_percent']}% used)"
|
|
|
|
logger.info(f"{status}{drive['device']}{temp} - SMART: {drive['smart_status']}{usage_info}")
|
|
if issues:
|
|
logger.info(f" Issues: {', '.join(issues)}")
|
|
|
|
logger.info(f"\nMemory: {health_report['memory_health']['memory_percent']}% used")
|
|
if health_report['memory_health'].get('has_ecc'):
|
|
logger.info("ECC Memory: Present")
|
|
if health_report['memory_health'].get('ecc_errors'):
|
|
logger.info(f"ECC Errors: {len(health_report['memory_health']['ecc_errors'])} found")
|
|
|
|
logger.info(f"\nCPU Usage: {health_report['cpu_health']['cpu_usage_percent']}%")
|
|
|
|
logger.info("\nNetwork Status:")
|
|
logger.info(f"Management: {health_report['network_health']['management_network']['status']}")
|
|
logger.info(f"Ceph: {health_report['network_health']['ceph_network']['status']}")
|
|
logger.info("\n=== End Summary ===")
|
|
|
|
return health_report
|
|
|
|
def _get_drive_details(self, device: str) -> Dict[str, str]:
|
|
"""
|
|
Get detailed drive information using smartctl
|
|
"""
|
|
drive_details = {
|
|
'model': None,
|
|
'serial': None,
|
|
'capacity': None,
|
|
'firmware': None,
|
|
'type': None, # SSD or HDD
|
|
'smart_capable': False
|
|
}
|
|
|
|
try:
|
|
# First check if device supports SMART
|
|
capability_result = subprocess.run(
|
|
['smartctl', '-i', device],
|
|
stdout=subprocess.PIPE,
|
|
stderr=subprocess.PIPE,
|
|
text=True
|
|
)
|
|
|
|
# Check if smartctl failed completely
|
|
if capability_result.returncode not in [0, 4]: # 0 = success, 4 = some SMART errors but readable
|
|
logger.debug(f"smartctl failed for {device}: return code {capability_result.returncode}")
|
|
return drive_details
|
|
|
|
output = capability_result.stdout
|
|
|
|
# Check if SMART is supported
|
|
if "SMART support is: Enabled" in output or "SMART support is: Available" in output:
|
|
drive_details['smart_capable'] = True
|
|
elif "SMART support is: Unavailable" in output or "does not support SMART" in output:
|
|
logger.debug(f"Device {device} does not support SMART")
|
|
return drive_details
|
|
|
|
for line in output.split('\n'):
|
|
if 'Device Model' in line or 'Model Number' in line:
|
|
drive_details['model'] = line.split(':')[1].strip()
|
|
elif 'Serial Number' in line:
|
|
drive_details['serial'] = line.split(':')[1].strip()
|
|
elif 'User Capacity' in line:
|
|
# Extract capacity from brackets
|
|
capacity_match = re.search(r'\[(.*?)\]', line)
|
|
if capacity_match:
|
|
drive_details['capacity'] = capacity_match.group(1)
|
|
elif 'Firmware Version' in line:
|
|
drive_details['firmware'] = line.split(':')[1].strip()
|
|
elif 'Rotation Rate' in line:
|
|
if 'Solid State Device' in line:
|
|
drive_details['type'] = 'SSD'
|
|
else:
|
|
drive_details['type'] = 'HDD'
|
|
|
|
except Exception as e:
|
|
logger.debug(f"Error getting drive details for {device}: {e}")
|
|
|
|
return drive_details
|
|
|
|
def make_box(self, title: str, content: str, content_width: int = 70) -> str:
"""Create a formatted box with title and content."""
return f"""
┏━ {title} {'━' * (content_width - len(title) - 3)}┓
{content}
┗{'━' * content_width}┛"""

# Intended usage: format each section's content to the consistent width,
# then append the boxed section to the ticket description, e.g.:
#
#     sections = {
#         'DRIVE SPECIFICATIONS': ...,
#         'SMART STATUS': ...,
#         'PARTITION INFO': ...
#     }
#     for section, content in sections.items():
#         formatted_content = '\n'.join(f"┃ {line:<{content_width-2}}┃" for line in content.split('\n'))
#         description += self.make_box(section, formatted_content)
|
|
|
|
def _get_issue_type(self, issue: str) -> str:
|
|
if "SMART" in issue:
|
|
return "SMART Health Issue"
|
|
elif "Drive" in issue:
|
|
return "Storage Issue"
|
|
elif "ECC" in issue:
|
|
return "Memory Issue"
|
|
elif "CPU" in issue:
|
|
return "Performance Issue"
|
|
elif "Network" in issue:
|
|
return "Network Issue"
|
|
return "Hardware Issue"
|
|
|
|
def _get_impact_level(self, issue: str) -> str:
|
|
if "CRITICAL" in issue or "UNHEALTHY" in issue:
|
|
return "🔴 Critical - Immediate Action Required"
|
|
elif "WARNING" in issue:
|
|
return "🟡 Warning - Action Needed Soon"
|
|
return "🟢 Low - Monitor Only"
|
|
|
|
def _generate_detailed_description(self, issue: str, health_report: Dict[str, Any]) -> str:
|
|
hostname = socket.gethostname()
|
|
timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
|
priority = "⚠ HIGH" if "CRITICAL" in issue else "● MEDIUM"
|
|
|
|
content_width = self.STANDARD_WIDTH - 2
|
|
banner = f"""
|
|
┏{'━' * content_width}┓
|
|
┃{' HARDWARE MONITORING ALERT TICKET '.center(content_width)}┃
|
|
┣{'━' * content_width}┫
|
|
┃ Host : {hostname:<{content_width-13}}┃
|
|
┃ Generated : {timestamp:<{content_width-13}}┃
|
|
┃ Priority : {priority:<{content_width-13}}┃
|
|
┗{'━' * content_width}┛"""
|
|
|
|
executive_summary = f"""
|
|
┏━ EXECUTIVE SUMMARY ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
|
|
┃ Issue Type │ {self._get_issue_type(issue)} ┃
|
|
┃ Impact Level │ {self._get_impact_level(issue)} ┃
|
|
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛
|
|
"""
|
|
description = banner + executive_summary
|
|
|
|
# Add relevant SMART descriptions
|
|
for attr in self.SMART_DESCRIPTIONS:
|
|
if attr in issue:
|
|
description += f"\n{attr}:\n{self.SMART_DESCRIPTIONS[attr]}\n"
|
|
|
|
if "SMART" in issue:
|
|
description += """
|
|
SMART (Self-Monitoring, Analysis, and Reporting Technology) Attribute Details:
|
|
- Possible drive failure!
|
|
"""
|
|
|
|
if "Drive" in issue and "/dev/" in issue:
|
|
try:
|
|
device = re.search(r'/dev/[a-zA-Z0-9]+', issue).group(0) if '/dev/' in issue else None
|
|
drive_info = next((d for d in health_report['drives_health']['drives'] if d['device'] == device), None)
|
|
|
|
if drive_info:
|
|
drive_details = self._get_drive_details(device)
|
|
|
|
smart_data = {
|
|
'attributes': drive_info.get('smart_attributes', {}),
|
|
'performance_metrics': drive_info.get('performance_metrics', {}),
|
|
'last_test_date': drive_info.get('last_test_date', 'N/A')
|
|
}
|
|
|
|
power_on_hours = smart_data['attributes'].get('Power_On_Hours', 'N/A')
|
|
last_test_date = smart_data.get('last_test_date', 'N/A')
|
|
age = f"{int(power_on_hours/24/365) if isinstance(power_on_hours, (int, float)) else 'N/A'} years" if power_on_hours != 'N/A' else 'N/A'
|
|
|
|
# Fix the formatting issue by ensuring all values are strings and not None
|
|
device_safe = device or 'N/A'
|
|
model_safe = drive_details.get('model') or 'N/A'
|
|
serial_safe = drive_details.get('serial') or 'N/A'
|
|
capacity_safe = drive_details.get('capacity') or 'N/A'
|
|
type_safe = drive_details.get('type') or 'N/A'
|
|
firmware_safe = drive_details.get('firmware') or 'N/A'
|
|
|
|
description += f"""
|
|
┏━ DRIVE SPECIFICATIONS ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
|
|
┃ Device Path │ {device_safe:<60} ┃
|
|
┃ Model │ {model_safe:<60} ┃
|
|
┃ Serial │ {serial_safe:<60} ┃
|
|
┃ Capacity │ {capacity_safe:<60} ┃
|
|
┃ Type │ {type_safe:<60} ┃
|
|
┃ Firmware │ {firmware_safe:<60} ┃
|
|
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛
|
|
"""
|
|
|
|
if drive_info:
|
|
perf_metrics = {
|
|
'read_speed': drive_info.get('performance_metrics', {}).get('read_speed', 'N/A'),
|
|
'write_speed': drive_info.get('performance_metrics', {}).get('write_speed', 'N/A'),
|
|
'access_time': drive_info.get('performance_metrics', {}).get('access_time', 'N/A'),
|
|
'iops': drive_info.get('performance_metrics', {}).get('iops', 'N/A')
|
|
}
|
|
|
|
power_on_safe = f"{power_on_hours} hours" if power_on_hours != 'N/A' else 'N/A'
|
|
last_test_safe = last_test_date or 'N/A'
|
|
age_safe = age or 'N/A'
|
|
|
|
description += f"""
|
|
┏━ DRIVE TIMELINE ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
|
|
┃ Power-On Hours │ {power_on_safe:<56} ┃
|
|
┃ Last SMART Test │ {last_test_safe:<56} ┃
|
|
┃ Drive Age │ {age_safe:<56} ┃
|
|
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛
|
|
"""
|
|
|
|
smart_status_safe = drive_info.get('smart_status') or 'N/A'
|
|
temp_safe = f"{drive_info.get('temperature')}°C" if drive_info.get('temperature') else 'N/A'
|
|
|
|
description += f"""
|
|
┏━ SMART STATUS ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
|
|
┃ Status │ {smart_status_safe:<60} ┃
|
|
┃ Temperature │ {temp_safe:<60} ┃
|
|
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛
|
|
"""
|
|
|
|
if drive_info.get('smart_attributes'):
|
|
description += "\n┏━ SMART ATTRIBUTES " + "━" * 48 + "┓\n"
|
|
for attr, value in drive_info['smart_attributes'].items():
|
|
attr_safe = str(attr).replace('_', ' ') if attr else 'Unknown'
|
|
value_safe = str(value) if value is not None else 'N/A'
|
|
description += f"┃ {attr_safe:<25} │ {value_safe:<37} ┃\n"
|
|
description += "┗" + "━" * 71 + "┛\n"
|
|
|
|
if drive_info.get('partitions'):
|
|
for partition in drive_info['partitions']:
|
|
usage_percent = partition.get('usage_percent', 0)
|
|
blocks = int(usage_percent / 5) # 20 blocks total = 100%
|
|
usage_meter = '█' * blocks + '░' * (20 - blocks)
|
|
|
|
mountpoint_safe = partition.get('mountpoint') or 'N/A'
|
|
fstype_safe = partition.get('fstype') or 'N/A'
|
|
total_space_safe = partition.get('total_space') or 'N/A'
|
|
used_space_safe = partition.get('used_space') or 'N/A'
|
|
free_space_safe = partition.get('free_space') or 'N/A'
|
|
|
|
description += f"""
|
|
┏━ PARTITION [{mountpoint_safe:<60}] ━┓
|
|
┃ Filesystem │ {fstype_safe:<60} ┃
|
|
┃ Usage Meter │ [{usage_meter:<58}] ┃
|
|
┃ Total Space │ {total_space_safe:<60} ┃
|
|
┃ Used Space │ {used_space_safe:<60} ┃
|
|
┃ Free Space │ {free_space_safe:<60} ┃
|
|
┃ Usage │ {usage_percent}%{'':<57} ┃
|
|
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛
|
|
"""
|
|
|
|
firmware_info = self._check_disk_firmware(device)
|
|
if firmware_info['is_problematic']:
|
|
description += "\n┏━ FIRMWARE ALERTS " + "━" * 48 + "┓\n"
|
|
for issue_item in firmware_info['known_issues']:
|
|
issue_safe = str(issue_item) if issue_item else 'Unknown issue'
|
|
description += f"┃ ⚠ {issue_safe:<67} ┃\n"
|
|
description += "┗" + "━" * 71 + "┛\n"
|
|
except Exception as e:
|
|
description += f"\nError generating drive details: {str(e)}\n"
|
|
|
|
if "Temperature" in issue:
|
|
description += """
|
|
High drive temperatures can:
|
|
- Reduce drive lifespan
|
|
- Cause performance degradation
|
|
- Lead to data corruption in extreme cases
|
|
Optimal temperature range: 20-45°C
|
|
"""
|
|
|
|
if "ECC" in issue:
|
|
description += """
|
|
ECC (Error Correction Code) Memory Issues:
|
|
- Correctable: Memory errors that were successfully fixed
|
|
- Uncorrectable: Serious memory errors that could not be corrected
|
|
Frequent ECC corrections may indicate degrading memory modules
|
|
"""
|
|
|
|
if "CPU" in issue:
|
|
description += """
|
|
High CPU usage sustained over time can indicate:
|
|
- Resource constraints
|
|
- Runaway processes
|
|
- Need for performance optimization
|
|
- Potential cooling issues
|
|
"""
|
|
|
|
if "Network" in issue:
|
|
description += """
|
|
Network connectivity issues can impact:
|
|
- Cluster communication
|
|
- Data replication
|
|
- Service availability
|
|
- Management access
|
|
"""
|
|
|
|
if "Disk" in issue:
|
|
for partition in health_report.get('drives_health', {}).get('drives', []):
|
|
if partition.get('mountpoint') in issue:
|
|
description += f"\n=== Disk Metrics ===\n"
|
|
description += f"Disk Device: {partition['device']}\n"
|
|
description += f"Mount Point: {partition['mountpoint']}\n"
|
|
description += f"Total Space: {partition['total_space']}\n"
|
|
description += f"Used Space: {partition['used_space']}\n"
|
|
description += f"Free Space: {partition['free_space']}\n"
|
|
description += f"Usage Percent: {partition['usage_percent']}%\n"
|
|
|
|
return description
|
|
|
|
def _create_tickets_for_issues(self, health_report: Dict[str, Any]):
|
|
issues = self._detect_issues(health_report)
|
|
if not issues:
|
|
logger.info("No issues detected.")
|
|
return
|
|
|
|
hostname = socket.gethostname()
|
|
action_type = self.TICKET_TEMPLATES['ACTION_TYPE']
|
|
environment = self.TICKET_TEMPLATES['ENVIRONMENT']
|
|
ticket_type = self.TICKET_TEMPLATES['TICKET_TYPE']
|
|
hardware_type = self.TICKET_TEMPLATES['HARDWARE_TYPE']
|
|
software_type = self.TICKET_TEMPLATES['SOFTWARE_TYPE']
|
|
|
|
for issue in issues:
|
|
if 'critical' in issue.lower():
|
|
priority = self.PRIORITIES['CRITICAL']
|
|
elif 'warning' in issue.lower():
|
|
# all warnings become LOW priority (4)
|
|
priority = self.PRIORITIES['LOW']
|
|
else:
|
|
# everything else stays at MEDIUM (3)
|
|
priority = self.PRIORITIES['MEDIUM']
|
|
|
|
category = self.TICKET_TEMPLATES['DEFAULT_CATEGORY']
|
|
issue_type = self.TICKET_TEMPLATES['DEFAULT_ISSUE_TYPE']
|
|
scope = self.TICKET_TEMPLATES['SCOPE']['SINGLE_NODE']
|
|
|
|
drive_size = ""
|
|
if "Drive" in issue and "/dev/" in issue:
|
|
device = re.search(r'/dev/[a-zA-Z0-9]+', issue).group(0)
|
|
drive_details = self._get_drive_details(device)
|
|
if drive_details['capacity']:
|
|
drive_size = f"[{drive_details['capacity']}] "
|
|
|
|
# Determine if this is a hardware or software issue
|
|
issue_category = 'SOFTWARE' if 'LXC' in issue else 'HARDWARE'
|
|
|
|
# Use the correct template based on issue category
|
|
category_template = hardware_type['HARDWARE'] if issue_category == 'HARDWARE' else software_type['SOFTWARE']
|
|
|
|
ticket_title = (
|
|
f"[{hostname}]"
|
|
f"{action_type['AUTO']}"
|
|
f"{category_template}"
|
|
f"{issue}"
|
|
f"{scope}"
|
|
f"{environment['PRODUCTION']}"
|
|
f"{ticket_type['MAINTENANCE']}"
|
|
)
|
|
description = self._generate_detailed_description(issue, health_report)
|
|
|
|
ticket_payload = {
|
|
"title": ticket_title,
|
|
"description": description,
|
|
"priority": priority,
|
|
"status": "Open",
|
|
"category": category,
|
|
"type": issue_type
|
|
}
|
|
|
|
if self.dry_run:
|
|
logger.info("Dry-run mode enabled. Simulating ticket creation:")
|
|
logger.info(json.dumps(ticket_payload, indent=4))
|
|
else:
|
|
try:
|
|
response = requests.post(
|
|
self.ticket_api_url,
|
|
json=ticket_payload,
|
|
headers={'Content-Type': 'application/json'},
timeout=30  # avoid hanging the monitor if the ticket API is unreachable
|
|
)
|
|
|
|
response_data = response.json()
|
|
|
|
if response_data.get('success'):
|
|
logger.info(f"Ticket created successfully: {ticket_title}")
|
|
logger.info(f"Ticket ID: {response_data.get('ticket_id')}")
|
|
elif response_data.get('error') == 'Duplicate ticket':
|
|
logger.info(f"Duplicate ticket detected - existing ticket ID: {response_data.get('existing_ticket_id')}")
|
|
continue
|
|
else:
|
|
logger.error(f"Failed to create ticket: {response_data.get('error')}")
|
|
|
|
except Exception as e:
|
|
logger.error(f"Error creating ticket: {e}")
|
|
|
|
def _detect_issues(self, health_report: Dict[str, Any]) -> List[str]:
|
|
"""
|
|
Detect issues in the health report including non-critical issues.
|
|
|
|
:param health_report: The comprehensive health report from the checks.
|
|
:return: List of issue descriptions detected during checks.
|
|
"""
|
|
issues = []
|
|
|
|
# Check for drive-related issues
|
|
for drive in health_report.get('drives_health', {}).get('drives', []):
|
|
# Skip drives with ERROR or NOT_SUPPORTED status - these are likely virtual/unsupported devices
|
|
if drive.get('smart_status') in ['ERROR', 'NOT_SUPPORTED']:
|
|
logger.debug(f"Skipping issue detection for drive {drive['device']} with status {drive.get('smart_status')}")
|
|
continue
|
|
|
|
# Only report issues for drives with valid SMART status
|
|
if drive.get('smart_issues') and drive.get('smart_status') in ['HEALTHY', 'UNHEALTHY', 'UNKNOWN']:
|
|
# Filter out generic error messages that don't indicate real hardware issues
|
|
filtered_issues = []
|
|
for issue in drive['smart_issues']:
|
|
if not any(skip_phrase in issue for skip_phrase in [
|
|
"Error checking SMART:",
|
|
"Unable to read device information",
|
|
"SMART not supported",
|
|
"timed out"
|
|
]):
|
|
filtered_issues.append(issue)
|
|
|
|
if filtered_issues:
|
|
issues.append(f"Drive {drive['device']} has SMART issues: {', '.join(filtered_issues)}")
|
|
|
|
# Check temperature regardless of SMART status
|
|
if drive.get('temperature') and drive['temperature'] > self.CONFIG['THRESHOLDS']['TEMPERATURE_WARNING']:
|
|
issues.append(f"Drive {drive['device']} temperature is high: {drive['temperature']}°C")
|
|
|
|
# Check for ECC memory errors
|
|
memory_health = health_report.get('memory_health', {})
|
|
if memory_health.get('has_ecc') and memory_health.get('ecc_errors'):
|
|
issues.extend(memory_health['ecc_errors'])
|
|
|
|
# Check for CPU-related issues
|
|
cpu_health = health_report.get('cpu_health', {})
|
|
if cpu_health and cpu_health.get('cpu_usage_percent', 0) > self.CONFIG['THRESHOLDS']['CPU_WARNING']:
|
|
issues.append("CPU usage is above threshold")
|
|
|
|
# Check for network-related issues
|
|
network_health = health_report.get('network_health', {})
|
|
for network in ['management_network', 'ceph_network']:
|
|
if network_health.get(network, {}).get('issues'):
|
|
issues.extend(network_health[network]['issues'])
|
|
|
|
lxc_health = health_report.get('lxc_health', {})
|
|
if lxc_health.get('status') in ['WARNING', 'CRITICAL']:
|
|
issues.extend(lxc_health.get('issues', []))
|
|
|
|
logger.info("=== Issue Detection Started ===")
|
|
logger.info(f"Checking drives: {len(health_report['drives_health']['drives'])} found")
|
|
logger.info(f"Memory status: {health_report['memory_health']['status']}")
|
|
logger.info(f"CPU status: {health_report['cpu_health']['status']}")
|
|
logger.info(f"Network status: {health_report['network_health']}")
|
|
logger.info(f"Detected issues: {issues}")
|
|
logger.info("=== Issue Detection Completed ===\n")
|
|
|
|
return issues
|
|
|
|
def _get_all_disks(self) -> List[str]:
|
|
"""
|
|
Get all physical disks using multiple detection methods.
|
|
"""
|
|
disks = set()
|
|
|
|
# Method 1: Use lsblk to get physical disks
|
|
try:
|
|
result = subprocess.run(
|
|
['lsblk', '-d', '-n', '-o', 'NAME'],
|
|
stdout=subprocess.PIPE,
|
|
text=True
|
|
)
|
|
disks.update(f"/dev/{disk}" for disk in result.stdout.strip().split('\n'))
|
|
logger.debug(f"Disks found via lsblk: {disks}")
|
|
except Exception as e:
|
|
logger.debug(f"lsblk detection failed: {e}")
|
|
|
|
# Method 2: Direct device scanning
|
|
for pattern in ['/dev/sd*', '/dev/nvme*n*']:
|
|
try:
|
|
matches = glob.glob(pattern)
|
|
disks.update(d for d in matches if re.fullmatch(r'/dev/sd[a-z]+|/dev/nvme\d+n\d+', d))
|
|
logger.debug(f"Disks found via glob {pattern}: {matches}")
|
|
except Exception as e:
|
|
logger.debug(f"Glob detection failed for {pattern}: {e}")
|
|
|
|
return list(disks)
|
|
|
|
def _is_excluded_mount(self, mountpoint: str) -> bool:
|
|
"""Check if a mountpoint should be excluded from monitoring."""
|
|
# Check exact matches
|
|
if mountpoint in self.CONFIG['EXCLUDED_MOUNTS']:
|
|
return True
|
|
|
|
# Check patterns
|
|
for pattern in self.CONFIG['EXCLUDED_PATTERNS']:
|
|
if re.match(pattern, mountpoint):
|
|
return True
|
|
return False
|
|
|
|
def _parse_size(self, size_str: str) -> float:
|
|
"""
|
|
Parse size string with units to bytes.
|
|
|
|
:param size_str: String containing size with unit (e.g. '15.7G', '21.8T')
|
|
:return: Size in bytes as float
|
|
"""
|
|
logger.debug(f"Parsing size string: {size_str}")
|
|
|
|
try:
|
|
# Skip non-size strings
|
|
if not isinstance(size_str, str):
|
|
logger.debug(f"Not a string: {size_str}")
|
|
return 0.0
|
|
|
|
if not any(unit in size_str.upper() for unit in ['B', 'K', 'M', 'G', 'T']):
|
|
logger.debug(f"No valid size unit found in: {size_str}")
|
|
return 0.0
|
|
|
|
# Define multipliers for units
|
|
multipliers = {
|
|
'B': 1,
|
|
'K': 1024,
|
|
'M': 1024**2,
|
|
'G': 1024**3,
|
|
'T': 1024**4
|
|
}
|
|
|
|
# Extract numeric value and unit
|
|
match = re.match(r'(\d+\.?\d*)', size_str)
|
|
if not match:
|
|
logger.debug(f"Could not extract numeric value from: {size_str}")
|
|
return 0.0
|
|
|
|
value = float(match.group(1))
|
|
|
|
unit_match = re.search(r'([BKMGT])', size_str.upper())
|
|
if not unit_match:
|
|
logger.debug(f"Could not extract unit from: {size_str}")
|
|
return 0.0
|
|
|
|
unit = unit_match.group(1)
|
|
|
|
logger.debug(f"Extracted value: {value}, unit: {unit}")
|
|
|
|
# Convert to bytes
|
|
bytes_value = value * multipliers.get(unit, 0)
|
|
logger.debug(f"Converted size to bytes: {bytes_value}")
|
|
|
|
return bytes_value
|
|
|
|
except (ValueError, AttributeError, TypeError) as e:
|
|
logger.debug(f"Failed to parse size string: {size_str}")
|
|
logger.debug(f"Parse error details: {str(e)}")
|
|
return 0.0
|
|
|
|
def _is_physical_disk(self, device_path):
|
|
"""
|
|
Check if the device is a physical disk, excluding logical volumes and special devices.
|
|
|
|
:param device_path: Path to the device
|
|
:return: Boolean indicating if it's a relevant physical disk
|
|
"""
|
|
logger.debug(f"Checking device: {device_path}")
|
|
|
|
# Exclude known non-physical or special devices
|
|
excluded_patterns = [
|
|
r'/dev/mapper/', # LVM devices
|
|
r'/dev/dm-', # Device mapper devices
|
|
r'/dev/loop', # Loop devices
|
|
r'/dev/rbd', # Ceph RBD devices
|
|
r'/boot', # Boot partitions
|
|
r'/boot/efi', # EFI partitions
|
|
r'[0-9]+$' # Partition numbers
|
|
]
|
|
|
|
if any(re.search(pattern, device_path) for pattern in excluded_patterns):
|
|
logger.debug(f"Device {device_path} excluded due to pattern match")
|
|
return False
|
|
|
|
# Match physical devices
|
|
physical_patterns = [
|
|
r'/dev/sd[a-z]+$', # SATA/SAS drives
|
|
r'/dev/nvme\d+n\d+$', # NVMe drives
|
|
r'/dev/mmcblk\d+$', # MMC/SD cards
|
|
r'/dev/hd[a-z]+$' # IDE drives (legacy)
|
|
]
|
|
|
|
is_physical = any(re.match(pattern, device_path) for pattern in physical_patterns)
|
|
logger.debug(f"Device {device_path} physical disk check result: {is_physical}")
|
|
|
|
return is_physical
|
|
|
|
def _check_disk_firmware(self, device: str) -> Dict[str, Any]:
|
|
"""
|
|
Check disk firmware version against known problematic versions.
|
|
"""
|
|
firmware_info = {
|
|
'version': None,
|
|
'model': None,
|
|
'manufacturer': None,
|
|
'is_problematic': False,
|
|
'known_issues': []
|
|
}
|
|
|
|
MANUFACTURER_PATTERNS = {
|
|
'Western Digital': ['WDC', 'Western Digital', 'Ultrastar'],
|
|
'Samsung': ['Samsung', 'SAMSUNG'],
|
|
'Seagate': ['Seagate', 'ST'],
|
|
'Intel': ['Intel', 'INTEL'],
|
|
'Micron': ['Micron', 'Crucial'],
|
|
'Toshiba': ['Toshiba', 'TOSHIBA']
|
|
}
|
|
|
|
try:
|
|
result = subprocess.run(
|
|
['smartctl', '-i', device],
|
|
stdout=subprocess.PIPE,
|
|
stderr=subprocess.PIPE,
|
|
text=True
|
|
)
|
|
|
|
model_line = None
|
|
for line in result.stdout.split('\n'):
|
|
if 'Firmware Version:' in line:
|
|
firmware_info['version'] = line.split(':')[1].strip()
|
|
elif 'Model Family:' in line:
|
|
model_line = line
|
|
firmware_info['model'] = line.split(':')[1].strip()
|
|
elif 'Device Model:' in line and not firmware_info['model']:
|
|
model_line = line
|
|
firmware_info['model'] = line.split(':')[1].strip()
|
|
|
|
# Determine manufacturer
|
|
if model_line:
|
|
for manufacturer, patterns in MANUFACTURER_PATTERNS.items():
|
|
if any(pattern in model_line for pattern in patterns):
|
|
firmware_info['manufacturer'] = manufacturer
|
|
break
|
|
|
|
# Check against known problematic versions
|
|
if firmware_info['manufacturer'] and firmware_info['model']:
|
|
# Check if manufacturer exists in our problematic firmware database
|
|
if firmware_info['manufacturer'] in self.PROBLEMATIC_FIRMWARE:
|
|
for model, versions in self.PROBLEMATIC_FIRMWARE[firmware_info['manufacturer']].items():
|
|
if model in firmware_info['model'] and firmware_info['version'] in versions:
|
|
firmware_info['is_problematic'] = True
|
|
firmware_info['known_issues'].append(
|
|
f"Known problematic firmware version {firmware_info['version']} "
|
|
f"for {firmware_info['model']}"
|
|
)
|
|
|
|
logger.debug(f"=== Firmware Check for {device} ===")
|
|
logger.debug(f"Firmware version: {firmware_info['version']}")
|
|
logger.debug(f"Model: {firmware_info['model']}")
|
|
logger.debug(f"Manufacturer: {firmware_info['manufacturer']}")
|
|
logger.debug(f"Known issues: {firmware_info['known_issues']}")
|
|
logger.debug("=== End Firmware Check ===\n")
|
|
|
|
except Exception as e:
|
|
firmware_info['known_issues'].append(f"Error checking firmware: {str(e)}")
|
|
|
|
return firmware_info
|
|
|
|
def _parse_smart_value(self, raw_value: str) -> int:
|
|
"""
|
|
Parse SMART values handling different formats including NVMe temperature readings
|
|
"""
|
|
try:
|
|
# Handle temperature values with °C
|
|
if isinstance(raw_value, str) and '°C' in raw_value:
|
|
# Extract only the numeric portion before °C
|
|
temp_value = raw_value.split('°C')[0].strip()
|
|
return int(temp_value)
|
|
# Handle time format (e.g., '15589h+17m+33.939s')
|
|
if 'h+' in raw_value:
|
|
return int(raw_value.split('h+')[0])
|
|
# Handle hex values
|
|
if '0x' in raw_value:
|
|
return int(raw_value, 16)
|
|
# Handle basic numbers
|
|
return int(raw_value)
|
|
except (ValueError, TypeError):
|
|
logger.debug(f"Could not parse SMART value: {raw_value}")
|
|
return 0
|
|
|
|
def _get_manufacturer_profile(self, model: str, manufacturer: str = None, firmware: str = None) -> Dict[str, Any]:
|
|
"""
|
|
Get manufacturer-specific SMART profile based on drive model/manufacturer/firmware.
|
|
"""
|
|
# Check each manufacturer profile
|
|
for mfg, profile in self.MANUFACTURER_SMART_PROFILES.items():
|
|
# Check firmware patterns first (most specific for OEM drives)
|
|
if firmware and 'firmware_patterns' in profile:
|
|
for pattern in profile['firmware_patterns']:
|
|
if pattern in firmware:
|
|
logger.debug(f"Matched manufacturer profile: {mfg} for firmware: {firmware}")
|
|
return profile
|
|
|
|
# Check model/manufacturer aliases
|
|
for alias in profile['aliases']:
|
|
if alias.lower() in model.lower() or (manufacturer and alias.lower() in manufacturer.lower()):
|
|
logger.debug(f"Matched manufacturer profile: {mfg} for model: {model}")
|
|
return profile
|
|
|
|
# Return generic profile if no match
|
|
logger.debug(f"No specific profile found for {model}, using Generic profile")
|
|
return self.MANUFACTURER_SMART_PROFILES['Generic']
|
|
|
|
def _is_new_drive(self, power_on_hours: int) -> bool:
|
|
"""
|
|
Determine if a drive is considered "new" based on power-on hours.
|
|
"""
|
|
return power_on_hours < 720  # Less than ~30 days (720 hours) of runtime
|
|
|
|
def _check_smart_health(self, device: str) -> Dict[str, Any]:
|
|
"""
|
|
Enhanced SMART health check with better error handling.
|
|
"""
|
|
smart_health = {
|
|
'status': 'UNKNOWN',
|
|
'severity': 'NORMAL',
|
|
'issues': [],
|
|
'temp': None,
|
|
'attributes': {},
|
|
'manufacturer_profile': None
|
|
}
|
|
|
|
try:
|
|
# First verify the device is SMART-capable
|
|
drive_details = self._get_drive_details(device)
|
|
if not drive_details.get('smart_capable', False):
|
|
smart_health['status'] = 'NOT_SUPPORTED'
|
|
smart_health['issues'].append("SMART not supported on this device")
|
|
return smart_health
|
|
|
|
# If we have no model info, the device might not be responding properly
|
|
if not drive_details.get('model'):
|
|
smart_health['status'] = 'ERROR'
|
|
smart_health['issues'].append("Unable to read device information")
|
|
return smart_health
|
|
|
|
manufacturer_profile = self._get_manufacturer_profile(
|
|
drive_details.get('model', ''),
|
|
drive_details.get('manufacturer', '')
|
|
)
|
|
smart_health['manufacturer_profile'] = manufacturer_profile
|
|
|
|
# Get firmware information
|
|
firmware_info = self._check_disk_firmware(device)
|
|
if firmware_info['is_problematic']:
|
|
smart_health['severity'] = 'WARNING'
|
|
smart_health['issues'].extend(firmware_info['known_issues'])
|
|
|
|
# Get detailed SMART data with timeout
|
|
result = subprocess.run(
|
|
['smartctl', '-A', '-H', '-l', 'error', '-l', 'background', device],
|
|
stdout=subprocess.PIPE,
|
|
stderr=subprocess.PIPE,
|
|
text=True,
|
|
timeout=30 # 30 second timeout
|
|
)
|
|
|
|
output = result.stdout
|
|
|
|
# Check overall health status
|
|
if 'FAILED' in output and 'PASSED' not in output:
|
|
smart_health['status'] = 'UNHEALTHY'
|
|
smart_health['severity'] = 'CRITICAL'
|
|
smart_health['issues'].append("SMART overall health check failed")
|
|
elif 'PASSED' in output:
|
|
smart_health['status'] = 'HEALTHY'
|
|
else:
|
|
smart_health['status'] = 'UNKNOWN'
|
|
|
|
# Parse SMART attributes with manufacturer-specific handling
|
|
power_on_hours = 0
|
|
|
|
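# smartctl -A reports ATA attributes as a 10-column table
# (ID# ATTRIBUTE_NAME ... RAW_VALUE); parts[9] below is the RAW_VALUE column.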
for line in output.split('\n'):
|
|
# Extract Power_On_Hours first to determine if drive is new
|
|
if 'Power_On_Hours' in line:
|
|
parts = line.split()
|
|
if len(parts) >= 10:
|
|
power_on_hours = self._parse_smart_value(parts[9])
|
|
smart_health['attributes']['Power_On_Hours'] = power_on_hours
|
|
|
|
# Check if this is a new drive
|
|
is_new_drive = self._is_new_drive(power_on_hours)
|
|
logger.debug(f"Drive {device} power-on hours: {power_on_hours}, is_new_drive: {is_new_drive}")
|
|
|
|
# Define base SMART thresholds (for non-manufacturer specific attributes)
|
|
BASE_SMART_THRESHOLDS = {
|
|
'Reallocated_Sector_Ct': {'warning': 5, 'critical': 10},
|
|
'Current_Pending_Sector': {'warning': 1, 'critical': 5},
|
|
'Offline_Uncorrectable': {'warning': 1, 'critical': 2},
|
|
'Reported_Uncorrect': {'warning': 1, 'critical': 10},
|
|
'Spin_Retry_Count': {'warning': 1, 'critical': 5},
|
|
'Power_Cycle_Count': {'warning': 5000, 'critical': 10000},
|
|
'Power_On_Hours': {'warning': 61320, 'critical': 70080}, # ~7-8 years
|
|
'Temperature_Celsius': {'warning': 65, 'critical': 75},
|
|
'Available_Spare': {'warning': 30, 'critical': 10},
|
|
'Program_Fail_Count': {'warning': 10, 'critical': 20},
|
|
'Erase_Fail_Count': {'warning': 10, 'critical': 20},
|
|
'Load_Cycle_Count': {'warning': 900000, 'critical': 1000000},
|
|
'SSD_Life_Left': {'warning': 30, 'critical': 10}
|
|
}
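# These thresholds are compared against the parsed RAW_VALUE of each attribute;
# Temperature_Celsius is also copied into smart_health['temp'] below.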
|
|
|
|
# Parse all SMART attributes
|
|
for line in output.split('\n'):
|
|
# Handle manufacturer-specific Wear_Leveling_Count
|
|
if 'Wear_Leveling_Count' in line:
|
|
parts = line.split()
|
|
if len(parts) >= 10:
|
|
raw_value = self._parse_smart_value(parts[9])
|
|
smart_health['attributes']['Wear_Leveling_Count'] = raw_value
|
|
|
|
# Get manufacturer-specific thresholds
|
|
wear_attr = manufacturer_profile.get('attributes', {}).get('Wear_Leveling_Count', {})
|
|
|
|
# Skip evaluation if this is a new drive and manufacturer profile says to ignore
|
|
if is_new_drive and wear_attr.get('ignore_on_new_drive', False):
|
|
logger.debug(f"Skipping Wear_Leveling_Count evaluation for new drive: {raw_value}")
|
|
continue
|
|
|
|
warning_threshold = wear_attr.get('warning_threshold')
|
|
critical_threshold = wear_attr.get('critical_threshold')
|
|
|
|
if warning_threshold and critical_threshold:
|
|
behavior = wear_attr.get('behavior', 'countup')
|
|
|
|
if behavior == 'countup':
|
|
if raw_value >= critical_threshold:
|
|
smart_health['severity'] = 'CRITICAL'
|
|
smart_health['issues'].append(f"Critical wear leveling count: {raw_value}")
|
|
elif raw_value >= warning_threshold:
|
|
if smart_health['severity'] != 'CRITICAL':
|
|
smart_health['severity'] = 'WARNING'
|
|
smart_health['issues'].append(f"High wear leveling count: {raw_value}")
|
|
elif behavior == 'countdown':
|
|
if raw_value <= critical_threshold:
|
|
smart_health['severity'] = 'CRITICAL'
|
|
smart_health['issues'].append(f"Critical wear leveling remaining: {raw_value}")
|
|
elif raw_value <= warning_threshold:
|
|
if smart_health['severity'] != 'CRITICAL':
|
|
smart_health['severity'] = 'WARNING'
|
|
smart_health['issues'].append(f"Low wear leveling remaining: {raw_value}")
|
|
|
|
# Handle all other standard SMART attributes
|
|
for attr, thresholds in BASE_SMART_THRESHOLDS.items():
|
|
if attr in line and attr != 'Wear_Leveling_Count': # Skip wear leveling as it's handled above
|
|
parts = line.split()
|
|
if len(parts) >= 10:
|
|
raw_value = self._parse_smart_value(parts[9])
|
|
smart_health['attributes'][attr] = raw_value
|
|
|
|
if attr == 'Temperature_Celsius':
|
|
smart_health['temp'] = raw_value
|
|
if raw_value >= thresholds['critical']:
|
|
smart_health['severity'] = 'CRITICAL'
|
|
smart_health['issues'].append(f"Critical temperature: {raw_value}°C")
|
|
elif raw_value >= thresholds['warning']:
|
|
if smart_health['severity'] != 'CRITICAL':
|
|
smart_health['severity'] = 'WARNING'
|
|
smart_health['issues'].append(f"High temperature: {raw_value}°C")
|
|
else:
|
|
if raw_value >= thresholds['critical']:
|
|
smart_health['severity'] = 'CRITICAL'
|
|
smart_health['issues'].append(f"Critical {attr}: {raw_value}")
|
|
elif raw_value >= thresholds['warning']:
|
|
if smart_health['severity'] != 'CRITICAL':
|
|
smart_health['severity'] = 'WARNING'
|
|
smart_health['issues'].append(f"Warning {attr}: {raw_value}")
|
|
|
|
# Check for recent SMART errors
|
|
error_log_pattern = r"Error \d+ occurred at disk power-on lifetime: (\d+) hours"
|
|
error_matches = re.finditer(error_log_pattern, output)
|
|
recent_errors = []
|
|
|
|
for match in error_matches:
|
|
error_hour = int(match.group(1))
|
|
current_hours = smart_health['attributes'].get('Power_On_Hours', 0)
|
|
if current_hours - error_hour < 168: # Errors within last week
|
|
recent_errors.append(match.group(0))
|
|
|
|
if recent_errors:
|
|
smart_health['severity'] = 'WARNING'
|
|
smart_health['issues'].extend(recent_errors)
|
|
|
|
logger.debug(f"=== SMART Health Check for {device} ===")
|
|
logger.debug(f"Manufacturer profile: {manufacturer_profile.get('aliases', ['Unknown'])[0] if manufacturer_profile else 'None'}")
|
|
logger.debug("Raw SMART attributes:")
|
|
for attr, value in smart_health['attributes'].