hwmonDaemon/hwmonDaemon.py
#!/usr/bin/env python3
import os, sys, json, requests, psutil, socket, subprocess, logging, argparse, urllib.request, re, glob, datetime
from typing import Dict, Any, List
# Create a logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Create a console handler and set its level to DEBUG
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
# Create a formatter
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# Add the formatter to the console handler
console_handler.setFormatter(formatter)
# Add the console handler to the logger
logger.addHandler(console_handler)
class SystemHealthMonitor:
STANDARD_WIDTH = 80
PRIORITIES = {
'CRITICAL': '1',
'HIGH': '2',
'MEDIUM': '3',
'LOW': '4'
}
ISSUE_PRIORITIES = {
'SMART_FAILURE': PRIORITIES['HIGH'],
'DISK_CRITICAL': PRIORITIES['HIGH'],
'DISK_WARNING': PRIORITIES['MEDIUM'],
'UNCORRECTABLE_ECC': PRIORITIES['HIGH'],
'CORRECTABLE_ECC': PRIORITIES['MEDIUM'],
'CPU_HIGH': PRIORITIES['LOW'],
'NETWORK_FAILURE': PRIORITIES['HIGH']
}
CONFIG = {
'TICKET_API_URL': 'http://10.10.10.45/create_ticket_api.php',
'THRESHOLDS': {
'DISK_CRITICAL': 90,
'DISK_WARNING': 80,
'LXC_CRITICAL': 90,
'LXC_WARNING': 80,
'CPU_WARNING': 80,
'TEMPERATURE_WARNING': 65
},
'NETWORKS': {
'MANAGEMENT': '10.10.10.1',
'CEPH': '10.10.90.1',
'PING_TIMEOUT': 1,
'PING_COUNT': 1
},
'EXCLUDED_MOUNTS': [
'/media',
'/mnt/pve/mediafs',
'/opt/metube_downloads'
],
'EXCLUDED_PATTERNS': [
r'/media.*',
r'/mnt/pve/mediafs.*',
r'.*/media$',
r'.*mediafs.*',
r'.*/downloads.*'
]
}
TICKET_TEMPLATES = {
'ACTION_TYPE': {
'AUTO': '[auto]',
'MANUAL': '[manual]'
},
'ENVIRONMENT': {
'PRODUCTION': '[production]'
},
'TICKET_TYPE': {
'MAINTENANCE': '[maintenance]'
},
'HARDWARE_TYPE': {
'HARDWARE': '[hardware]'
},
'SOFTWARE_TYPE': {
'SOFTWARE': '[software]'
},
'NETWORK_TYPE': {
'NETWORK': '[network]'
},
'SCOPE': {
'SINGLE_NODE': '[single-node]',
'CLUSTER_WIDE': '[cluster-wide]'
},
'DEFAULT_CATEGORY': 'Hardware',
'DEFAULT_ISSUE_TYPE': 'Problem'
}
PROBLEMATIC_FIRMWARE = {
'Samsung': {
'EVO860': ['RVT01B6Q', 'RVT02B6Q'], # Known issues with sudden performance drops
'EVO870': ['SVT01B6Q'],
'PM883': ['HXT7404Q'] # Known issues with TRIM
},
'Seagate': {
'ST8000NM': ['CC64'], # Known issues with NCQ
'ST12000NM': ['SN02']
},
'WDC': {
'WD121KRYZ': ['01.01A01'], # RAID rebuild issues
'WD141KRYZ': ['02.01A02']
}
}
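# Note: _check_disk_firmware() below matches these model keys as substrings of the reported
# "Device Model"/"Model Family" string and flags the drive only when its firmware version is an
# exact member of the listed versions.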
SEVERITY_INDICATORS = {
'CRITICAL': '🔴',
'WARNING': '🟡',
'HEALTHY': '🟢',
'UNKNOWN': ''
}
SMART_DESCRIPTIONS = {
'Reported_Uncorrect': """
Number of errors that could not be recovered using hardware ECC.
Impact:
- Indicates permanent data loss in affected sectors
- High correlation with drive hardware failure
- Critical reliability indicator
Recommended Actions:
1. Backup critical data immediately
2. Check drive logs for related errors
3. Plan for drive replacement
4. Monitor for error count increases
""",
'Reallocated_Sector_Ct': """
Number of sectors that have been reallocated due to errors.
Impact:
- High counts indicate degrading media
- Each reallocation uses one of the drive's limited spare sectors
- Rapid increases suggest accelerating drive wear
Recommended Actions:
1. Monitor rate of increase
2. Check drive temperature
3. Plan replacement if count grows rapidly
""",
'Current_Pending_Sector': """
Sectors waiting to be reallocated due to read/write errors.
Impact:
- Indicates potentially unstable sectors
- May result in data loss if unrecoverable
- Should be monitored for increases
Recommended Actions:
1. Backup affected files
2. Run extended SMART tests
3. Monitor for conversion to reallocated sectors
""",
'Offline_Uncorrectable': """
Count of uncorrectable errors detected during offline data collection.
Impact:
- Direct indicator of media reliability issues
- May affect data integrity
- High values suggest drive replacement needed
Recommended Actions:
1. Run extended SMART tests
2. Check drive logs
3. Plan replacement if count is increasing
""",
'Spin_Retry_Count': """
Number of spin start retry attempts.
Impact:
- Indicates potential motor or bearing issues
- May predict imminent mechanical failure
- Increasing values suggest degrading drive health
Recommended Actions:
1. Monitor for rapid increases
2. Check drive temperature
3. Plan replacement if count grows rapidly
""",
'Power_On_Hours': """
Total number of hours the device has been powered on.
Impact:
- Normal aging metric
- Used to gauge overall drive lifetime
- Compare against manufacturer's MTBF rating
Recommended Actions:
1. Compare to warranty period
2. Plan replacement if approaching rated lifetime
""",
'Media_Wearout_Indicator': """
Percentage of drive's rated life remaining (SSDs).
Impact:
- 100 indicates new drive
- 0 indicates exceeded rated writes
- Critical for SSD lifecycle management
Recommended Actions:
1. Plan replacement below 20%
2. Monitor write workload
3. Consider workload redistribution
""",
'Temperature_Celsius': """
Current drive temperature.
Impact:
- High temperatures accelerate wear
- Optimal range: 20-45°C
- Sustained high temps reduce lifespan
Recommended Actions:
1. Check system cooling
2. Verify airflow
3. Monitor for sustained high temperatures
""",
'Available_Spare': """
Percentage of spare blocks remaining (SSDs).
Impact:
- Critical for SSD endurance
- Low values indicate approaching end-of-life
- Rapid decreases suggest excessive writes
Recommended Actions:
1. Plan replacement if below 20%
2. Monitor write patterns
3. Consider workload changes
""",
'Program_Fail_Count': """
Number of flash program operation failures.
Impact:
- Indicates NAND cell reliability
- Important for SSD health assessment
- Increasing values suggest flash degradation
Recommended Actions:
1. Monitor rate of increase
2. Check firmware updates
3. Plan replacement if rapidly increasing
""",
'Erase_Fail_Count': """
Number of flash erase operation failures.
Impact:
- Related to NAND block health
- Critical for SSD reliability
- High counts suggest failing flash blocks
Recommended Actions:
1. Monitor count increases
2. Check firmware version
3. Plan replacement if count is high
""",
'Load_Cycle_Count': """
Number of power cycles and head load/unload events.
Impact:
- Normal operation metric
- High counts may indicate power management issues
- Compare against rated cycles (typically 600k-1M)
Recommended Actions:
1. Review power management settings
2. Monitor rate of increase
3. Plan replacement near rated limit
""",
'Wear_Leveling_Count': """
SSD block erase distribution metric.
Impact:
- Indicates wear pattern uniformity
- Higher values show more balanced wear
- Critical for SSD longevity
Recommended Actions:
1. Monitor trend over time
2. Compare with similar drives
3. Check workload distribution
"""
}
def __init__(self,
ticket_api_url: str = 'http://10.10.10.45/create_ticket_api.php',
dry_run: bool = False):
"""
Initialize the system health monitor.
:param ticket_api_url: URL for the ticket creation API.
:param dry_run: If True, simulate API calls without sending requests.
"""
self.ticket_api_url = ticket_api_url
self.dry_run = dry_run
def run(self):
"""
Perform a one-shot health check of the system.
"""
try:
# Perform health checks and gather the report
health_report = self.perform_health_checks()
# Create tickets for any detected critical issues
self._create_tickets_for_issues(health_report)
except Exception as e:
import traceback
logger.error(f"Unexpected error during health check: {e}")
logger.error(traceback.format_exc())
def perform_health_checks(self) -> Dict[str, Any]:
"""
Perform comprehensive system health checks and return a report.
"""
health_report = {
'drives_health': self._check_drives_health(),
'memory_health': self._check_memory_usage(),
'cpu_health': self._check_cpu_usage(),
'network_health': self._check_network_status(),
'lxc_health': self._check_lxc_storage()
}
if self.dry_run:
logger.info("\n=== System Health Summary ===")
logger.info(f"Overall Drive Health: {health_report['drives_health']['overall_status']}")
# Summarized drive information with usage
logger.info("\nDrive Status:")
for drive in health_report['drives_health']['drives']:
issues = drive.get('smart_issues', [])
temp = f", {drive.get('temperature')}°C" if drive.get('temperature') else ""
status = "⚠️ " if issues else ""
# Disk usage information
usage_info = ""
if drive.get('partitions'):
for partition in drive['partitions']:
usage_info += f"\n └─ {partition['mountpoint']}: {partition['used_space']}/{partition['total_space']} ({partition['usage_percent']}% used)"
logger.info(f"{status}{drive['device']}{temp} - SMART: {drive['smart_status']}{usage_info}")
if issues:
logger.info(f" Issues: {', '.join(issues)}")
logger.info(f"\nMemory: {health_report['memory_health']['memory_percent']}% used")
if health_report['memory_health'].get('has_ecc'):
logger.info("ECC Memory: Present")
if health_report['memory_health'].get('ecc_errors'):
logger.info(f"ECC Errors: {len(health_report['memory_health']['ecc_errors'])} found")
logger.info(f"\nCPU Usage: {health_report['cpu_health']['cpu_usage_percent']}%")
logger.info("\nNetwork Status:")
logger.info(f"Management: {health_report['network_health']['management_network']['status']}")
logger.info(f"Ceph: {health_report['network_health']['ceph_network']['status']}")
logger.info("\n=== End Summary ===")
return health_report
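# Illustrative shape of the report returned above (a sketch with abridged values, not real output):
# {
#   'drives_health': {'overall_status': 'NORMAL', 'drives': [{'device': '/dev/sda', ...}]},
#   'memory_health': {'has_ecc': True, 'ecc_errors': [], 'status': 'OK', 'memory_percent': 42.0, ...},
#   'cpu_health': {'cpu_usage_percent': 12.3, 'status': 'OK'},
#   'network_health': {'management_network': {...}, 'ceph_network': {...}},
#   'lxc_health': {'status': 'OK', 'containers': [...], 'issues': []}
# }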
def _get_drive_details(self, device: str) -> Dict[str, str]:
"""
Get detailed drive information using smartctl
"""
drive_details = {
'model': None,
'serial': None,
'capacity': None,
'firmware': None,
'type': None # SSD or HDD
}
try:
result = subprocess.run(
['smartctl', '-i', device],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
)
for line in result.stdout.split('\n'):
if 'Device Model' in line:
drive_details['model'] = line.split(':')[1].strip()
elif 'Serial Number' in line:
drive_details['serial'] = line.split(':')[1].strip()
elif 'User Capacity' in line:
drive_details['capacity'] = line.split('[')[1].split(']')[0]
elif 'Firmware Version' in line:
drive_details['firmware'] = line.split(':')[1].strip()
elif 'Rotation Rate' in line:
drive_details['type'] = 'SSD' if 'Solid State Device' in line else 'HDD'
except Exception as e:
logger.debug(f"Error getting drive details: {e}")
return drive_details
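# The parser above assumes 'smartctl -i' output lines of roughly this form for ATA/SATA drives:
#   Device Model:     Samsung SSD 860 EVO 1TB
#   Serial Number:    S3Z8NB0K123456A
#   Firmware Version: RVT02B6Q
#   User Capacity:    1,000,204,886,016 bytes [1.00 TB]
#   Rotation Rate:    Solid State Device
# NVMe drives report "Model Number" and namespace capacities instead, so some fields may remain None.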
def make_box(self, title: str, content: str, content_width: int = 70) -> str:
"""Create a formatted box with title and content."""
return f"""
┏━ {title} {'━' * (content_width - len(title) - 3)}
{content}
{'━' * content_width}"""
# NOTE: the fragment below is unreachable (it follows the return above) and appears to be
# leftover example code sketching how make_box assembles report sections with a consistent width.
# sections = {
#     'DRIVE SPECIFICATIONS': ...,
#     'SMART STATUS': ...,
#     'PARTITION INFO': ...
# }
# # Each content line should pad to content_width
# for section, content in sections.items():
#     formatted_content = '\n'.join(f"{line:<{content_width-2}}" for line in content.split('\n'))
#     description += self.make_box(section, formatted_content)
def _get_issue_type(self, issue: str) -> str:
if "SMART" in issue:
return "SMART Health Issue"
elif "Drive" in issue:
return "Storage Issue"
elif "ECC" in issue:
return "Memory Issue"
elif "CPU" in issue:
return "Performance Issue"
elif "Network" in issue:
return "Network Issue"
return "Hardware Issue"
def _get_impact_level(self, issue: str) -> str:
if "CRITICAL" in issue or "UNHEALTHY" in issue:
return "🔴 Critical - Immediate Action Required"
elif "WARNING" in issue:
return "🟡 Warning - Action Needed Soon"
return "🟢 Low - Monitor Only"
def _generate_detailed_description(self, issue: str, health_report: Dict[str, Any]) -> str:
hostname = socket.gethostname()
timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
priority = "⚠ HIGH" if "CRITICAL" in issue else "● MEDIUM"
content_width = self.STANDARD_WIDTH - 2
banner = f"""
{'━' * content_width}
{' HARDWARE MONITORING ALERT TICKET '.center(content_width)}
{'━' * content_width}
┃ Host : {hostname:<{content_width-13}}
┃ Generated : {timestamp:<{content_width-13}}
┃ Priority : {priority:<{content_width-13}}
{'━' * content_width}"""
executive_summary = f"""
┏━ EXECUTIVE SUMMARY ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ Issue Type │ {self._get_issue_type(issue)}
┃ Impact Level │ {self._get_impact_level(issue)}
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛
"""
description = banner + executive_summary
# Add relevant SMART descriptions
for attr in self.SMART_DESCRIPTIONS:
if attr in issue:
description += f"\n{attr}:\n{self.SMART_DESCRIPTIONS[attr]}\n"
if "SMART" in issue:
description += """
SMART (Self-Monitoring, Analysis, and Reporting Technology) Attribute Details:
- Possible drive failure!
"""
if "Drive" in issue and "/dev/" in issue:
try:
device = re.search(r'/dev/[a-zA-Z0-9]+', issue).group(0) if '/dev/' in issue else None
drive_info = next((d for d in health_report['drives_health']['drives'] if d['device'] == device), None)
if drive_info:
drive_details = self._get_drive_details(device)
smart_data = {
'attributes': drive_info.get('smart_attributes', {}),
'performance_metrics': drive_info.get('performance_metrics', {}),
'last_test_date': drive_info.get('last_test_date', 'N/A')
}
power_on_hours = smart_data['attributes'].get('Power_On_Hours', 'N/A')
last_test_date = smart_data.get('last_test_date', 'N/A')
age = f"{int(power_on_hours/24/365) if isinstance(power_on_hours, (int, float)) else 'N/A'} years" if power_on_hours != 'N/A' else 'N/A'
description += """
┏━ DRIVE SPECIFICATIONS ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ Device Path │ {:<60}
┃ Model │ {:<60}
┃ Serial │ {:<60}
┃ Capacity │ {:<60}
┃ Type │ {:<60}
┃ Firmware │ {:<60}
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛
""".format(
device,
drive_details.get('model', 'N/A'),
drive_details.get('serial', 'N/A'),
drive_details.get('capacity', 'N/A'),
drive_details.get('type', 'N/A'),
drive_details.get('firmware', 'N/A')
)
if drive_info:
perf_metrics = {
'read_speed': drive_info.get('performance_metrics', {}).get('read_speed', 'N/A'),
'write_speed': drive_info.get('performance_metrics', {}).get('write_speed', 'N/A'),
'access_time': drive_info.get('performance_metrics', {}).get('access_time', 'N/A'),
'iops': drive_info.get('performance_metrics', {}).get('iops', 'N/A')
}
description += """
┏━ DRIVE TIMELINE ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ Power-On Hours │ {:<56}
┃ Last SMART Test │ {:<56}
┃ Drive Age │ {:<56}
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛
""".format(
f"{power_on_hours} hours" if power_on_hours != 'N/A' else 'N/A',
last_test_date,
age
)
description += """
┏━ SMART STATUS ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ Status │ {:<60}
┃ Temperature │ {:<60}
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛
""".format(
drive_info.get('smart_status', 'N/A'),
f"{drive_info.get('temperature')}°C" if drive_info.get('temperature') else 'N/A'
)
if drive_info.get('smart_attributes'):
description += "\n┏━ SMART ATTRIBUTES " + "━" * 48 + "\n"
for attr, value in drive_info['smart_attributes'].items():
description += "{:<25}{:<37}\n".format(
attr.replace('_', ' '), value
)
description += "┗" + "━" * 71 + "\n"
if drive_info.get('partitions'):
for partition in drive_info['partitions']:
usage_percent = partition.get('usage_percent', 0)
blocks = int(usage_percent / 5) # 20 blocks total = 100%
usage_meter = '█' * blocks + '░' * (20 - blocks)
description += """
┏━ PARTITION [{:<60}] ━┓
┃ Filesystem │ {:<60}
┃ Usage Meter │ [{:<58}] ┃
┃ Total Space │ {:<60}
┃ Used Space │ {:<60}
┃ Free Space │ {:<60}
┃ Usage │ {:<60}
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛
""".format(
partition.get('mountpoint', 'N/A'),
partition.get('fstype', 'N/A'),
usage_meter,
partition.get('total_space', 'N/A'),
partition.get('used_space', 'N/A'),
partition.get('free_space', 'N/A'),
f"{usage_percent}%"
)
firmware_info = self._check_disk_firmware(device)
if firmware_info['is_problematic']:
description += "\n┏━ FIRMWARE ALERTS " + "━" * 48 + "\n"
for issue in firmware_info['known_issues']:
description += "┃ ⚠ {:<67}\n".format(issue)
description += "┗" + "━" * 71 + "\n"
except Exception as e:
description += f"\nError generating drive details: {str(e)}\n"
if "Temperature" in issue:
description += """
High drive temperatures can:
- Reduce drive lifespan
- Cause performance degradation
- Lead to data corruption in extreme cases
Optimal temperature range: 20-45°C
"""
if "ECC" in issue:
description += """
ECC (Error Correction Code) Memory Issues:
- Correctable: Memory errors that were successfully fixed
- Uncorrectable: Serious memory errors that could not be corrected
Frequent ECC corrections may indicate degrading memory modules
"""
if "CPU" in issue:
description += """
High CPU usage sustained over time can indicate:
- Resource constraints
- Runaway processes
- Need for performance optimization
- Potential cooling issues
"""
if "Network" in issue:
description += """
Network connectivity issues can impact:
- Cluster communication
- Data replication
- Service availability
- Management access
"""
if "Disk" in issue:
for partition in health_report.get('drives_health', {}).get('drives', []):
if partition.get('mountpoint') in issue:
description += f"\n=== Disk Metrics ===\n"
description += f"Disk Device: {partition['device']}\n"
description += f"Mount Point: {partition['mountpoint']}\n"
description += f"Total Space: {partition['total_space']}\n"
description += f"Used Space: {partition['used_space']}\n"
description += f"Free Space: {partition['free_space']}\n"
description += f"Usage Percent: {partition['usage_percent']}%\n"
return description
def _create_tickets_for_issues(self, health_report: Dict[str, Any]):
issues = self._detect_issues(health_report)
if not issues:
logger.info("No issues detected.")
return
hostname = socket.gethostname()
action_type = self.TICKET_TEMPLATES['ACTION_TYPE']
environment = self.TICKET_TEMPLATES['ENVIRONMENT']
ticket_type = self.TICKET_TEMPLATES['TICKET_TYPE']
hardware_type = self.TICKET_TEMPLATES['HARDWARE_TYPE']
software_type = self.TICKET_TEMPLATES['SOFTWARE_TYPE']
for issue in issues:
if 'critical' in issue.lower():
priority = self.PRIORITIES['CRITICAL']
elif 'warning' in issue.lower():
# all warnings become LOW priority (4)
priority = self.PRIORITIES['LOW']
else:
# everything else stays at MEDIUM (3)
priority = self.PRIORITIES['MEDIUM']
category = self.TICKET_TEMPLATES['DEFAULT_CATEGORY']
issue_type = self.TICKET_TEMPLATES['DEFAULT_ISSUE_TYPE']
scope = self.TICKET_TEMPLATES['SCOPE']['SINGLE_NODE']
drive_size = ""
if "Drive" in issue and "/dev/" in issue:
device = re.search(r'/dev/[a-zA-Z0-9]+', issue).group(0)
drive_details = self._get_drive_details(device)
if drive_details['capacity']:
drive_size = f"[{drive_details['capacity']}] "
# Determine if this is a hardware or software issue
issue_category = 'SOFTWARE' if 'LXC' in issue else 'HARDWARE'
# Use the correct template based on issue category
category_template = hardware_type['HARDWARE'] if issue_category == 'HARDWARE' else software_type['SOFTWARE']
ticket_title = (
f"[{hostname}]"
f"{action_type['AUTO']}"
f"{category_template}"
f"{drive_size}"
f"{issue}"
f"{scope}"
f"{environment['PRODUCTION']}"
f"{ticket_type['MAINTENANCE']}"
)
description = self._generate_detailed_description(issue, health_report)
ticket_payload = {
"title": ticket_title,
"description": description,
"priority": priority,
"status": "Open",
"category": category,
"type": issue_type
}
if self.dry_run:
logger.info("Dry-run mode enabled. Simulating ticket creation:")
logger.info(json.dumps(ticket_payload, indent=4))
else:
try:
response = requests.post(
self.ticket_api_url,
json=ticket_payload,
headers={'Content-Type': 'application/json'}
)
response_data = response.json()
if response_data.get('success'):
logger.info(f"Ticket created successfully: {ticket_title}")
logger.info(f"Ticket ID: {response_data.get('ticket_id')}")
elif response_data.get('error') == 'Duplicate ticket':
logger.info(f"Duplicate ticket detected - existing ticket ID: {response_data.get('existing_ticket_id')}")
continue
else:
logger.error(f"Failed to create ticket: {response_data.get('error')}")
except Exception as e:
logger.error(f"Error creating ticket: {e}")
def _detect_issues(self, health_report: Dict[str, Any]) -> List[str]:
"""
Detect issues in the health report including non-critical issues.
:param health_report: The comprehensive health report from the checks.
:return: List of issue descriptions detected during checks.
"""
issues = []
# Check for drive-related issues
for drive in health_report.get('drives_health', {}).get('drives', []):
if drive.get('smart_issues'):
issues.append(f"Drive {drive['device']} has SMART issues: {', '.join(drive['smart_issues'])}")
if drive.get('temperature') and drive['temperature'] > self.CONFIG['THRESHOLDS']['TEMPERATURE_WARNING']:
issues.append(f"Drive {drive['device']} temperature is high: {drive['temperature']}°C")
# Check for ECC memory errors
memory_health = health_report.get('memory_health', {})
if memory_health.get('has_ecc') and memory_health.get('ecc_errors'):
issues.extend(memory_health['ecc_errors'])
# Check for CPU-related issues
cpu_health = health_report.get('cpu_health', {})
if cpu_health and cpu_health.get('cpu_usage_percent', 0) > self.CONFIG['THRESHOLDS']['CPU_WARNING']:
issues.append("CPU usage is above threshold")
# Check for network-related issues
network_health = health_report.get('network_health', {})
for network in ['management_network', 'ceph_network']:
if network_health.get(network, {}).get('issues'):
issues.extend(network_health[network]['issues'])
lxc_health = health_report.get('lxc_health', {})
if lxc_health.get('status') in ['WARNING', 'CRITICAL']:
issues.extend(lxc_health.get('issues', []))
logger.info("=== Issue Detection Started ===")
logger.info(f"Checking drives: {len(health_report['drives_health']['drives'])} found")
logger.info(f"Memory status: {health_report['memory_health']['status']}")
logger.info(f"CPU status: {health_report['cpu_health']['status']}")
logger.info(f"Network status: {health_report['network_health']}")
logger.info(f"Detected issues: {issues}")
logger.info("=== Issue Detection Completed ===\n")
return issues
def _get_all_disks(self) -> List[str]:
"""
Get all physical disks using multiple detection methods.
"""
disks = set()
# Method 1: Use lsblk to get physical disks
try:
result = subprocess.run(
['lsblk', '-d', '-n', '-o', 'NAME'],
stdout=subprocess.PIPE,
text=True
)
disks.update(f"/dev/{disk}" for disk in result.stdout.strip().split('\n'))
logger.debug(f"Disks found via lsblk: {disks}")
except Exception as e:
logger.debug(f"lsblk detection failed: {e}")
# Method 2: Direct device scanning
for pattern in ['/dev/sd*', '/dev/nvme*n*']:
try:
matches = glob.glob(pattern)
# Exclude partitions (e.g. /dev/sda1, /dev/nvme0n1p1) but keep whole disks and NVMe namespaces
disks.update(d for d in matches if not re.search(r'(?:sd[a-z]+\d+|nvme\d+n\d+p\d+)$', d))
logger.debug(f"Disks found via glob {pattern}: {matches}")
except Exception as e:
logger.debug(f"Glob detection failed for {pattern}: {e}")
return list(disks)
def _is_excluded_mount(self, mountpoint: str) -> bool:
"""Check if a mountpoint should be excluded from monitoring."""
# Check exact matches
if mountpoint in self.CONFIG['EXCLUDED_MOUNTS']:
return True
# Check patterns
for pattern in self.CONFIG['EXCLUDED_PATTERNS']:
if re.match(pattern, mountpoint):
return True
return False
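# Examples of the exclusion logic above, given the CONFIG entries:
#   '/mnt/pve/mediafs'        -> True  (exact EXCLUDED_MOUNTS match)
#   '/mnt/pve/mediafs/movies' -> True  (matches r'/mnt/pve/mediafs.*')
#   '/var/lib/vz'             -> False (monitored)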
def _parse_size(self, size_str: str) -> float:
"""
Parse size string with units to bytes.
:param size_str: String containing size with unit (e.g. '15.7G', '21.8T')
:return: Size in bytes as float
"""
logger.debug(f"Parsing size string: {size_str}")
try:
# Skip non-size strings
if not isinstance(size_str, str):
logger.debug(f"Not a string: {size_str}")
return 0.0
if not any(unit in size_str.upper() for unit in ['B', 'K', 'M', 'G', 'T']):
logger.debug(f"No valid size unit found in: {size_str}")
return 0.0
# Define multipliers for units
multipliers = {
'B': 1,
'K': 1024,
'M': 1024**2,
'G': 1024**3,
'T': 1024**4
}
# Extract numeric value and unit
match = re.match(r'(\d+\.?\d*)', size_str)
if not match:
logger.debug(f"Could not extract numeric value from: {size_str}")
return 0.0
value = float(match.group(1))
unit_match = re.search(r'([BKMGT])', size_str.upper())
if not unit_match:
logger.debug(f"Could not extract unit from: {size_str}")
return 0.0
unit = unit_match.group(1)
logger.debug(f"Extracted value: {value}, unit: {unit}")
# Convert to bytes
bytes_value = value * multipliers.get(unit, 0)
logger.debug(f"Converted size to bytes: {bytes_value}")
return bytes_value
except (ValueError, AttributeError, TypeError) as e:
logger.debug(f"Failed to parse size string: {size_str}")
logger.debug(f"Parse error details: {str(e)}")
return 0.0
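# Example conversions performed by _parse_size (sizes as reported by 'pct df'):
#   '15.7G' -> 15.7 * 1024**3 ≈ 1.69e10 bytes
#   '21.8T' -> 21.8 * 1024**4 ≈ 2.40e13 bytes
#   'foo'   -> 0.0 (no recognizable unit)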
def _is_physical_disk(self, device_path):
"""
Check if the device is a physical disk, excluding logical volumes and special devices.
:param device_path: Path to the device
:return: Boolean indicating if it's a relevant physical disk
"""
logger.debug(f"Checking device: {device_path}")
# Exclude known non-physical or special devices
excluded_patterns = [
r'/dev/mapper/', # LVM devices
r'/dev/dm-', # Device mapper devices
r'/dev/loop', # Loop devices
r'/dev/rbd', # Ceph RBD devices
r'/boot', # Boot partitions
r'/boot/efi', # EFI partitions
r'(?:sd[a-z]+|hd[a-z]+|mmcblk\d+p|nvme\d+n\d+p)[0-9]+$' # Partition numbers (keeps whole NVMe namespaces like nvme0n1)
]
if any(re.search(pattern, device_path) for pattern in excluded_patterns):
logger.debug(f"Device {device_path} excluded due to pattern match")
return False
# Match physical devices
physical_patterns = [
r'/dev/sd[a-z]+$', # SATA/SAS drives
r'/dev/nvme\d+n\d+$', # NVMe drives
r'/dev/mmcblk\d+$', # MMC/SD cards
r'/dev/hd[a-z]+$' # IDE drives (legacy)
]
is_physical = any(re.match(pattern, device_path) for pattern in physical_patterns)
logger.debug(f"Device {device_path} physical disk check result: {is_physical}")
return is_physical
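# Examples for the checks above:
#   '/dev/sda'             -> True   (whole SATA/SAS disk)
#   '/dev/sda1'            -> False  (partition)
#   '/dev/nvme0n1'         -> True   (NVMe namespace)
#   '/dev/mapper/pve-root' -> False  (LVM/device-mapper device)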
def _check_disk_firmware(self, device: str) -> Dict[str, Any]:
"""
Check disk firmware version against known problematic versions.
"""
firmware_info = {
'version': None,
'model': None,
'manufacturer': None,
'is_problematic': False,
'known_issues': []
}
MANUFACTURER_PATTERNS = {
'Western Digital': ['WDC', 'Western Digital', 'Ultrastar'],
'Samsung': ['Samsung', 'SAMSUNG'],
'Seagate': ['Seagate', 'ST'],
'Intel': ['Intel', 'INTEL'],
'Micron': ['Micron', 'Crucial'],
'Toshiba': ['Toshiba', 'TOSHIBA']
}
try:
result = subprocess.run(
['smartctl', '-i', device],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
)
model_line = None
for line in result.stdout.split('\n'):
if 'Firmware Version:' in line:
firmware_info['version'] = line.split(':')[1].strip()
elif 'Model Family:' in line:
model_line = line
firmware_info['model'] = line.split(':')[1].strip()
elif 'Device Model:' in line and not firmware_info['model']:
model_line = line
firmware_info['model'] = line.split(':')[1].strip()
# Determine manufacturer
if model_line:
for manufacturer, patterns in MANUFACTURER_PATTERNS.items():
if any(pattern in model_line for pattern in patterns):
firmware_info['manufacturer'] = manufacturer
break
# Check against known problematic versions
if firmware_info['manufacturer'] and firmware_info['model']:
# Check if manufacturer exists in our problematic firmware database
if firmware_info['manufacturer'] in self.PROBLEMATIC_FIRMWARE:
for model, versions in self.PROBLEMATIC_FIRMWARE[firmware_info['manufacturer']].items():
if model in firmware_info['model'] and firmware_info['version'] in versions:
firmware_info['is_problematic'] = True
firmware_info['known_issues'].append(
f"Known problematic firmware version {firmware_info['version']} "
f"for {firmware_info['model']}"
)
logger.debug(f"=== Firmware Check for {device} ===")
logger.debug(f"Firmware version: {firmware_info['version']}")
logger.debug(f"Model: {firmware_info['model']}")
logger.debug(f"Manufacturer: {firmware_info['manufacturer']}")
logger.debug(f"Known issues: {firmware_info['known_issues']}")
logger.debug("=== End Firmware Check ===\n")
except Exception as e:
firmware_info['known_issues'].append(f"Error checking firmware: {str(e)}")
return firmware_info
def _parse_smart_value(self, raw_value: str) -> int:
"""
Parse SMART values handling different formats including NVMe temperature readings
"""
try:
# Handle temperature values with °C
if isinstance(raw_value, str) and '°C' in raw_value:
# Extract only the numeric portion before °C
temp_value = raw_value.split('°C')[0].strip()
return int(temp_value)
# Handle time format (e.g., '15589h+17m+33.939s')
if 'h+' in raw_value:
return int(raw_value.split('h+')[0])
# Handle hex values
if '0x' in raw_value:
return int(raw_value, 16)
# Handle basic numbers
return int(raw_value)
except ValueError:
logger.debug(f"Could not parse SMART value: {raw_value}")
return 0
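# Example inputs handled by _parse_smart_value:
#   '36 °C'              -> 36    (NVMe temperature; assumes the number precedes '°C')
#   '15589h+17m+33.939s' -> 15589 (power-on time format used by some drives)
#   '0x0000000000000000' -> 0     (hex raw value)
#   '112'                -> 112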
def _check_smart_health(self, device: str) -> Dict[str, Any]:
"""
Enhanced SMART health check with detailed failure thresholds.
"""
smart_health = {
'status': 'HEALTHY',
'severity': 'NORMAL',
'issues': [],
'temp': None,
'attributes': {}
}
# Define critical SMART attributes and their thresholds
SMART_THRESHOLDS = {
'Reallocated_Sector_Ct': {'warning': 5, 'critical': 10},
'Current_Pending_Sector': {'warning': 1, 'critical': 5},
'Offline_Uncorrectable': {'warning': 1, 'critical': 2},
'Reported_Uncorrect': {'warning': 1, 'critical': 10},
'Spin_Retry_Count': {'warning': 1, 'critical': 5},
# 'Command_Timeout': {'warning': 5, 'critical': 10}, # Removed
'Power_Cycle_Count': {'warning': 5000, 'critical': 10000},
'Power_On_Hours': {'warning': 61320, 'critical': 70080}, # ~7-8 years
'Media_Wearout_Indicator': {'warning': 30, 'critical': 10},
'Temperature_Celsius': {'warning': 65, 'critical': 75},
'Host_Writes_32MiB': {'warning': 50000000, 'critical': 100000000},
'Wear_Leveling_Count': {'warning': 2000, 'critical': 3000},
'Available_Spare': {'warning': 30, 'critical': 10},
'Program_Fail_Count': {'warning': 10, 'critical': 20},
'Erase_Fail_Count': {'warning': 10, 'critical': 20},
# 'Raw_Read_Error_Rate': {'warning': 50, 'critical': 100}, # Removed
# 'Seek_Error_Rate': {'warning': 50, 'critical': 100}, # Removed
'Load_Cycle_Count': {'warning': 900000, 'critical': 1000000},
'SSD_Life_Left': {'warning': 30, 'critical': 10}
}
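# The attribute parsing below assumes the classic 'smartctl -A' table layout, where RAW_VALUE is
# the 10th whitespace-separated column (parts[9]), e.g.:
#   ID# ATTRIBUTE_NAME        FLAG   VALUE WORST THRESH TYPE     UPDATED WHEN_FAILED RAW_VALUE
#     5 Reallocated_Sector_Ct 0x0033 100   100   010    Pre-fail Always  -           0
# Note: Reported_Uncorrect is checked both explicitly and by the generic loop below, so its issue
# text can appear twice.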
try:
# Get firmware information
firmware_info = self._check_disk_firmware(device)
if firmware_info['is_problematic']:
smart_health['severity'] = 'WARNING'
smart_health['issues'].extend(firmware_info['known_issues'])
# Get detailed SMART data including performance metrics
result = subprocess.run(
['smartctl', '-A', '-H', '-l', 'error', '-l', 'background', device],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
)
output = result.stdout
# Check overall health status
if 'FAILED' in output and 'PASSED' not in output:
smart_health['status'] = 'UNHEALTHY'
smart_health['severity'] = 'CRITICAL'
smart_health['issues'].append("SMART overall health check failed")
# Parse SMART attributes with thresholds
for line in output.split('\n'):
if 'Reported_Uncorrect' in line:
parts = line.split()
raw_value = self._parse_smart_value(parts[9])
logger.debug(f"Found Reported_Uncorrect value: {raw_value}")
smart_health['attributes']['Reported_Uncorrect'] = raw_value
if raw_value >= SMART_THRESHOLDS['Reported_Uncorrect']['critical']:
smart_health['status'] = 'UNHEALTHY'
smart_health['severity'] = 'CRITICAL'
smart_health['issues'].append(f"Critical uncorrectable errors: {raw_value}")
elif raw_value >= SMART_THRESHOLDS['Reported_Uncorrect']['warning']:
if smart_health['severity'] != 'CRITICAL':
smart_health['severity'] = 'WARNING'
smart_health['issues'].append(f"Warning: uncorrectable errors detected: {raw_value}")
for attr, thresholds in SMART_THRESHOLDS.items():
if attr in line:
parts = line.split()
if len(parts) >= 10:
raw_value = self._parse_smart_value(parts[9])
smart_health['attributes'][attr] = raw_value
if attr == 'Temperature_Celsius':
smart_health['temp'] = raw_value
if raw_value >= thresholds['critical']:
smart_health['severity'] = 'CRITICAL'
smart_health['issues'].append(f"Critical temperature: {raw_value}°C")
elif raw_value >= thresholds['warning']:
smart_health['severity'] = 'WARNING'
smart_health['issues'].append(f"High temperature: {raw_value}°C")
else:
if raw_value >= thresholds['critical']:
smart_health['severity'] = 'CRITICAL'
smart_health['issues'].append(f"Critical {attr}: {raw_value}")
elif raw_value >= thresholds['warning']:
if smart_health['severity'] != 'CRITICAL':
smart_health['severity'] = 'WARNING'
smart_health['issues'].append(f"Warning {attr}: {raw_value}")
# Check for recent SMART errors
error_log_pattern = r"Error \d+ occurred at disk power-on lifetime: (\d+) hours"
error_matches = re.finditer(error_log_pattern, output)
recent_errors = []
for match in error_matches:
error_hour = int(match.group(1))
current_hours = smart_health['attributes'].get('Power_On_Hours', 0)
if current_hours - error_hour < 168: # Errors within last week
recent_errors.append(match.group(0))
if recent_errors:
smart_health['severity'] = 'WARNING'
smart_health['issues'].extend(recent_errors)
logger.debug(f"=== SMART Health Check for {device} ===")
logger.debug("Raw SMART attributes:")
for attr, value in smart_health['attributes'].items():
logger.debug(f"{attr}: {value}")
logger.debug(f"Temperature: {smart_health['temp']}°C")
logger.debug(f"Detected Issues: {smart_health['issues']}")
logger.debug("=== End SMART Check ===\n")
# Special handling for NVMe drives
if 'nvme' in device:
nvme_result = subprocess.run(
['nvme', 'smart-log', device],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
)
logger.debug(f"NVMe smart-log raw output for {device}:")
logger.debug(nvme_result.stdout)
# Add this line to initialize the temperature attribute
smart_health['attributes']['Temperature_Celsius'] = None
for line in nvme_result.stdout.split('\n'):
if 'temperature' in line.lower():
temp_str = line.split(':')[1].strip()
logger.debug(f"Raw temperature string: {temp_str}")
# Extract the first numeric run as the temperature (e.g. "36 C (309 Kelvin)" -> 36)
temp_match = re.search(r'\d+', temp_str)
if not temp_match:
continue
temp_value = int(temp_match.group(0))
logger.debug(f"Parsed temperature value: {temp_value}")
# Set both temperature fields
smart_health['temp'] = temp_value
smart_health['attributes']['Temperature_Celsius'] = temp_value
logger.debug(f"Final temperature recorded: {smart_health['temp']}")
break
except Exception as e:
smart_health['status'] = 'ERROR'
smart_health['severity'] = 'UNKNOWN'
smart_health['issues'].append(f"Error checking SMART: {str(e)}")
return smart_health
def _check_drives_health(self) -> Dict[str, Any]:
drives_health = {'overall_status': 'NORMAL', 'drives': []}
try:
# Get physical disks only
physical_disks = [disk for disk in self._get_all_disks()
if disk.startswith(('/dev/sd', '/dev/nvme'))]
logger.debug(f"Checking physical disks: {physical_disks}")
# Get ALL partition information including device mapper
partitions = psutil.disk_partitions(all=True)
# Create mapping of base devices to their partitions
device_partitions = {}
for part in partitions:
# Extract base device (e.g. /dev/sda from /dev/sda1, /dev/nvme0n1 from /dev/nvme0n1p1)
base_device = re.match(r'(/dev/nvme\d+n\d+|/dev/[a-z]+)', part.device)
if base_device:
base_dev = base_device.group(1)
if base_dev not in device_partitions:
device_partitions[base_dev] = []
device_partitions[base_dev].append(part)
overall_status = 'NORMAL'
for disk in physical_disks:
drive_report = {
'device': disk,
'partitions': [],
'smart_status': 'UNKNOWN',
'usage_percent': 0
}
# Add partition information if available
if disk in device_partitions:
total_used = 0
total_space = 0
for partition in device_partitions[disk]:
try:
usage = psutil.disk_usage(partition.mountpoint)
total_used += usage.used
total_space += usage.total
part_info = {
'device': partition.device,
'mountpoint': partition.mountpoint,
'fstype': partition.fstype,
'total_space': self._convert_bytes(usage.total),
'used_space': self._convert_bytes(usage.used),
'free_space': self._convert_bytes(usage.free),
'usage_percent': usage.percent
}
drive_report['partitions'].append(part_info)
except Exception as e:
logger.debug(f"Error getting partition usage for {partition.device}: {e}")
# Calculate overall drive usage percentage
if total_space > 0:
drive_report['usage_percent'] = (total_used / total_space) * 100
# Check SMART health
smart_health = self._check_smart_health(disk)
drive_report.update({
'smart_status': smart_health['status'],
'smart_issues': smart_health['issues'],
'temperature': smart_health['temp'],
'smart_attributes': smart_health['attributes']
})
if smart_health['status'] == 'UNHEALTHY':
overall_status = 'CRITICAL'
elif smart_health['issues'] and overall_status != 'CRITICAL':
overall_status = 'WARNING'
drives_health['drives'].append(drive_report)
drives_health['overall_status'] = overall_status
except Exception as e:
logger.error(f"Error checking drives health: {str(e)}")
return drives_health
@staticmethod
def _convert_bytes(bytes_value: int, suffix: str = 'B') -> str:
"""
Convert bytes to a human-readable format.
:param bytes_value: Number of bytes to convert.
:param suffix: Suffix to append (default is 'B' for bytes).
:return: Formatted string with the size in human-readable form.
"""
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(bytes_value) < 1024.0:
return f"{bytes_value:.1f}{unit}{suffix}"
bytes_value /= 1024.0
return f"{bytes_value:.1f}Y{suffix}"
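# Examples: _convert_bytes(16_000_000_000) -> '14.9GB', _convert_bytes(512) -> '512.0B'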
def _convert_size_to_bytes(self, size_str: str) -> float:
"""Convert size string with units to bytes"""
units = {'B': 1, 'K': 1024, 'M': 1024**2, 'G': 1024**3, 'T': 1024**4}
size = float(size_str[:-1])
unit = size_str[-1].upper()
return size * units[unit]
def _check_memory_usage(self) -> Dict[str, Any]:
"""
Check for ECC memory errors if ECC memory is present.
"""
memory_health = {
'has_ecc': False,
'ecc_errors': [],
'status': 'OK',
'total_memory': self._convert_bytes(psutil.virtual_memory().total),
'used_memory': self._convert_bytes(psutil.virtual_memory().used),
'memory_percent': psutil.virtual_memory().percent
}
try:
# First check using dmidecode
result = subprocess.run(
['dmidecode', '--type', 'memory'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
)
if 'Error Correction Type: Multi-bit ECC' in result.stdout:
memory_health['has_ecc'] = True
# If dmidecode didn't find ECC, try the edac method as backup
if not memory_health['has_ecc']:
edac_path = '/sys/devices/system/edac/mc'
if os.path.exists(edac_path) and os.listdir(edac_path):
for mc_dir in glob.glob('/sys/devices/system/edac/mc/mc[0-9]*'):
if os.path.exists(f"{mc_dir}/csrow0"):
memory_health['has_ecc'] = True
break
# If ECC is present, check for errors
if memory_health['has_ecc']:
for mc_dir in glob.glob('/sys/devices/system/edac/mc/mc[0-9]*'):
if os.path.exists(f"{mc_dir}/csrow0"):
ue_count = self._read_ecc_count(f"{mc_dir}/csrow0/ue_count")
if ue_count > 0:
memory_health['status'] = 'CRITICAL'
memory_health['ecc_errors'].append(
f"Uncorrectable ECC errors detected in {os.path.basename(mc_dir)}: {ue_count}"
)
ce_count = self._read_ecc_count(f"{mc_dir}/csrow0/ce_count")
if ce_count > 0:
if memory_health['status'] != 'CRITICAL':
memory_health['status'] = 'WARNING'
memory_health['ecc_errors'].append(
f"Correctable ECC errors detected in {os.path.basename(mc_dir)}: {ce_count}"
)
except Exception as e:
memory_health['status'] = 'ERROR'
memory_health['ecc_errors'].append(f"Error checking ECC status: {str(e)}")
return memory_health
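# ECC counters are read from the kernel's EDAC sysfs tree using the legacy csrow layout, e.g.
# /sys/devices/system/edac/mc/mc0/csrow0/ce_count (correctable) and .../ue_count (uncorrectable).
# Newer kernels expose per-DIMM dimm*/ counters instead; on such systems csrow0 may be absent and
# ECC errors would go unreported by this check.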
def _read_ecc_count(self, filepath: str) -> int:
"""
Read ECC error count from a file.
:param filepath: Path to the ECC count file
:return: Number of ECC errors
"""
try:
with open(filepath, 'r') as f:
return int(f.read().strip())
except (OSError, ValueError):
return 0
def _check_cpu_usage(self) -> Dict[str, Any]:
"""
Check CPU usage and return health metrics.
:return: Dictionary with CPU health metrics.
"""
cpu_usage_percent = psutil.cpu_percent(interval=1)
cpu_health = {
'cpu_usage_percent': cpu_usage_percent,
'status': 'OK' if cpu_usage_percent < self.CONFIG['THRESHOLDS']['CPU_WARNING'] else 'WARNING'
}
return cpu_health
def _check_network_status(self) -> Dict[str, Any]:
"""
Check the status of network interfaces and report any issues.
:return: Dictionary containing network health metrics and any issues found.
"""
network_health = {
'management_network': {
'issues': [],
'status': 'OK',
'latency': None
},
'ceph_network': {
'issues': [],
'status': 'OK',
'latency': None
}
}
try:
# Check management network connectivity
mgmt_result = subprocess.run(
[
"ping",
"-c", str(self.CONFIG['NETWORKS']['PING_COUNT']),
"-W", str(self.CONFIG['NETWORKS']['PING_TIMEOUT']),
self.CONFIG['NETWORKS']['MANAGEMENT']
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
)
if mgmt_result.returncode != 0:
network_health['management_network']['status'] = 'CRITICAL'
network_health['management_network']['issues'].append(
"Management network is unreachable"
)
# Check Ceph network connectivity
ceph_result = subprocess.run(
[
"ping",
"-c", str(self.CONFIG['NETWORKS']['PING_COUNT']),
"-W", str(self.CONFIG['NETWORKS']['PING_TIMEOUT']),
self.CONFIG['NETWORKS']['CEPH']
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
)
if ceph_result.returncode != 0:
network_health['ceph_network']['status'] = 'CRITICAL'
network_health['ceph_network']['issues'].append(
"Ceph network is unreachable"
)
return network_health
except Exception as e:
logger.error(f"Network health check failed: {e}")
return {
'status': 'ERROR',
'error': str(e)
}
def _check_lxc_storage(self) -> Dict[str, Any]:
"""
Check storage utilization for all running LXC containers
"""
logger.debug("Starting LXC storage check")
lxc_health = {
'status': 'OK',
'containers': [],
'issues': []
}
try:
logger.debug("Executing 'pct list' command")
result = subprocess.run(
['pct', 'list'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
)
logger.debug(f"pct list output:\n{result.stdout}")
for line in result.stdout.split('\n')[1:]:
logger.debug(f"Raw LXC line: {line}")
if not line.strip():
continue
parts = line.split()
if len(parts) < 2:
logger.debug(f"Skipping invalid line: {line}")
continue
vmid, status = parts[0], parts[1]
logger.debug(f"Processing container VMID: {vmid}, Status: {status}")
if status.lower() == 'running':
logger.debug(f"Checking container {vmid} disk usage")
disk_info = subprocess.run(
['pct', 'df', vmid],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
)
logger.debug(f"Raw disk info output:\n{disk_info.stdout}")
container_info = {
'vmid': vmid,
'filesystems': []
}
for fs_line in disk_info.stdout.split('\n')[1:]:
if not fs_line.strip() or 'MP' in fs_line:
logger.debug(f"Skipping line: {fs_line}")
continue
logger.debug(f"Processing filesystem line: {fs_line}")
# Split the filesystem line (not the outer 'pct list' line) into columns
columns = fs_line.split()
logger.debug(f"Split columns: {columns}")
if len(columns) >= 6:
try:
# Skip excluded storage pools and media filesystems
if columns[0].startswith('appPool:') or '/mnt/pve/mediaf' in columns[0]:
continue
# The mountpoint is the last column
mountpoint = columns[-1]
# Skip excluded mountpoints
if self._is_excluded_mount(mountpoint):
logger.debug(f"Skipping excluded mount: {mountpoint}")
continue
# Parse size values safely
total_space = self._parse_size(columns[-5])
used_space = self._parse_size(columns[-4])
available_space = self._parse_size(columns[-3])
# Parse percentage safely
try:
usage_percent = float(columns[-2].rstrip('%'))
except (ValueError, IndexError):
# Calculate percentage if parsing fails
usage_percent = (used_space / total_space * 100) if total_space > 0 else 0
filesystem = {
'mountpoint': mountpoint,
'total_space': total_space,
'used_space': used_space,
'available': available_space,
'usage_percent': usage_percent
}
container_info['filesystems'].append(filesystem)
# Check thresholds
if usage_percent >= self.CONFIG['THRESHOLDS']['LXC_CRITICAL']:
lxc_health['status'] = 'CRITICAL'
issue = f"LXC {vmid} critical storage usage: {usage_percent:.1f}% on {mountpoint}"
lxc_health['issues'].append(issue)
elif usage_percent >= self.CONFIG['THRESHOLDS']['LXC_WARNING']:
if lxc_health['status'] != 'CRITICAL':
lxc_health['status'] = 'WARNING'
issue = f"LXC {vmid} high storage usage: {usage_percent:.1f}% on {mountpoint}"
lxc_health['issues'].append(issue)
logger.debug(f"Filesystem details: {filesystem}")
except Exception as e:
logger.debug(f"Error processing line: {str(e)}")
logger.debug(f"Full exception: {repr(e)}")
continue
# Only add container info if we have filesystem data
if container_info['filesystems']:
lxc_health['containers'].append(container_info)
logger.debug(f"Added container info for VMID {vmid}")
logger.debug("=== LXC Storage Check Summary ===")
logger.debug(f"Status: {lxc_health['status']}")
logger.debug(f"Total containers checked: {len(lxc_health['containers'])}")
logger.debug(f"Issues found: {len(lxc_health['issues'])}")
logger.debug("=== End LXC Storage Check ===")
except Exception as e:
logger.debug(f"Critical error during LXC storage check: {str(e)}")
lxc_health['status'] = 'ERROR'
error_msg = f"Error checking LXC storage: {str(e)}"
lxc_health['issues'].append(error_msg)
return lxc_health
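# Column handling above assumes each 'pct df <vmid>' data row ends with
# "<size> <used> <avail> <use%> <mountpoint>", so columns[-5:] map to those fields; rows whose
# first column starts with 'appPool:' or contains '/mnt/pve/mediaf' are skipped. This layout is
# inferred from the indexing, not verified against every Proxmox release.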
def main():
parser = argparse.ArgumentParser(description="System Health Monitor")
parser.add_argument(
"--dry-run",
action="store_true",
help="Enable dry-run mode (simulate ticket creation without actual API calls)."
)
args = parser.parse_args()
monitor = SystemHealthMonitor(
ticket_api_url=SystemHealthMonitor.CONFIG['TICKET_API_URL'],
dry_run=args.dry_run
)
monitor.run()
if __name__ == "__main__":
main()
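# Typical invocations (a sketch; install paths are assumptions):
#   ./hwmonDaemon.py --dry-run   # log the health summary and simulated tickets only
#   ./hwmonDaemon.py             # run checks and POST tickets to TICKET_API_URL
# smartctl, dmidecode, nvme and pct generally require root, so a root cron entry such as
#   0 * * * * /usr/local/bin/hwmonDaemon.py >> /var/log/hwmon.log 2>&1
# is a reasonable way to schedule it.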