#!/usr/bin/env python3
# hwmonDaemon/hwmonDaemon.py — system health monitoring daemon.
import os, sys, json, requests, psutil, socket, subprocess, logging, argparse, urllib.request, re
from typing import Dict, Any, List
2024-12-05 15:30:16 -05:00
# Module-level logger: emits timestamped DEBUG (and above) records to the
# console so the daemon's activity is visible when run in the foreground.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(
    logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
)
logger.addHandler(console_handler)
class SystemHealthMonitor:
    """
    One-shot system health monitor.

    Checks physical drives (usage + SMART), memory, CPU, and network
    reachability, then files a ticket for each detected issue through a
    simple HTTP ticket API.
    """

    def __init__(self,
                 ticket_api_url: str = 'http://10.10.10.45/create_ticket_api.php',
                 state_file: str = '/tmp/last_health_check.json',
                 dry_run: bool = False):
        """
        Initialize the system health monitor.

        :param ticket_api_url: URL for the ticket creation API.
        :param state_file: File path to track the last health check results.
            NOTE(review): stored but never read or written anywhere in this
            class — kept for interface compatibility.
        :param dry_run: If True, simulate API calls without sending requests.
        """
        self.ticket_api_url = ticket_api_url
        self.state_file = state_file
        self.dry_run = dry_run

    def run(self):
        """Perform a one-shot health check and create tickets for issues."""
        try:
            # Perform health checks and gather the report.
            health_report = self.perform_health_checks()
            # Create tickets for any detected issues.
            self._create_tickets_for_issues(health_report)
        except Exception as e:
            # Log (rather than print) for consistency with the module logger.
            logger.error(f"Unexpected error during health check: {e}")

    def perform_health_checks(self) -> Dict[str, Any]:
        """
        Perform comprehensive system health checks and return a report.

        :return: Dictionary with drive, memory, CPU and network results.
        """
        return {
            'drives_health': self._check_drives_health(),
            'memory_health': self._check_memory_usage(),
            'cpu_health': self._check_cpu_usage(),
            'network_health': self._check_network_status()
        }

    def _generate_detailed_description(self, issue: str, health_report: Dict[str, Any]) -> str:
        """
        Build a detailed, human-readable description for a single issue.

        :param issue: An issue summary produced by _detect_issues().
        :param health_report: The comprehensive health report from the checks.
        :return: Multi-line description suitable for a ticket body.
        """
        description = issue + "\n\n"
        if "Disk" in issue:
            for partition in health_report.get('drives_health', {}).get('drives', []):
                # Match the partition this issue refers to by its mountpoint.
                if partition.get('mountpoint') in issue:
                    description += f"Disk Device: {partition['device']}\n"
                    description += f"Mount Point: {partition['mountpoint']}\n"
                    description += f"Total Space: {partition['total_space']}\n"
                    description += f"Used Space: {partition['used_space']}\n"
                    description += f"Free Space: {partition['free_space']}\n"
                    description += f"Usage Percent: {partition['usage_percent']}%\n"
                    if partition.get('smart_status') == 'UNHEALTHY':
                        try:
                            # Include the full smartctl report for diagnosis.
                            result = subprocess.run(
                                ['smartctl', '-a', partition['device']],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                text=True
                            )
                            description += "\nSMART Information:\n"
                            description += result.stdout + result.stderr
                        except Exception as e:
                            description += f"Error getting SMART information: {str(e)}\n"
                    break
        elif "Memory" in issue:
            memory_health = health_report.get('memory_health', {})
            description += f"Total Memory: {memory_health.get('total_memory')}\n"
            description += f"Used Memory: {memory_health.get('used_memory')}\n"
            description += f"Memory Usage Percent: {memory_health.get('memory_percent')}%\n"
        elif "CPU" in issue:
            cpu_health = health_report.get('cpu_health', {})
            description += f"CPU Usage Percent: {cpu_health.get('cpu_usage_percent')}%\n"
        elif "network" in issue.lower():
            # Bug fix: issue strings read e.g. "Management network is
            # unreachable." (lower-case n), so the old `"Network" in issue`
            # test never matched; match case-insensitively instead.
            network_health = health_report.get('network_health', {})
            for network in ('management_network', 'ceph_network'):
                net_issues = network_health.get(network, {}).get('issues', [])
                if net_issues:
                    description += f"{network.replace('_', ' ').title()} Issues:\n"
                    description += "\n".join(net_issues)
                    description += "\n"
        return description

    @staticmethod
    def _classify_issue(issue: str) -> tuple:
        """
        Map an issue summary to ticket (priority, category, type).

        Priorities: "2" = high, "3" = medium, "4" = low.
        """
        if "Disk" in issue:
            return "3", "Hardware", "Incident"   # medium priority
        if "Memory" in issue:
            return "4", "Hardware", "Incident"   # low priority
        if "CPU" in issue:
            return "4", "Hardware", "Incident"   # low priority
        if "network" in issue.lower():
            # Bug fix: the old check (`"issues" in issue`) never matched the
            # actual network issue strings ("... network is unreachable."),
            # so network problems fell through to the low-priority default.
            return "2", "Network", "Problem"     # high priority
        return "4", "Other", "Task"              # default: low priority

    def _create_tickets_for_issues(self, health_report: Dict[str, Any]):
        """
        Create tickets for detected issues with dynamic parameters based on
        severity.

        :param health_report: The comprehensive health report from the checks.
        """
        issues = self._detect_issues(health_report)
        if not issues:
            logger.info("No issues detected.")
            return
        hostname = socket.gethostname()     # identifies the reporting node
        action_type = "[auto]"              # checks run automatically
        scope = "[cluster-wide]"            # scope of the issues
        environment = "[production]"        # environment where found
        ticket_type = "[maintenance]"       # type of ticket being created
        for issue in issues:
            priority, category, issue_type = self._classify_issue(issue)
            ticket_title = f"[{hostname}]{action_type}[{issue_type}] {issue} {scope}{environment}{ticket_type}"
            ticket_payload = {
                "title": ticket_title,
                "description": self._generate_detailed_description(issue, health_report),
                "priority": priority,
                "status": "Open",
                "category": category,
                "type": issue_type
            }
            if self.dry_run:
                # Dry-run mode: log the payload instead of sending it.
                logger.info("Dry-run mode enabled. Simulating ticket creation:")
                logger.info(json.dumps(ticket_payload, indent=4))
            else:
                try:
                    # Timeout added so a hung ticket API cannot stall the daemon.
                    response = requests.post(
                        self.ticket_api_url,
                        json=ticket_payload,
                        headers={'Content-Type': 'application/json'},
                        timeout=30
                    )
                    if response.status_code in (200, 201):
                        logger.info(f"Ticket created successfully: {ticket_title}")
                    else:
                        logger.error(
                            f"Failed to create ticket. Status code: "
                            f"{response.status_code}, response: {response.text}"
                        )
                except Exception as e:
                    logger.error(f"Error creating ticket: {e}")

    def _detect_issues(self, health_report: Dict[str, Any]) -> List[str]:
        """
        Detect issues in the health report, including non-critical ones.

        :param health_report: The comprehensive health report from the checks.
        :return: List of issue descriptions detected during checks.
        """
        issues = []
        # Drive-related issues: usage thresholds and SMART status.
        for partition in health_report.get('drives_health', {}).get('drives', []):
            if partition.get('usage_status') == 'CRITICAL_HIGH_USAGE':
                issues.append(
                    f"Disk {partition['mountpoint']} is {partition['usage_percent']}% full"
                )
            elif partition.get('usage_status') == 'WARNING_HIGH_USAGE':
                issues.append(
                    f"Disk {partition['mountpoint']} is {partition['usage_percent']}% full (Warning)"
                )
            if partition.get('smart_status') == 'UNHEALTHY':
                issues.append(f"Disk {partition['mountpoint']} has an unhealthy SMART status")
        # Memory-related issues.
        memory_health = health_report.get('memory_health', {})
        if memory_health and memory_health.get('memory_percent', 0) > 80:
            issues.append("Memory usage is above 80%")
        # CPU-related issues.
        cpu_health = health_report.get('cpu_health', {})
        if cpu_health and cpu_health.get('cpu_usage_percent', 0) > 80:
            issues.append("CPU usage is above 80%")
        # Network-related issues (per-network issue strings pass through as-is).
        network_health = health_report.get('network_health', {})
        for network in ('management_network', 'ceph_network'):
            if network_health.get(network, {}).get('issues'):
                issues.extend(network_health[network]['issues'])
        return issues

    def _is_physical_disk(self, device_path: str) -> bool:
        """
        Check if the device is a physical SATA, NVMe, or MMC disk.

        :param device_path: Path to the device node (e.g. /dev/sda1).
        :return: True if it looks like a physical disk device.
        """
        return bool(re.match(r'/dev/(sd[a-z]|nvme\d+n\d+|mmcblk\d+)', device_path))

    def _check_smart_health(self, device: str) -> Dict[str, Any]:
        """
        Check comprehensive SMART health metrics for a drive via smartctl.

        :param device: Device path passed to smartctl (e.g. /dev/sda).
        :return: {'status': HEALTHY|UNHEALTHY|ERROR, 'issues': [...],
                  'temp': int|None, 'attributes': {name: {raw, normalized}}}
        """
        smart_health = {
            'status': 'HEALTHY',
            'issues': [],
            'temp': None,
            'attributes': {}
        }
        try:
            result = subprocess.run(
                ['smartctl', '-A', '-H', device],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True
            )
            output = result.stdout
            # Overall SMART verdict: smartctl prints PASSED or FAILED.
            if 'FAILED' in output and 'PASSED' not in output:
                smart_health['status'] = 'UNHEALTHY'
                smart_health['issues'].append("SMART overall health check failed")
            # Critical attributes and their raw-value thresholds.
            critical_attributes = {
                'Reallocated_Sector_Ct': {'threshold': 0, 'critical': True},
                'Current_Pending_Sector': {'threshold': 0, 'critical': True},
                'Offline_Uncorrectable': {'threshold': 0, 'critical': True},
                'Reported_Uncorrect': {'threshold': 0, 'critical': True},
                'Command_Timeout': {'threshold': 5, 'critical': False},
                'Temperature_Celsius': {'threshold': 65, 'critical': False},
                'Wear_Leveling_Count': {'threshold': 10, 'critical': True},
                'Media_Wearout_Indicator': {'threshold': 20, 'critical': True}
            }
            for line in output.split('\n'):
                # Skip header / separator lines.
                if 'ATTRIBUTE_NAME' in line or '===' in line:
                    continue
                for attr_name, limits in critical_attributes.items():
                    if attr_name in line:
                        parts = line.split()
                        if len(parts) >= 10:
                            # Bug fix: raw values are not always plain
                            # integers (e.g. "33 (Min/Max ...)" variants);
                            # skip rows we cannot parse instead of raising.
                            try:
                                value = int(parts[9])       # raw value
                                normalized = int(parts[3])  # normalized value
                            except ValueError:
                                continue
                            smart_health['attributes'][attr_name] = {
                                'raw': value,
                                'normalized': normalized
                            }
                            if attr_name == 'Temperature_Celsius':
                                smart_health['temp'] = value
                                if value > limits['threshold']:
                                    smart_health['issues'].append(
                                        f"Drive temperature critical: {value}°C"
                                    )
                            elif value > limits['threshold']:
                                if limits['critical']:
                                    smart_health['status'] = 'UNHEALTHY'
                                smart_health['issues'].append(
                                    f"{attr_name} above threshold: {value}"
                                )
                            # Very low normalized values indicate imminent failure.
                            if normalized <= 10 and attr_name != 'Temperature_Celsius':
                                smart_health['issues'].append(
                                    f"{attr_name} normalized value critical: {normalized}"
                                )
            # Current (not historical) attribute failures: the WHEN_FAILED
            # column shows "-" when OK and "In_the_past" for old failures.
            if 'WHEN_FAILED' in output:
                for line in output.split('\n'):
                    # Bug fix: the header row also contains "WHEN_FAILED" and
                    # no "-", so it was previously flagged as a failure.
                    if 'ATTRIBUTE_NAME' in line:
                        continue
                    if 'WHEN_FAILED' in line and 'In_the_past' not in line and '-' not in line:
                        smart_health['status'] = 'UNHEALTHY'
                        smart_health['issues'].append(f"Current failure detected: {line}")
        except Exception as e:
            smart_health['status'] = 'ERROR'
            smart_health['issues'].append(f"Error checking SMART: {str(e)}")
        return smart_health

    def _check_drives_health(self) -> Dict[str, Any]:
        """
        Check health of physical drives: disk usage plus SMART status.

        :return: {'overall_status': NORMAL|WARNING|CRITICAL, 'drives': [...]}
        """
        drives_health = {'overall_status': 'NORMAL', 'drives': []}
        try:
            partitions = [p for p in psutil.disk_partitions() if self._is_physical_disk(p.device)]
            overall_status = 'NORMAL'
            for partition in partitions:
                drive_report = {
                    'device': partition.device,
                    'mountpoint': partition.mountpoint
                }
                # Disk usage thresholds: >90% critical, >80% warning.
                usage = psutil.disk_usage(partition.mountpoint)
                disk_usage_status = 'NORMAL'
                if usage.percent > 90:
                    disk_usage_status = 'CRITICAL_HIGH_USAGE'
                elif usage.percent > 80:
                    disk_usage_status = 'WARNING_HIGH_USAGE'
                drive_report.update({
                    'total_space': self._convert_bytes(usage.total),
                    'used_space': self._convert_bytes(usage.used),
                    'free_space': self._convert_bytes(usage.free),
                    'usage_percent': usage.percent,
                    'usage_status': disk_usage_status
                })
                # SMART health for the underlying device.
                smart_health = self._check_smart_health(partition.device)
                drive_report.update({
                    'smart_status': smart_health['status'],
                    'smart_issues': smart_health['issues'],
                    'temperature': smart_health['temp'],
                    'smart_attributes': smart_health['attributes']
                })
                # CRITICAL trumps WARNING; WARNING never downgrades CRITICAL.
                if smart_health['status'] == 'UNHEALTHY' or disk_usage_status == 'CRITICAL_HIGH_USAGE':
                    overall_status = 'CRITICAL'
                elif disk_usage_status == 'WARNING_HIGH_USAGE' and overall_status != 'CRITICAL':
                    overall_status = 'WARNING'
                drives_health['drives'].append(drive_report)
            drives_health['overall_status'] = overall_status
        except Exception as e:
            logger.error(f"Error checking drives health: {str(e)}")
        return drives_health

    @staticmethod
    def _convert_bytes(bytes_value: int, suffix: str = 'B') -> str:
        """
        Convert bytes to a human-readable format.

        :param bytes_value: Number of bytes to convert.
        :param suffix: Suffix to append (default is 'B' for bytes).
        :return: Formatted string with the size in human-readable form.
        """
        for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
            if abs(bytes_value) < 1024.0:
                return f"{bytes_value:.1f}{unit}{suffix}"
            bytes_value /= 1024.0
        return f"{bytes_value:.1f}Y{suffix}"

    def _check_memory_usage(self) -> Dict[str, Any]:
        """
        Check memory usage and return health metrics.

        :return: Dictionary with memory health metrics.
        """
        memory_info = psutil.virtual_memory()
        return {
            'total_memory': self._convert_bytes(memory_info.total),
            'used_memory': self._convert_bytes(memory_info.used),
            'memory_percent': memory_info.percent,
            'status': 'OK' if memory_info.percent < 90 else 'WARNING'
        }

    def _check_cpu_usage(self) -> Dict[str, Any]:
        """
        Check CPU usage and return health metrics.

        :return: Dictionary with CPU health metrics.
        """
        # One-second sampling interval for a meaningful instantaneous reading.
        cpu_usage_percent = psutil.cpu_percent(interval=1)
        return {
            'cpu_usage_percent': cpu_usage_percent,
            'status': 'OK' if cpu_usage_percent < 90 else 'WARNING'
        }

    def _check_network_status(self) -> Dict[str, Any]:
        """
        Ping the management and Ceph gateways and report reachability.

        :return: Per-network issue lists, or {'error': ...} on failure.
        """
        network_health = {
            'management_network': {'issues': []},
            'ceph_network': {'issues': []}
        }
        try:
            # One ICMP echo per network; a non-zero exit code means unreachable.
            checks = [
                ('management_network', '10.10.10.1', "Management network is unreachable."),
                ('ceph_network', '10.10.90.1', "Ceph network is unreachable."),
            ]
            for key, gateway, message in checks:
                proc = subprocess.run(["ping", "-c", "1", gateway],
                                      stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                if proc.returncode != 0:
                    network_health[key]['issues'].append(message)
            return network_health
        except Exception as e:
            logger.error(f"Network health check failed: {e}")
            return {'error': str(e)}
2024-12-05 15:30:16 -05:00
def main():
    """
    CLI entry point: parse arguments and run one health-check pass.

    Exits with status 1 on any unexpected error.
    """
    try:
        # Argument parser for CLI options.
        parser = argparse.ArgumentParser(description="System Health Monitor")
        parser.add_argument(
            "--dry-run",
            action="store_true",
            help="Enable dry-run mode (simulate ticket creation without actual API calls)."
        )
        args = parser.parse_args()

        # Configuration (hard-coded for now; could move to a config file).
        ticket_api_url = "http://10.10.10.45/create_ticket_api.php"
        state_file = "/tmp/last_health_check.json"

        monitor = SystemHealthMonitor(
            ticket_api_url=ticket_api_url,
            state_file=state_file,
            dry_run=args.dry_run
        )
        # run() already performs the network check as part of
        # perform_health_checks(); the previous extra call to the private
        # _check_network_status() re-pinged the gateways for no benefit.
        monitor.run()
    except Exception as e:
        logger.error(f"An unexpected error occurred: {e}")
        sys.exit(1)
if __name__ == "__main__":
    # Argument parsing happens inside main(); the second parser that used to
    # live here produced a `dry_run_mode` value that was never used.
    main()