"""
Metrics Collection and Monitoring for ArXiv Scraper Service

Comprehensive metrics collection, monitoring, and alerting system.
"""

import functools
import json
import logging
import threading
import time
from collections import defaultdict, deque
from datetime import datetime, timezone, timedelta
from pathlib import Path
from typing import Dict, List, Any, Optional, Callable

import psutil

from .exceptions import ResourceError


class MetricsCollector:
    """
    Comprehensive metrics collector for scraper service monitoring.

    Features:
    - Performance metrics (timing, throughput)
    - System metrics (CPU, memory, disk)
    - Application metrics (papers processed, errors)
    - Time-series data with configurable retention
    - Alert threshold monitoring

    Thread safety: all metric state is guarded by an internal re-entrant
    lock, so a single collector may be shared across threads (the class
    itself runs a background cleanup thread, so this is required).
    Alert callbacks are always invoked *outside* the lock.
    """

    def __init__(self, retention_hours: int = 24):
        """Initialize metrics collector.

        Args:
            retention_hours: Hours to retain time-series data
        """
        self.retention_hours = retention_hours
        self.logger = logging.getLogger(__name__)

        # Guards all mutable metric state below.  RLock because public
        # methods call helpers that also acquire it.
        self._lock = threading.RLock()

        # Metrics storage
        self._counters: Dict[str, int] = defaultdict(int)
        self._gauges: Dict[str, float] = defaultdict(float)
        # Bounded per-key history: deque(maxlen=...) discards the oldest
        # sample in O(1) instead of re-slicing a list on every insert.
        self._histograms: Dict[str, deque] = defaultdict(lambda: deque(maxlen=1000))
        self._timers: Dict[str, Dict[str, Any]] = {}
        # Sequence appended to timer IDs so two timers started for the same
        # name within the same microsecond cannot collide.
        self._timer_seq = 0

        # Time-series storage (circular buffer per metric key)
        self._timeseries = defaultdict(lambda: deque(maxlen=1000))

        # Alert thresholds
        self._alert_thresholds: Dict[str, Dict[str, Any]] = {}
        self._alert_callbacks: List[Callable[[str, float, Dict[str, Any]], None]] = []

        # Cleanup timer
        self._cleanup_thread: Optional[threading.Thread] = None
        self._cleanup_interval = 3600  # 1 hour
        self._shutdown_event = threading.Event()

        # Start background cleanup
        self._start_cleanup_thread()

        self.logger.info("Metrics collector initialized")

    def _start_cleanup_thread(self):
        """Start background daemon thread that prunes expired time-series data."""
        def cleanup_worker():
            # Event.wait doubles as an interruptible sleep: returns True
            # (and ends the loop) as soon as shutdown() sets the event.
            while not self._shutdown_event.wait(self._cleanup_interval):
                try:
                    self._cleanup_old_data()
                except Exception as e:
                    self.logger.error(f"Error in metrics cleanup: {e}")

        self._cleanup_thread = threading.Thread(target=cleanup_worker, daemon=True)
        self._cleanup_thread.start()

    def _cleanup_old_data(self):
        """Drop time-series entries older than the retention window."""
        cutoff_time = datetime.now(timezone.utc) - timedelta(hours=self.retention_hours)
        cutoff_timestamp = cutoff_time.timestamp()

        cleaned_count = 0

        # Hold the lock so concurrent writers cannot mutate the dict/deques
        # while we iterate (previously raced with metric recording).
        with self._lock:
            for data in self._timeseries.values():
                # Entries are appended in time order, so pruning from the
                # left stops at the first fresh entry.
                while data and data[0]['timestamp'] < cutoff_timestamp:
                    data.popleft()
                    cleaned_count += 1

        if cleaned_count > 0:
            self.logger.debug(f"Cleaned {cleaned_count} old metric entries")

    # Counter methods
    def increment(self, name: str, value: int = 1, tags: Optional[Dict[str, str]] = None):
        """Increment a counter by *value* (default 1)."""
        key = self._make_key(name, tags)
        with self._lock:
            self._counters[key] += value

        # Record time-series data (per-increment delta, not the running total)
        self._record_timeseries(key, value, 'counter')

    def get_counter(self, name: str, tags: Optional[Dict[str, str]] = None) -> int:
        """Get counter value (0 if never incremented).

        Uses .get() instead of defaultdict indexing so reading a missing
        counter does not create a spurious zero-valued entry.
        """
        key = self._make_key(name, tags)
        with self._lock:
            return self._counters.get(key, 0)

    # Gauge methods
    def set_gauge(self, name: str, value: float, tags: Optional[Dict[str, str]] = None):
        """Set gauge to an absolute value and evaluate alert thresholds."""
        key = self._make_key(name, tags)
        with self._lock:
            self._gauges[key] = value

        # Record time-series data
        self._record_timeseries(key, value, 'gauge')

        # Check alert thresholds (callbacks run outside the lock)
        self._check_alert_thresholds(key, value)

    def get_gauge(self, name: str, tags: Optional[Dict[str, str]] = None) -> float:
        """Get gauge value (0.0 if never set); does not create the entry."""
        key = self._make_key(name, tags)
        with self._lock:
            return self._gauges.get(key, 0.0)

    # Histogram methods
    def record_histogram(self, name: str, value: float, tags: Optional[Dict[str, str]] = None):
        """Record a histogram sample; only the most recent 1000 are kept."""
        key = self._make_key(name, tags)
        with self._lock:
            # deque(maxlen=1000) evicts the oldest sample automatically.
            self._histograms[key].append(value)

        # Record time-series data
        self._record_timeseries(key, value, 'histogram')

    def get_histogram_stats(self, name: str, tags: Optional[Dict[str, str]] = None) -> Dict[str, float]:
        """Get histogram statistics (count/min/max/mean and p50/p95/p99).

        Returns all-zero stats when no samples have been recorded.
        """
        key = self._make_key(name, tags)
        with self._lock:
            # Snapshot under the lock so concurrent appends can't skew stats.
            values = list(self._histograms.get(key, ()))

        if not values:
            return {'count': 0, 'min': 0, 'max': 0, 'mean': 0, 'p50': 0, 'p95': 0, 'p99': 0}

        sorted_values = sorted(values)
        count = len(values)

        return {
            'count': count,
            'min': sorted_values[0],
            'max': sorted_values[-1],
            'mean': sum(values) / count,
            'p50': self._percentile(sorted_values, 50),
            'p95': self._percentile(sorted_values, 95),
            'p99': self._percentile(sorted_values, 99)
        }

    # Timer methods
    def start_timer(self, name: str, tags: Optional[Dict[str, str]] = None) -> str:
        """Start a timer and return an opaque timer ID for stop_timer()."""
        with self._lock:
            # Microsecond timestamp plus a sequence number: two timers started
            # in the same microsecond previously produced colliding IDs.
            self._timer_seq += 1
            timer_id = f"{name}_{int(time.time() * 1000000)}_{self._timer_seq}"

            self._timers[timer_id] = {
                'name': name,
                # monotonic clock: immune to wall-clock (NTP/DST) adjustments.
                'start_time': time.monotonic(),
                'tags': tags
            }

        return timer_id

    def stop_timer(self, timer_id: str) -> float:
        """Stop timer, record its duration as a histogram, return the duration.

        Returns 0.0 (with a warning) for unknown/already-stopped timer IDs.
        """
        with self._lock:
            timer_info = self._timers.pop(timer_id, None)

        if timer_info is None:
            self.logger.warning(f"Timer not found: {timer_id}")
            return 0.0

        duration = time.monotonic() - timer_info['start_time']

        # Record under the original metric *name* + tags.  (Previously the
        # already-tagged key was passed back in with the tags, so tags were
        # applied twice and durations landed under an unreachable key.)
        self.record_histogram(timer_info['name'], duration, timer_info['tags'])

        return duration

    def time_function(self, name: str, tags: Optional[Dict[str, str]] = None):
        """Decorator to time function execution as a histogram sample.

        Failures still stop the timer and bump the 'function_errors' counter
        before re-raising.
        """
        def decorator(func: Callable) -> Callable:
            @functools.wraps(func)  # preserve __name__/__doc__ of the wrapped function
            def wrapper(*args, **kwargs):
                timer_id = self.start_timer(name, tags)
                try:
                    result = func(*args, **kwargs)
                    duration = self.stop_timer(timer_id)
                    self.logger.debug(f"Function {func.__name__} took {duration:.3f}s")
                    return result
                except Exception:
                    self.stop_timer(timer_id)
                    self.increment('function_errors', tags={'function': func.__name__})
                    raise
            return wrapper
        return decorator

    # System metrics
    def collect_system_metrics(self):
        """Collect host- and process-level metrics into gauges via psutil.

        Best-effort: any psutil failure is logged, never raised.
        """
        try:
            # CPU usage (blocks ~1s to measure an interval)
            cpu_percent = psutil.cpu_percent(interval=1)
            self.set_gauge('system.cpu.usage_percent', cpu_percent)

            # Memory usage
            memory = psutil.virtual_memory()
            self.set_gauge('system.memory.usage_percent', memory.percent)
            self.set_gauge('system.memory.available_mb', memory.available / 1024 / 1024)
            self.set_gauge('system.memory.used_mb', memory.used / 1024 / 1024)

            # Disk usage (root filesystem)
            disk = psutil.disk_usage('/')
            self.set_gauge('system.disk.usage_percent', disk.percent)
            self.set_gauge('system.disk.free_gb', disk.free / 1024 / 1024 / 1024)

            # Network I/O (cumulative byte counters since boot)
            network = psutil.net_io_counters()
            self.set_gauge('system.network.bytes_sent', network.bytes_sent)
            self.set_gauge('system.network.bytes_recv', network.bytes_recv)

            # Process-specific metrics
            process = psutil.Process()
            self.set_gauge('process.cpu.usage_percent', process.cpu_percent())

            memory_info = process.memory_info()
            self.set_gauge('process.memory.rss_mb', memory_info.rss / 1024 / 1024)
            self.set_gauge('process.memory.vms_mb', memory_info.vms / 1024 / 1024)

            # File descriptors
            try:
                fd_count = process.num_fds()
                self.set_gauge('process.file_descriptors', fd_count)
            except AttributeError:
                pass  # num_fds() not available on Windows

        except Exception as e:
            self.logger.error(f"Error collecting system metrics: {e}")

    # Application-specific metrics
    def record_scraping_metrics(self, papers_processed: int, papers_stored: int,
                              papers_skipped: int, errors: int, duration: float):
        """Record the counters and derived gauges for one scraping session.

        Args:
            papers_processed: Total papers seen this session.
            papers_stored: Papers successfully persisted.
            papers_skipped: Papers intentionally skipped.
            errors: Error count for the session.
            duration: Session wall time in seconds.
        """
        self.increment('scraping.papers_processed', papers_processed)
        self.increment('scraping.papers_stored', papers_stored)
        self.increment('scraping.papers_skipped', papers_skipped)
        self.increment('scraping.errors', errors)
        self.record_histogram('scraping.session_duration', duration)

        # Derived metrics; guard against zero-division on empty sessions.
        if papers_processed > 0:
            success_rate = (papers_processed - errors) / papers_processed
            self.set_gauge('scraping.success_rate', success_rate)

            throughput = papers_processed / duration if duration > 0 else 0
            self.set_gauge('scraping.throughput_per_second', throughput)

    def record_database_metrics(self, query_duration: float, query_type: str,
                              rows_affected: int = 0):
        """Record timing/volume metrics for one database operation.

        Args:
            query_duration: Query wall time in seconds.
            query_type: Label used as a tag (e.g. 'select', 'insert').
            rows_affected: Row count; recorded only when positive.
        """
        self.record_histogram(
            'database.query_duration',
            query_duration,
            tags={'query_type': query_type}
        )
        self.increment(
            'database.queries',
            tags={'query_type': query_type}
        )

        if rows_affected > 0:
            self.record_histogram(
                'database.rows_affected',
                rows_affected,
                tags={'query_type': query_type}
            )

    def record_oai_metrics(self, request_duration: float, status_code: int,
                          records_fetched: int = 0):
        """Record metrics for one OAI-PMH request.

        Args:
            request_duration: Request wall time in seconds.
            status_code: HTTP status, used as a request-count tag.
            records_fetched: Records returned; counted only when positive.
        """
        self.record_histogram('oai.request_duration', request_duration)
        self.increment(
            'oai.requests',
            tags={'status_code': str(status_code)}
        )

        if records_fetched > 0:
            self.increment('oai.records_fetched', records_fetched)

    # Alert management
    def set_alert_threshold(self, metric_name: str, threshold_value: float,
                          comparison: str = 'gt', tags: Optional[Dict[str, str]] = None):
        """Set (or replace) the alert threshold for a metric.

        Args:
            metric_name: Metric to watch (gauges trigger checks on set_gauge).
            threshold_value: Value to compare against.
            comparison: One of 'gt', 'lt', 'eq'.
            tags: Optional tags identifying the exact metric series.
        """
        key = self._make_key(metric_name, tags)

        with self._lock:
            self._alert_thresholds[key] = {
                'threshold': threshold_value,
                'comparison': comparison,  # 'gt', 'lt', 'eq'
                'metric_name': metric_name,
                'tags': tags
            }

    def add_alert_callback(self, callback: Callable[[str, float, Dict[str, Any]], None]):
        """Register a callback invoked as callback(key, value, threshold_info)."""
        with self._lock:
            self._alert_callbacks.append(callback)

    def _check_alert_thresholds(self, key: str, value: float):
        """Fire registered callbacks if *value* crosses the key's threshold."""
        with self._lock:
            threshold_info = self._alert_thresholds.get(key)
            callbacks = list(self._alert_callbacks)

        if threshold_info is None:
            return

        threshold = threshold_info['threshold']
        comparison = threshold_info['comparison']

        alert_triggered = (
            (comparison == 'gt' and value > threshold)
            or (comparison == 'lt' and value < threshold)
            or (comparison == 'eq' and value == threshold)
        )

        if alert_triggered:
            # Callbacks run outside the lock so user code cannot deadlock us;
            # a failing callback never prevents the others from firing.
            for callback in callbacks:
                try:
                    callback(key, value, threshold_info)
                except Exception as e:
                    self.logger.error(f"Error in alert callback: {e}")

    # Data export and reporting
    def get_current_metrics(self) -> Dict[str, Any]:
        """Get a point-in-time snapshot of all counters, gauges and histograms."""
        with self._lock:
            counters = dict(self._counters)
            gauges = dict(self._gauges)
            histogram_keys = list(self._histograms.keys())

        return {
            'timestamp': datetime.now(timezone.utc).isoformat(),
            'counters': counters,
            'gauges': gauges,
            'histogram_stats': {
                # Split the stored key back into (name, tags) so the stats
                # lookup reconstructs the identical key.
                key: self.get_histogram_stats(key.split('|')[0], self._parse_tags(key))
                for key in histogram_keys
            }
        }

    def get_time_series(self, metric_name: str, tags: Optional[Dict[str, str]] = None,
                       hours: int = 1) -> List[Dict[str, Any]]:
        """Get time-series entries for a metric within the last *hours* hours."""
        key = self._make_key(metric_name, tags)
        with self._lock:
            # Snapshot so filtering is safe against concurrent appends.
            data = list(self._timeseries.get(key, ()))

        # Filter by time range
        cutoff_time = datetime.now(timezone.utc) - timedelta(hours=hours)
        cutoff_timestamp = cutoff_time.timestamp()

        return [entry for entry in data if entry['timestamp'] >= cutoff_timestamp]

    def export_metrics(self, file_path: str, format: str = 'json'):
        """Export the current metrics snapshot to a file.

        Args:
            file_path: Destination path; parent directories are created.
            format: Only 'json' is supported.

        Raises:
            ValueError: For an unsupported format.
        """
        metrics_data = self.get_current_metrics()

        file_path = Path(file_path)
        file_path.parent.mkdir(parents=True, exist_ok=True)

        if format.lower() == 'json':
            with open(file_path, 'w', encoding='utf-8') as f:
                # default=str so datetime and other non-JSON values serialize.
                json.dump(metrics_data, f, indent=2, default=str)
        else:
            raise ValueError(f"Unsupported export format: {format}")

        self.logger.info(f"Metrics exported to {file_path}")

    def get_health_status(self) -> Dict[str, Any]:
        """Get overall system health derived from fresh system metrics.

        Returns a dict with 'overall' ('healthy'/'warning'/'critical'),
        a list of human-readable 'issues', and the raw 'metrics'.
        """
        # Refresh CPU/memory/disk gauges before evaluating them.
        self.collect_system_metrics()

        health_status = {
            'overall': 'healthy',
            'issues': [],
            'metrics': {}
        }

        cpu_usage = self.get_gauge('system.cpu.usage_percent')
        memory_usage = self.get_gauge('system.memory.usage_percent')
        disk_usage = self.get_gauge('system.disk.usage_percent')

        health_status['metrics'] = {
            'cpu_usage': cpu_usage,
            'memory_usage': memory_usage,
            'disk_usage': disk_usage
        }

        # Escalation: first warning-level issue -> 'warning', any further
        # issue -> 'critical'; high disk usage is immediately 'critical'.
        if cpu_usage > 80:
            health_status['issues'].append(f"High CPU usage: {cpu_usage:.1f}%")
            health_status['overall'] = 'warning' if health_status['overall'] == 'healthy' else 'critical'

        if memory_usage > 80:
            health_status['issues'].append(f"High memory usage: {memory_usage:.1f}%")
            health_status['overall'] = 'warning' if health_status['overall'] == 'healthy' else 'critical'

        if disk_usage > 90:
            health_status['issues'].append(f"High disk usage: {disk_usage:.1f}%")
            health_status['overall'] = 'critical'

        return health_status

    # Utility methods
    def _make_key(self, name: str, tags: Optional[Dict[str, str]] = None) -> str:
        """Build the storage key: 'name' or 'name|k1=v1,k2=v2' (tags sorted)."""
        if not tags:
            return name

        # Sorted tags make the key deterministic regardless of dict order.
        tag_string = ','.join(f"{k}={v}" for k, v in sorted(tags.items()))
        return f"{name}|{tag_string}"

    def _parse_tags(self, key: str) -> Optional[Dict[str, str]]:
        """Inverse of _make_key: recover the tags dict, or None if untagged."""
        if '|' not in key:
            return None

        _, tag_string = key.split('|', 1)
        tags = {}

        for tag in tag_string.split(','):
            if '=' in tag:
                k, v = tag.split('=', 1)
                tags[k] = v

        return tags

    def _record_timeseries(self, key: str, value: float, metric_type: str):
        """Append a timestamped data point to the metric's circular buffer."""
        entry = {
            # Wall-clock epoch seconds: compared against the retention cutoff.
            'timestamp': time.time(),
            'value': value,
            'type': metric_type
        }

        with self._lock:
            self._timeseries[key].append(entry)

    def _percentile(self, sorted_values: List[float], percentile: float) -> float:
        """Linear-interpolation percentile of an ascending-sorted list."""
        if not sorted_values:
            return 0.0

        # Fractional rank within [0, len-1].
        k = (len(sorted_values) - 1) * (percentile / 100)
        f = int(k)
        c = k - f

        if f == len(sorted_values) - 1:
            return sorted_values[f]

        # Interpolate between the two bracketing samples.
        return sorted_values[f] * (1 - c) + sorted_values[f + 1] * c

    def shutdown(self):
        """Stop the cleanup thread and run one final retention sweep."""
        self.logger.info("Shutting down metrics collector")
        self._shutdown_event.set()

        if self._cleanup_thread and self._cleanup_thread.is_alive():
            self._cleanup_thread.join(timeout=5)

        # Final cleanup
        self._cleanup_old_data()


# Global metrics instance
# Process-wide singleton; the lock makes lazy creation safe under concurrency
# (two threads racing through the unguarded `is None` check could previously
# construct two collectors, each starting its own cleanup thread).
_global_metrics: Optional[MetricsCollector] = None
_global_metrics_lock = threading.Lock()

def get_global_metrics() -> MetricsCollector:
    """Get the global metrics collector instance, creating it on first use.

    Thread-safe: uses double-checked locking so the common (already
    initialized) path avoids acquiring the lock.
    """
    global _global_metrics
    if _global_metrics is None:  # fast path, no lock
        with _global_metrics_lock:
            if _global_metrics is None:  # re-check under the lock
                _global_metrics = MetricsCollector()
    return _global_metrics