"""
Metrics Collector
Collects and exposes metrics for monitoring the ArXiv subscription platform
"""

import asyncio
import logging
import time
from datetime import datetime, timedelta, timezone
from typing import Any, Dict, Optional

import psycopg2
from fastapi import FastAPI, Response
from prometheus_client import Counter, Histogram, Gauge, generate_latest, CONTENT_TYPE_LATEST
from psycopg2.extras import RealDictCursor

from config import get_config

logger = logging.getLogger(__name__)

class MetricsCollector:
    """
    Collects and exposes metrics for Prometheus monitoring.

    All metric objects are registered with the default prometheus_client
    registry at construction time, so only one instance should exist per
    process (a second instantiation raises a duplicate-registration error).

    Counters/histograms are updated synchronously via the ``record_*``
    methods; database-derived gauges are refreshed by a background asyncio
    task started with :meth:`start_collection`.
    """

    def __init__(self):
        self.config = get_config()

        # --- HTTP metrics ---
        self.http_requests_total = Counter(
            'http_requests_total',
            'Total HTTP requests',
            ['method', 'endpoint', 'status']
        )

        self.http_request_duration_seconds = Histogram(
            'http_request_duration_seconds',
            'HTTP request duration in seconds',
            ['method', 'endpoint']
        )

        # --- Database metrics ---
        self.db_connections_active = Gauge(
            'postgresql_connections_active',
            'Active PostgreSQL connections'
        )

        self.db_connections_idle = Gauge(
            'postgresql_connections_idle', 
            'Idle PostgreSQL connections'
        )

        self.db_connections_max = Gauge(
            'postgresql_connections_max',
            'Maximum PostgreSQL connections'
        )

        self.db_query_duration_seconds = Histogram(
            'postgresql_query_duration_seconds',
            'PostgreSQL query duration in seconds',
            ['query_type']
        )

        # --- ArXiv scraper metrics ---
        self.scraper_papers_processed_total = Counter(
            'arxiv_scraper_papers_processed_total',
            'Total papers processed by scraper'
        )

        self.scraper_errors_total = Counter(
            'arxiv_scraper_errors_total',
            'Total scraper errors'
        )

        self.scraper_last_successful_run_timestamp = Gauge(
            'arxiv_scraper_last_successful_run_timestamp',
            'Timestamp of last successful scraper run'
        )

        self.papers_scraped_today = Gauge(
            'arxiv_papers_scraped_today',
            'Number of papers scraped today'
        )

        # --- AI classification metrics ---
        self.ai_classification_success_total = Counter(
            'arxiv_ai_classification_success_total',
            'Total successful AI classifications'
        )

        self.ai_classification_failures_total = Counter(
            'arxiv_ai_classification_failures_total',
            'Total AI classification failures'
        )

        self.ai_classification_duration_seconds = Histogram(
            'arxiv_ai_classification_duration_seconds',
            'AI classification duration in seconds'
        )

        # --- Email service metrics ---
        self.email_queue_pending_count = Gauge(
            'arxiv_email_queue_pending_count',
            'Number of pending emails in queue'
        )

        self.emails_sent_total = Counter(
            'arxiv_emails_sent_total',
            'Total emails sent',
            ['template', 'status']
        )

        self.email_delivery_duration_seconds = Histogram(
            'arxiv_email_delivery_duration_seconds',
            'Email delivery duration in seconds'
        )

        # --- User metrics ---
        self.user_registrations_total = Counter(
            'arxiv_user_registrations_total',
            'Total user registrations'
        )

        self.user_deletions_total = Counter(
            'arxiv_user_deletions_total',
            'Total user account deletions'
        )

        self.active_users_count = Gauge(
            'arxiv_active_users_count',
            'Number of active users'
        )

        self.user_sessions_active = Gauge(
            'arxiv_user_sessions_active',
            'Number of active user sessions'
        )

        # --- Recommendation metrics ---
        self.recommendations_generated_total = Counter(
            'arxiv_recommendations_generated_total',
            'Total recommendations generated'
        )

        self.user_recommendations_last_updated_timestamp = Gauge(
            'arxiv_user_recommendations_last_updated_timestamp',
            'Timestamp of last user recommendations update'
        )

        # --- Business metrics ---
        self.papers_saved_total = Counter(
            'arxiv_papers_saved_total',
            'Total papers saved by users'
        )

        self.paper_views_total = Counter(
            'arxiv_paper_views_total',
            'Total paper views'
        )

        self.searches_performed_total = Counter(
            'arxiv_searches_performed_total',
            'Total searches performed'
        )

        # --- System metrics ---
        self.pipeline_execution_duration_seconds = Histogram(
            'arxiv_pipeline_execution_duration_seconds',
            'Data pipeline execution duration in seconds'
        )

        self.pipeline_last_execution_timestamp = Gauge(
            'arxiv_pipeline_last_execution_timestamp',
            'Timestamp of last pipeline execution'
        )

        # Handle to the background collection task; None when not running.
        self.collection_task: Optional[asyncio.Task] = None

    def record_http_request(self, method: str, endpoint: str, status: int, duration: float):
        """Record one HTTP request: increments the request counter and
        observes its duration.

        Args:
            method: HTTP method (e.g. "GET").
            endpoint: Request path used as the endpoint label.
            status: HTTP response status code.
            duration: Request duration in seconds.
        """
        self.http_requests_total.labels(
            method=method, 
            endpoint=endpoint, 
            status=status
        ).inc()

        self.http_request_duration_seconds.labels(
            method=method,
            endpoint=endpoint
        ).observe(duration)

    def record_db_query(self, query_type: str, duration: float):
        """Observe the duration (seconds) of a database query of the given type."""
        self.db_query_duration_seconds.labels(query_type=query_type).observe(duration)

    def record_scraper_success(self, papers_count: int):
        """Record a successful scraper run: bump the processed-paper counter
        by ``papers_count`` and stamp the last-success time."""
        self.scraper_papers_processed_total.inc(papers_count)
        self.scraper_last_successful_run_timestamp.set(time.time())

    def record_scraper_error(self):
        """Increment the scraper error counter."""
        self.scraper_errors_total.inc()

    def record_ai_classification(self, success: bool, duration: float):
        """Record one AI classification attempt.

        Increments the success or failure counter depending on ``success``
        and always observes the duration (seconds).
        """
        if success:
            self.ai_classification_success_total.inc()
        else:
            self.ai_classification_failures_total.inc()

        self.ai_classification_duration_seconds.observe(duration)

    def record_email_sent(self, template: str, status: str, duration: Optional[float] = None):
        """Record an email send attempt.

        Args:
            template: Email template name (label).
            status: Delivery status (label).
            duration: Optional delivery duration in seconds; when omitted,
                only the counter is incremented.
        """
        self.emails_sent_total.labels(template=template, status=status).inc()

        if duration is not None:
            self.email_delivery_duration_seconds.observe(duration)

    def record_user_registration(self):
        """Increment the user registration counter."""
        self.user_registrations_total.inc()

    def record_user_deletion(self):
        """Increment the user account deletion counter."""
        self.user_deletions_total.inc()

    def record_recommendation_generation(self, count: int):
        """Record that ``count`` recommendations were generated and stamp
        the last-update time."""
        self.recommendations_generated_total.inc(count)
        self.user_recommendations_last_updated_timestamp.set(time.time())

    def record_paper_saved(self):
        """Increment the saved-papers counter."""
        self.papers_saved_total.inc()

    def record_paper_view(self):
        """Increment the paper-view counter."""
        self.paper_views_total.inc()

    def record_search_performed(self):
        """Increment the searches-performed counter."""
        self.searches_performed_total.inc()

    def record_pipeline_execution(self, duration: float):
        """Observe a pipeline execution duration (seconds) and stamp the
        last-execution time."""
        self.pipeline_execution_duration_seconds.observe(duration)
        self.pipeline_last_execution_timestamp.set(time.time())

    async def update_database_metrics(self):
        """Refresh all gauges derived from database state.

        Errors are logged and swallowed so a single failed poll does not
        kill the background collection loop.

        NOTE(review): psycopg2 is a blocking driver, so this coroutine
        blocks the event loop while the queries run; consider moving the
        work into ``run_in_executor`` if these queries ever get slow.
        """
        conn = None
        try:
            conn = psycopg2.connect(
                self.config.database.url,
                cursor_factory=RealDictCursor
            )

            with conn.cursor() as cur:
                # Connection counts grouped by backend state.
                cur.execute("""
                    SELECT state, count(*) as count 
                    FROM pg_stat_activity 
                    WHERE datname = current_database()
                    GROUP BY state
                """)

                # RealDictCursor yields one dict per row, so build an explicit
                # state -> count mapping.  (``dict(cur.fetchall())`` would pair
                # each row's *keys* -- 'state'/'count' -- not its values.)
                connection_states = {
                    row['state']: row['count'] for row in cur.fetchall()
                }
                self.db_connections_active.set(connection_states.get('active', 0))
                self.db_connections_idle.set(connection_states.get('idle', 0))

                # Server-wide connection limit.
                cur.execute("SHOW max_connections")
                max_conn = cur.fetchone()['max_connections']
                self.db_connections_max.set(int(max_conn))

                # Active users (logged in within the last 24 hours).
                cur.execute("""
                    SELECT COUNT(*) FROM user_profiles 
                    WHERE last_login >= NOW() - INTERVAL '24 hours'
                    AND is_active = true
                """)
                active_users = cur.fetchone()['count']
                self.active_users_count.set(active_users)

                # Currently active, unexpired sessions.
                cur.execute("""
                    SELECT COUNT(*) FROM user_sessions 
                    WHERE is_active = true 
                    AND expires_at > NOW()
                """)
                active_sessions = cur.fetchone()['count']
                self.user_sessions_active.set(active_sessions)

                # Papers ingested since midnight (server time).
                cur.execute("""
                    SELECT COUNT(*) FROM papers 
                    WHERE created_at >= CURRENT_DATE
                """)
                papers_today = cur.fetchone()['count']
                self.papers_scraped_today.set(papers_today)

                # Size of the pending email queue.
                cur.execute("""
                    SELECT COUNT(*) FROM notification_queue 
                    WHERE status = 'pending'
                """)
                pending_emails = cur.fetchone()['count']
                self.email_queue_pending_count.set(pending_emails)

        except Exception as e:
            logger.error(f"Failed to update database metrics: {e}")
        finally:
            # Close even when a query failed; the original only closed on
            # the success path, leaking a connection per failed poll.
            if conn is not None:
                conn.close()

    async def start_collection(self):
        """Start the background metrics collection task (idempotent)."""
        if self.collection_task is None:
            self.collection_task = asyncio.create_task(self._collection_loop())

    async def stop_collection(self):
        """Cancel the background task and wait for it to finish."""
        if self.collection_task:
            self.collection_task.cancel()
            try:
                await self.collection_task
            except asyncio.CancelledError:
                pass
            self.collection_task = None

    async def _collection_loop(self):
        """Poll database metrics every 30 seconds until cancelled.

        Unexpected errors are logged and the loop continues; cancellation
        exits cleanly.
        """
        while True:
            try:
                await self.update_database_metrics()
                await asyncio.sleep(30)  # Update every 30 seconds
            except asyncio.CancelledError:
                break
            except Exception as e:
                logger.error(f"Metrics collection error: {e}")
                await asyncio.sleep(30)

    def get_metrics(self) -> bytes:
        """Return the current metrics in Prometheus exposition format.

        Note: ``generate_latest()`` returns *bytes* (the original ``-> str``
        annotation was incorrect).
        """
        return generate_latest()

# Global metrics collector instance.
# Module-level singleton: importing this module registers every metric with
# the default prometheus_client registry exactly once.
metrics_collector = MetricsCollector()

# Standalone FastAPI app that serves only the /metrics and /health endpoints.
metrics_app = FastAPI(title="ArXiv Platform Metrics")

@metrics_app.get("/metrics")
async def get_metrics():
    """Serve the current metric values in Prometheus exposition format."""
    payload = metrics_collector.get_metrics()
    return Response(content=payload, media_type=CONTENT_TYPE_LATEST)

@metrics_app.get("/health")
async def health_check():
    """Liveness endpoint.

    Reports a static "healthy" status, the current UTC time, and whether
    the background collection task has been started.

    Uses an aware UTC timestamp: ``datetime.utcnow()`` is deprecated since
    Python 3.12 and returns a naive datetime.
    """
    return {
        "status": "healthy",
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "metrics_collection": metrics_collector.collection_task is not None
    }

@metrics_app.on_event("startup")
async def startup_event():
    """FastAPI startup hook: begin polling database-derived metrics."""
    collector = get_metrics_collector()
    await collector.start_collection()
    logger.info("Metrics collection started")

@metrics_app.on_event("shutdown") 
async def shutdown_event():
    """FastAPI shutdown hook: cancel the background polling task."""
    collector = get_metrics_collector()
    await collector.stop_collection()
    logger.info("Metrics collection stopped")

# Middleware for automatic HTTP metrics collection
from starlette.middleware.base import BaseHTTPMiddleware

class MetricsMiddleware(BaseHTTPMiddleware):
    """Starlette middleware that records a count and latency observation
    for every HTTP request passing through the wrapped app.

    Note: if the downstream app raises, the exception propagates and no
    metric is recorded for that request (unchanged from the original).
    """

    async def dispatch(self, request, call_next):
        # perf_counter is monotonic, so the measured duration cannot go
        # negative or jump when the wall clock is adjusted (time.time can).
        start_time = time.perf_counter()

        response = await call_next(request)

        duration = time.perf_counter() - start_time

        # Record metrics on the global collector.
        metrics_collector.record_http_request(
            method=request.method,
            endpoint=str(request.url.path),
            status=response.status_code,
            duration=duration
        )

        return response

# Export for use in main application
def get_metrics_collector() -> MetricsCollector:
    """Accessor for the module-level singleton collector instance."""
    return metrics_collector