from typing import List, Dict, Any, Optional
from datetime import datetime, timedelta
from fastapi import APIRouter, HTTPException, Query
from fastapi.responses import JSONResponse
from pydantic import BaseModel
import redis
import psycopg2
from psycopg2.extras import RealDictCursor
import structlog
import json
import os
from enum import Enum


# Module-level structured logger bound to this module's name.
logger = structlog.get_logger(__name__)
# All endpoints below are mounted under /api/v1/crawler.
router = APIRouter(prefix="/api/v1/crawler", tags=["crawler-monitoring"])


class SpiderStatus(str, Enum):
    """Lifecycle states a spider can report.

    Values match the plain strings stored in the Redis ``crawler:status:*``
    hashes and (partially) the ``status`` column of ``crawler_configs``.
    """

    RUNNING = "running"
    IDLE = "idle"
    ERROR = "error"
    STOPPED = "stopped"
    PAUSED = "paused"


class SpiderInfo(BaseModel):
    """Point-in-time snapshot of one spider, merged from Redis and the DB."""

    name: str  # spider/source identifier (last segment of the Redis key)
    status: SpiderStatus
    last_run: Optional[datetime] = None  # None when the spider has never run
    items_scraped: int = 0
    requests_made: int = 0
    errors: int = 0
    uptime_seconds: Optional[float] = None  # None when uptime is not tracked


class CrawlerStatistics(BaseModel):
    """Aggregated crawl metrics across all spiders (see GET /stats)."""

    total_spiders: int
    active_spiders: int
    idle_spiders: int
    error_spiders: int
    total_items_scraped: int
    total_requests: int
    total_errors: int
    average_response_time: float  # mean of per-source avg_response_time values
    success_rate: float  # percentage, (requests - errors) / requests * 100
    last_24h_items: int
    top_sources: List[Dict[str, Any]]  # up to 10 sources, sorted by item count


class HealthCheckResponse(BaseModel):
    """Response body for GET /health."""

    status: str  # "healthy", "degraded", or "unhealthy"
    timestamp: datetime
    version: str
    services: Dict[str, str]  # per-service state, e.g. {"redis": "healthy"}


def get_redis_client():
    """Return a connected Redis client, or None when Redis is unreachable.

    Connection parameters come from the REDIS_HOST / REDIS_PORT / REDIS_DB
    environment variables. The connection is verified with a PING so callers
    can rely on a non-None result being usable. Any failure (including a
    malformed env value) is logged and collapsed into None — callers treat
    Redis as a best-effort backend.
    """
    try:
        client = redis.Redis(
            host=os.getenv("REDIS_HOST", "redis"),
            port=int(os.getenv("REDIS_PORT", 6379)),
            db=int(os.getenv("REDIS_DB", 0)),
            decode_responses=True,
        )
        client.ping()
    except Exception as exc:
        logger.error("Failed to connect to Redis", error=str(exc))
        return None
    return client


def get_db_connection():
    """Open and return a new PostgreSQL connection with dict-style rows.

    Parameters are taken from the POSTGRES_* environment variables.
    NOTE(review): the fallback credentials (admin/admin123) look like
    development defaults — confirm they are overridden in production.
    The caller owns the connection and is responsible for closing it.
    """
    conn_params = {
        "host": os.getenv("POSTGRES_HOST", "postgres"),
        "port": os.getenv("POSTGRES_PORT", 5432),
        "database": os.getenv("POSTGRES_DB", "ai_writing"),
        "user": os.getenv("POSTGRES_USER", "admin"),
        "password": os.getenv("POSTGRES_PASSWORD", "admin123"),
        # RealDictCursor makes fetch* return dict-like rows keyed by column name.
        "cursor_factory": RealDictCursor,
    }
    return psycopg2.connect(**conn_params)


@router.get("/status", response_model=List[SpiderInfo])
async def get_crawler_status(
    source: Optional[str] = None,
    status_filter: Optional[SpiderStatus] = None
):
    """Return the status of all known spiders.

    Live state is read from the Redis ``crawler:status:<name>`` hashes;
    spiders present in the ``crawler_configs`` table but absent from Redis
    are appended with zeroed counters.

    Args:
        source: Optional substring filter applied to the spider name.
        status_filter: Optional exact status to filter by.

    Raises:
        HTTPException: 500 when the backends cannot be queried.
    """
    try:
        spiders: List[SpiderInfo] = []

        redis_client = get_redis_client()
        if redis_client:
            # scan_iter avoids the blocking KEYS command on large keyspaces.
            for key in redis_client.scan_iter("crawler:status:*"):
                spider_name = key.split(":")[-1]
                if source and source not in spider_name:
                    continue

                spider_data = redis_client.hgetall(key)
                raw_status = spider_data.get("status", "idle")
                try:
                    spider_status = SpiderStatus(raw_status)
                except ValueError:
                    # A corrupt/unknown status value must not 500 the whole
                    # endpoint; fall back to idle and log the anomaly.
                    logger.warning(
                        "Unknown spider status in Redis",
                        spider=spider_name, status=raw_status,
                    )
                    spider_status = SpiderStatus.IDLE

                if status_filter and spider_status != status_filter:
                    continue

                spiders.append(SpiderInfo(
                    name=spider_name,
                    status=spider_status,
                    last_run=datetime.fromisoformat(spider_data["last_run"]) if spider_data.get("last_run") else None,
                    items_scraped=int(spider_data.get("items_scraped", 0)),
                    requests_made=int(spider_data.get("requests_made", 0)),
                    errors=int(spider_data.get("errors", 0)),
                    uptime_seconds=float(spider_data["uptime"]) if spider_data.get("uptime") else None,
                ))

        conn = get_db_connection()
        try:
            # "with conn" only scopes a transaction in psycopg2 — it does NOT
            # close the connection, hence the explicit close in finally
            # (previously this leaked one connection per request).
            with conn:
                with conn.cursor() as cur:
                    query = "SELECT source_name, status, last_run_at FROM crawler_configs"
                    params = []
                    if source:
                        query += " WHERE source_name LIKE %s"
                        params.append(f"%{source}%")
                    cur.execute(query, params)
                    configs = cur.fetchall()
        finally:
            conn.close()

        existing_names = {s.name for s in spiders}
        for config in configs:
            if config['source_name'] in existing_names:
                continue

            spider_status = SpiderStatus.IDLE
            if config['status'] == 'error':
                spider_status = SpiderStatus.ERROR
            elif config['status'] == 'paused':
                spider_status = SpiderStatus.PAUSED

            if status_filter and spider_status != status_filter:
                continue

            spiders.append(SpiderInfo(
                name=config['source_name'],
                status=spider_status,
                last_run=config.get('last_run_at'),
                items_scraped=0,
                requests_made=0,
                errors=0,
                uptime_seconds=None,
            ))

        return spiders

    except Exception as e:
        logger.error("Failed to fetch crawler status", error=str(e))
        raise HTTPException(status_code=500, detail="Failed to fetch status")


@router.get("/stats", response_model=CrawlerStatistics)
async def get_crawler_statistics(
    time_range: str = Query(default="24h", regex="^(1h|6h|12h|24h|7d|30d)$")
):
    """Aggregate crawl metrics across all spiders.

    Per-source counters come from the Redis ``crawler:stats:*`` hashes and
    spider states from ``crawler:status:*``; the total spider count is
    reconciled against the ``crawler_configs`` table.

    Args:
        time_range: Requested window (validated by regex). NOTE: currently
            not applied to the data — see the TODO below.

    Raises:
        HTTPException: 500 when the backends cannot be queried.
    """
    try:
        redis_client = get_redis_client()

        total_spiders = active_spiders = idle_spiders = error_spiders = 0
        total_items = total_requests = total_errors = 0
        response_times: List[float] = []
        source_stats: Dict[str, Dict[str, Any]] = {}

        if redis_client:
            # scan_iter avoids the blocking KEYS command on large keyspaces.
            for key in redis_client.scan_iter("crawler:stats:*"):
                stats = redis_client.hgetall(key)
                source_name = key.split(":")[-1]

                entry = source_stats.setdefault(source_name, {
                    "name": source_name,
                    "items": 0,
                    "requests": 0,
                    "success": 0,
                    "errors": 0
                })

                # Parse each counter once; reused for both per-source and totals.
                items = int(stats.get("items_scraped", 0))
                requests = int(stats.get("requests", 0))
                errors = int(stats.get("errors", 0))

                entry["items"] += items
                entry["requests"] += requests
                entry["success"] += int(stats.get("success", 0))
                entry["errors"] += errors

                total_items += items
                total_requests += requests
                total_errors += errors

                if stats.get("avg_response_time"):
                    response_times.append(float(stats["avg_response_time"]))

            for key in redis_client.scan_iter("crawler:status:*"):
                status = redis_client.hgetall(key).get("status", "idle")
                total_spiders += 1
                if status == "running":
                    active_spiders += 1
                elif status == "idle":
                    idle_spiders += 1
                elif status == "error":
                    error_spiders += 1

        conn = get_db_connection()
        try:
            # psycopg2's "with conn" does not close the connection; close it
            # explicitly to avoid leaking one connection per request.
            with conn:
                with conn.cursor() as cur:
                    cur.execute("SELECT COUNT(*) as total FROM crawler_configs")
                    db_total = cur.fetchone()
                    if db_total:
                        # Configured spiders may have no Redis entry yet.
                        total_spiders = max(total_spiders, db_total['total'])
        finally:
            conn.close()

        # TODO: query crawled data restricted to `time_range`; until then the
        # windowed figure is approximated by the all-time total.
        last_24h_items = total_items

        avg_response_time = sum(response_times) / len(response_times) if response_times else 0
        # max(..., 1) guards the zero-requests case (yields 0% instead of 1/0).
        success_rate = ((total_requests - total_errors) / max(total_requests, 1)) * 100

        top_sources = sorted(
            source_stats.values(),
            key=lambda s: s["items"],
            reverse=True,
        )[:10]

        return CrawlerStatistics(
            total_spiders=total_spiders,
            active_spiders=active_spiders,
            idle_spiders=idle_spiders,
            error_spiders=error_spiders,
            total_items_scraped=total_items,
            total_requests=total_requests,
            total_errors=total_errors,
            average_response_time=round(avg_response_time, 2),
            success_rate=round(success_rate, 2),
            last_24h_items=last_24h_items,
            top_sources=top_sources
        )

    except Exception as e:
        logger.error("Failed to fetch crawler statistics", error=str(e))
        raise HTTPException(status_code=500, detail="Failed to fetch statistics")


@router.get("/health")
async def health_check():
    """Probe the API's backing services and report an overall health status.

    Returns "healthy" only when every probed service is healthy, otherwise
    "degraded"; a failure of the endpoint itself yields "unhealthy". Never
    raises — health checks must always answer.
    """
    try:
        services = {
            "api": "healthy",
            "database": "unknown",
            "redis": "unknown",
            "scrapy": "unknown"
        }

        try:
            conn = get_db_connection()
            try:
                with conn:
                    with conn.cursor() as cur:
                        cur.execute("SELECT 1")
                services["database"] = "healthy"
            finally:
                # "with conn" does not close psycopg2 connections; close
                # explicitly so the health probe doesn't leak one per call.
                conn.close()
        except Exception:
            # Narrowed from a bare except: don't swallow SystemExit/KeyboardInterrupt.
            services["database"] = "unhealthy"

        try:
            redis_client = get_redis_client()
            if redis_client and redis_client.ping():
                services["redis"] = "healthy"
            else:
                services["redis"] = "unhealthy"
        except Exception:
            services["redis"] = "unhealthy"

        # TODO: Check if Scrapy engine is running
        services["scrapy"] = "healthy"

        overall_status = "healthy" if all(s == "healthy" for s in services.values()) else "degraded"

        return HealthCheckResponse(
            status=overall_status,
            timestamp=datetime.now(),
            version="1.0.0",
            services=services
        )

    except Exception as e:
        logger.error("Health check failed", error=str(e))
        return HealthCheckResponse(
            status="unhealthy",
            timestamp=datetime.now(),
            version="1.0.0",
            services={"error": str(e)}
        )


@router.post("/control/{source_name}/{action}")
async def control_spider(source_name: str, action: str):
    """Issue a control command ("start", "stop", "pause", "resume") to a spider.

    Writes a short-lived command key to Redis for the crawler process to pick
    up, mirrors the resulting state into the Redis status hash, and persists
    the configured state in the crawler_configs table.

    Raises:
        HTTPException: 400 for an unknown action, 500 on backend failure.
    """
    if action not in ["start", "stop", "pause", "resume"]:
        raise HTTPException(status_code=400, detail="Invalid action")

    # Target state the Redis status hash should reflect after the command.
    redis_status = {
        "start": "running",
        "stop": "stopped",
        "pause": "paused",
        "resume": "running",
    }[action]

    try:
        redis_client = get_redis_client()
        if redis_client:
            # The control key expires after 60s so stale commands are not
            # picked up long after they were issued.
            redis_client.set(f"crawler:control:{source_name}", action, ex=60)
            redis_client.hset(f"crawler:status:{source_name}", "status", redis_status)

        conn = get_db_connection()
        try:
            # "with conn" commits the transaction on clean exit (the previous
            # explicit commit was redundant) but does NOT close the connection,
            # hence the finally: close — otherwise one connection leaks per call.
            with conn:
                with conn.cursor() as cur:
                    if action in ["stop", "pause"]:
                        new_status = "paused" if action == "pause" else "inactive"
                        cur.execute(
                            "UPDATE crawler_configs SET status = %s WHERE source_name = %s",
                            (new_status, source_name)
                        )
                    elif action in ["start", "resume"]:
                        cur.execute(
                            "UPDATE crawler_configs SET status = 'active' WHERE source_name = %s",
                            (source_name,)
                        )
        finally:
            conn.close()

        logger.info(f"Spider control action executed", source=source_name, action=action)

        return {
            "message": f"Action '{action}' executed for spider '{source_name}'",
            "source": source_name,
            "action": action,
            "timestamp": datetime.now().isoformat()
        }

    except Exception as e:
        logger.error("Failed to control spider", source=source_name, action=action, error=str(e))
        raise HTTPException(status_code=500, detail=f"Failed to execute action: {action}")


@router.get("/logs/{source_name}")
async def get_spider_logs(
    source_name: str,
    limit: int = Query(default=100, le=1000),
    level: Optional[str] = Query(default=None, regex="^(DEBUG|INFO|WARNING|ERROR)$")
):
    """Return up to `limit` log entries for one spider from Redis.

    Entries are read from the ``crawler:logs:<source>`` list; each is parsed
    as JSON when possible, otherwise wrapped as an INFO message so clients
    always receive dicts. `level` filters parsed entries by exact level.

    Raises:
        HTTPException: 500 when Redis cannot be queried.
    """
    try:
        redis_client = get_redis_client()
        logs = []

        if redis_client:
            log_key = f"crawler:logs:{source_name}"
            raw_logs = redis_client.lrange(log_key, 0, limit - 1)

            for log_entry in raw_logs:
                try:
                    log_data = json.loads(log_entry)
                except ValueError:
                    # Narrowed from a bare except: only malformed JSON is
                    # expected here (JSONDecodeError is a ValueError).
                    logs.append({"message": log_entry, "level": "INFO"})
                    continue
                if not isinstance(log_data, dict):
                    # Valid JSON but not an object (e.g. a bare number)
                    # previously crashed on .get(); wrap it like plain text.
                    logs.append({"message": log_entry, "level": "INFO"})
                    continue
                if level and log_data.get("level") != level:
                    continue
                logs.append(log_data)

        return {
            "source": source_name,
            "logs": logs,
            "count": len(logs),
            "limit": limit,
            "level_filter": level
        }

    except Exception as e:
        logger.error("Failed to fetch spider logs", source=source_name, error=str(e))
        raise HTTPException(status_code=500, detail="Failed to fetch logs")