"""
Async Cleanup Service
异步清理服务 - 过期内容清理，使用asyncio调度和定期统计更新
"""

import asyncio
import uuid
from typing import Optional, Dict, Any, List, Callable
from datetime import datetime, timedelta
from pathlib import Path
import structlog

from sqlalchemy import select, and_, or_, func, delete, update
from sqlalchemy.ext.asyncio import AsyncSession

from ..core.config import settings
from ..core.exceptions import FileUploadError
from ..models.user import User
from ..models.generated_content import GeneratedContent, ContentStatus
from ..models.creation_request import CreationRequest, RequestStatus
from ..models.task_queue import TaskQueue, TaskStatus
from ..models.system_log import SystemLog, LogLevel, LogCategory, create_log_entry
from ..models.folder import Folder
from ..models.content_tag import ContentTag
from ..services.file_storage_service import FileStorageService
from ..services.system_monitoring_service import SystemMonitoringService

# 配置结构化日志
logger = structlog.get_logger(__name__)


class AsyncCleanupService:
    """Asynchronous maintenance service.

    Runs long-lived asyncio loops that periodically purge expired content,
    failed tasks, old logs, temp files and orphaned files, and refresh
    system statistics. Individual jobs can also be triggered manually via
    :meth:`run_manual_cleanup`.
    """

    def __init__(self):
        # Scheduler state, flipped by start/stop_cleanup_scheduler.
        self.is_running = False
        # Long-lived loop tasks keyed by a short task name.
        self.cleanup_tasks: Dict[str, asyncio.Task] = {}
        self.scheduled_tasks: List[Dict[str, Any]] = []
        # Observers invoked after every manual cleanup run.
        self.cleanup_callbacks: List[Callable] = []
        self.last_cleanup_times: Dict[str, datetime] = {}

        # Default retention and scheduling configuration.
        self.default_config = {
            "expired_content_days": 30,   # age before generated content is purged
            "failed_content_days": 7,     # age before failed content is purged
            "old_logs_days": 90,          # age before system logs are purged
            "temp_files_hours": 24,       # age before temp files are purged
            "orphaned_files_days": 7,     # age before orphaned files are purged
            "cleanup_interval_hours": 6,  # main cleanup cadence
            "statistics_update_hours": 1, # statistics refresh cadence
        }
    
    async def start_cleanup_scheduler(self) -> None:
        """Spawn the background maintenance loops (idempotent)."""
        if self.is_running:
            logger.warning("Cleanup scheduler is already running")
            return

        logger.info("Starting async cleanup scheduler")
        self.is_running = True

        # One long-lived task per maintenance loop, keyed by name.
        loops = {
            "main_cleanup": self._cleanup_scheduler_loop,
            "statistics_update": self._statistics_update_loop,
            "temp_cleanup": self._temp_file_cleanup_loop,
        }
        for task_name, loop_coro in loops.items():
            self.cleanup_tasks[task_name] = asyncio.create_task(loop_coro())

        logger.info("Async cleanup scheduler started successfully")
    
    async def stop_cleanup_scheduler(self) -> None:
        """停止清理调度器"""
        if not self.is_running:
            return
        
        logger.info("Stopping async cleanup scheduler")
        self.is_running = False
        
        # 取消所有任务
        for task_name, task in self.cleanup_tasks.items():
            if not task.done():
                task.cancel()
        
        # 等待所有任务完成
        await asyncio.gather(*self.cleanup_tasks.values(), return_exceptions=True)
        
        self.cleanup_tasks.clear()
        logger.info("Async cleanup scheduler stopped successfully")
    
    async def cleanup_expired_content(self, days_old: int = 30) -> Dict[str, Any]:
        """
        Delete generated content older than *days_old* days, together with
        any stored file it references.

        Only rows in a terminal status (COMPLETED, FAILED, ARCHIVED) are
        considered; in-flight content is never touched.

        Args:
            days_old: Age threshold in days, compared against created_at.

        Returns:
            Dict[str, Any]: "expired_content" / "deleted_files" counters,
            "freed_storage_mb", and a list of per-item "errors".

        Raises:
            FileUploadError: If the sweep fails as a whole (per-item
                failures are collected into "errors", not raised).
        """
        logger.info(f"Starting expired content cleanup (older than {days_old} days)")

        try:
            # Local import — presumably avoids a circular import at module
            # load time; confirm before hoisting to the top of the file.
            from ..core.database import get_db

            cutoff_date = datetime.utcnow() - timedelta(days=days_old)
            results = {
                "expired_content": 0,
                "deleted_files": 0,
                "freed_storage_mb": 0,
                "errors": []
            }

            # get_db() is an async generator yielding a session; the loop
            # body runs once per yielded session.
            async for db in get_db():
                # Terminal-status content created before the cutoff date.
                expired_contents = await db.execute(
                    select(GeneratedContent).where(
                        and_(
                            GeneratedContent.created_at < cutoff_date,
                            GeneratedContent.status.in_([
                                ContentStatus.COMPLETED,
                                ContentStatus.FAILED,
                                ContentStatus.ARCHIVED
                            ])
                        )
                    )
                )
                contents = expired_contents.scalars().all()

                file_storage = FileStorageService()

                for content in contents:
                    try:
                        # Delete the stored file first; if that raises, the
                        # except below also skips the row delete for this item.
                        if content.file_path:
                            await file_storage.delete_file_async(content.file_path)
                            results["deleted_files"] += 1
                            # NOTE(review): assumes file_size_mb is numeric
                            # whenever file_path is set — confirm on the model.
                            results["freed_storage_mb"] += content.file_size_mb

                        # Remove the database row itself.
                        await db.delete(content)
                        results["expired_content"] += 1

                    except Exception as e:
                        # Best-effort sweep: record the failure and continue.
                        error_msg = f"Failed to delete content {content.id}: {str(e)}"
                        logger.error(error_msg)
                        results["errors"].append(error_msg)

                # One commit per session covers all deletions above.
                await db.commit()

            logger.info(
                "Expired content cleanup completed",
                expired_content_count=results["expired_content"],
                deleted_files_count=results["deleted_files"],
                freed_storage_mb=round(results["freed_storage_mb"], 2),
                error_count=len(results["errors"])
            )

            return results

        except Exception as e:
            logger.error(
                "Expired content cleanup failed",
                error=str(e),
                error_type=type(e).__name__
            )
            raise FileUploadError(f"清理过期内容失败: {str(e)}")
    
    async def cleanup_failed_tasks(self, days_old: int = 7) -> Dict[str, Any]:
        """
        Delete FAILED/CANCELLED queue tasks older than *days_old* days.

        Args:
            days_old: Age threshold in days, compared against created_at.

        Returns:
            Dict[str, Any]: "failed_tasks" / "cancelled_tasks" deletion
            counters plus per-item "errors".

        Raises:
            FileUploadError: If the sweep fails as a whole.
        """
        logger.info(f"Starting failed tasks cleanup (older than {days_old} days)")

        try:
            from ..core.database import get_db

            cutoff_date = datetime.utcnow() - timedelta(days=days_old)
            results = {"failed_tasks": 0, "cancelled_tasks": 0, "errors": []}

            async for db in get_db():
                # Terminal (failed/cancelled) tasks past the cutoff.
                stale = await db.execute(
                    select(TaskQueue).where(
                        and_(
                            TaskQueue.created_at < cutoff_date,
                            TaskQueue.status.in_(
                                [TaskStatus.FAILED, TaskStatus.CANCELLED]
                            )
                        )
                    )
                )

                for task in stale.scalars().all():
                    try:
                        await db.delete(task)
                        counter = (
                            "failed_tasks"
                            if task.status == TaskStatus.FAILED
                            else "cancelled_tasks"
                        )
                        results[counter] += 1

                    except Exception as e:
                        error_msg = f"Failed to delete task {task.id}: {str(e)}"
                        logger.error(error_msg)
                        results["errors"].append(error_msg)

                await db.commit()

            logger.info(
                "Failed tasks cleanup completed",
                failed_tasks_count=results["failed_tasks"],
                cancelled_tasks_count=results["cancelled_tasks"],
                error_count=len(results["errors"])
            )

            return results

        except Exception as e:
            logger.error(
                "Failed tasks cleanup failed",
                error=str(e),
                error_type=type(e).__name__
            )
            raise FileUploadError(f"清理失败任务失败: {str(e)}")
    
    async def cleanup_old_logs(self, days_old: int = 90) -> Dict[str, Any]:
        """
        Bulk-delete system log rows older than *days_old* days.

        Args:
            days_old: Age threshold in days, compared against created_at.

        Returns:
            Dict[str, Any]: "deleted_logs" row count plus "errors".

        Raises:
            FileUploadError: If the delete fails.
        """
        logger.info(f"Starting old logs cleanup (older than {days_old} days)")

        try:
            from ..core.database import get_db

            cutoff_date = datetime.utcnow() - timedelta(days=days_old)
            results = {"deleted_logs": 0, "errors": []}

            async for db in get_db():
                # Single bulk DELETE; rowcount reports how many rows matched.
                outcome = await db.execute(
                    delete(SystemLog).where(SystemLog.created_at < cutoff_date)
                )
                results["deleted_logs"] = outcome.rowcount
                await db.commit()

            logger.info(
                "Old logs cleanup completed",
                deleted_logs_count=results["deleted_logs"]
            )

            return results

        except Exception as e:
            logger.error(
                "Old logs cleanup failed",
                error=str(e),
                error_type=type(e).__name__
            )
            raise FileUploadError(f"清理旧日志失败: {str(e)}")
    
    async def cleanup_temp_files(self, hours_old: int = 24) -> Dict[str, Any]:
        """
        Delete files under the configured temp directory older than
        *hours_old* hours (judged by file mtime).

        Args:
            hours_old: Age threshold in hours.

        Returns:
            Dict[str, Any]: "deleted_files", "freed_space_mb" and "errors" —
            the same shape whether or not the temp directory exists.

        Raises:
            FileUploadError: If the sweep fails as a whole (per-file
                failures are collected into "errors" instead).
        """
        logger.info(f"Starting temp files cleanup (older than {hours_old} hours)")

        try:
            temp_dir = Path(settings.storage.temp_dir)
            results = {
                "deleted_files": 0,
                "freed_space_mb": 0,
                "errors": []
            }
            # Fix: return the full result shape (including "errors") even
            # when there is nothing to clean, so callers can index the
            # result uniformly. The old early return omitted "errors".
            if not temp_dir.exists():
                return results

            cutoff_time = datetime.utcnow() - timedelta(hours=hours_old)

            # Walk the temp tree and remove anything older than the cutoff.
            for temp_file in temp_dir.rglob("*"):
                if not temp_file.is_file():
                    continue
                try:
                    # stat() once per file: mtime and size come together.
                    file_stat = temp_file.stat()
                    file_mtime = datetime.fromtimestamp(file_stat.st_mtime)

                    if file_mtime < cutoff_time:
                        file_size_mb = file_stat.st_size / (1024 * 1024)
                        temp_file.unlink()

                        results["deleted_files"] += 1
                        results["freed_space_mb"] += file_size_mb

                except Exception as e:
                    error_msg = f"Failed to delete temp file {temp_file}: {str(e)}"
                    logger.error(error_msg)
                    results["errors"].append(error_msg)

            logger.info(
                "Temp files cleanup completed",
                deleted_files_count=results["deleted_files"],
                freed_space_mb=round(results["freed_space_mb"], 2),
                error_count=len(results["errors"])
            )

            return results

        except Exception as e:
            logger.error(
                "Temp files cleanup failed",
                error=str(e),
                error_type=type(e).__name__
            )
            raise FileUploadError(f"清理临时文件失败: {str(e)}")
    
    async def cleanup_orphaned_files(self, days_old: int = 7) -> Dict[str, Any]:
        """
        Delete files on disk that no database row references ("orphans").

        A file counts as orphaned when its path is absent from every
        GeneratedContent.file_path AND its mtime is older than *days_old*
        days (the age check protects files that are still being written).

        Args:
            days_old: Minimum age in days before an orphan is removed.

        Returns:
            Dict[str, Any]: scanned/orphaned/deleted counters,
            "freed_space_mb", and per-file "errors".

        Raises:
            FileUploadError: If the sweep fails as a whole.
        """
        logger.info(f"Starting orphaned files cleanup (older than {days_old} days)")

        try:
            # Local import — presumably avoids a circular import; confirm.
            from ..core.database import get_db

            results = {
                "scanned_files": 0,
                "orphaned_files": 0,
                "deleted_files": 0,
                "freed_space_mb": 0,
                "errors": []
            }

            async for db in get_db():
                # Every file path currently referenced by a content row.
                existing_files = await db.execute(
                    select(GeneratedContent.file_path).where(
                        GeneratedContent.file_path.isnot(None)
                    )
                )
                db_file_paths = {row[0] for row in existing_files.fetchall()}

                # Walk the upload directory tree.
                storage_dir = Path(settings.storage.upload_dir)
                if not storage_dir.exists():
                    # NOTE(review): returning here exits from inside the
                    # get_db() generator and skips the summary log below.
                    return results

                cutoff_time = datetime.utcnow() - timedelta(days=days_old)

                for file_path in storage_dir.rglob("*"):
                    if not file_path.is_file():
                        continue

                    results["scanned_files"] += 1

                    try:
                        # NOTE(review): assumes the DB stores paths in the
                        # same string form as str(file_path); a relative vs.
                        # absolute mismatch would flag everything as orphaned.
                        str_file_path = str(file_path)
                        if str_file_path not in db_file_paths:
                            # Only remove orphans past the age threshold.
                            file_mtime = datetime.fromtimestamp(file_path.stat().st_mtime)

                            if file_mtime < cutoff_time:
                                file_size_mb = file_path.stat().st_size / (1024 * 1024)

                                # Remove the orphaned file from disk.
                                file_path.unlink()

                                results["orphaned_files"] += 1
                                results["deleted_files"] += 1
                                results["freed_space_mb"] += file_size_mb

                    except Exception as e:
                        # Best-effort sweep: record the failure and continue.
                        error_msg = f"Failed to process file {file_path}: {str(e)}"
                        logger.error(error_msg)
                        results["errors"].append(error_msg)

            logger.info(
                "Orphaned files cleanup completed",
                scanned_files_count=results["scanned_files"],
                orphaned_files_count=results["orphaned_files"],
                deleted_files_count=results["deleted_files"],
                freed_space_mb=round(results["freed_space_mb"], 2),
                error_count=len(results["errors"])
            )

            return results

        except Exception as e:
            logger.error(
                "Orphaned files cleanup failed",
                error=str(e),
                error_type=type(e).__name__
            )
            raise FileUploadError(f"清理孤立文件失败: {str(e)}")
    
    async def update_system_statistics(self) -> Dict[str, Any]:
        """
        Aggregate user/content/task counts from the database and log them.

        Returns:
            Dict[str, Any]: Nested "users" / "content" / "tasks" counters
            plus an ISO-8601 "updated_at" timestamp.

        Raises:
            FileUploadError: If any query or the log write fails.
        """
        try:
            # Local import — presumably avoids a circular import; confirm.
            from ..core.database import get_db

            logger.info("Updating system statistics")

            stats = {}

            async for db in get_db():
                # User counts, split by status/role.
                # NOTE(review): compares against raw strings ('active',
                # 'admin', …) — confirm these match the stored enum values.
                user_stats = await db.execute(
                    select(
                        func.count(User.id).label('total_users'),
                        func.count(User.id).filter(User.status == 'active').label('active_users'),
                        func.count(User.id).filter(User.role == 'admin').label('admin_users')
                    )
                )
                user_data = user_stats.one()

                # Content counts and total storage consumption.
                content_stats = await db.execute(
                    select(
                        func.count(GeneratedContent.id).label('total_content'),
                        func.count(GeneratedContent.id).filter(GeneratedContent.status == 'completed').label('completed_content'),
                        func.count(GeneratedContent.id).filter(GeneratedContent.status == 'failed').label('failed_content'),
                        func.sum(GeneratedContent.file_size_bytes).label('total_storage_bytes')
                    )
                )
                content_data = content_stats.one()

                # Task-queue counts by lifecycle state.
                task_stats = await db.execute(
                    select(
                        func.count(TaskQueue.id).filter(TaskQueue.status == 'pending').label('pending_tasks'),
                        func.count(TaskQueue.id).filter(TaskQueue.status == 'processing').label('processing_tasks'),
                        func.count(TaskQueue.id).filter(TaskQueue.status == 'completed').label('completed_tasks'),
                        func.count(TaskQueue.id).filter(TaskQueue.status == 'failed').label('failed_tasks')
                    )
                )
                task_data = task_stats.one()

                # Aggregates can be NULL on empty tables, hence the "or 0"s.
                stats = {
                    "users": {
                        "total": user_data.total_users,
                        "active": user_data.active_users,
                        "admins": user_data.admin_users
                    },
                    "content": {
                        "total": content_data.total_content or 0,
                        "completed": content_data.completed_content or 0,
                        "failed": content_data.failed_content or 0,
                        "total_storage_mb": round((content_data.total_storage_bytes or 0) / (1024 * 1024), 2)
                    },
                    "tasks": {
                        "pending": task_data.pending_tasks or 0,
                        "processing": task_data.processing_tasks or 0,
                        "completed": task_data.completed_tasks or 0,
                        "failed": task_data.failed_tasks or 0
                    },
                    "updated_at": datetime.utcnow().isoformat()
                }

            # Persist a system-log entry carrying the fresh snapshot.
            # NOTE(review): create_log_entry is awaited here — confirm it is
            # actually a coroutine function.
            await create_log_entry(
                LogLevel.INFO,
                "System statistics updated",
                LogCategory.SYSTEM,
                details=stats
            )

            logger.info("System statistics updated successfully", stats=stats)
            return stats

        except Exception as e:
            logger.error(
                "Failed to update system statistics",
                error=str(e),
                error_type=type(e).__name__
            )
            raise FileUploadError(f"更新系统统计失败: {str(e)}")
    
    def add_cleanup_callback(self, callback: Callable) -> None:
        """Register *callback* (sync or async) to run after each manual cleanup."""
        self.cleanup_callbacks.append(callback)
    
    async def run_manual_cleanup(self, cleanup_types: List[str]) -> Dict[str, Any]:
        """
        Run the requested cleanup jobs immediately, outside the scheduler.

        Args:
            cleanup_types: Names of jobs to run ("expired_content",
                "failed_tasks", "old_logs", "temp_files", "orphaned_files",
                "statistics"). Unknown names are recorded as errors.

        Returns:
            Dict[str, Any]: Per-job results, start/end timestamps, total
            duration in seconds, and accumulated error messages.
        """
        logger.info(f"Starting manual cleanup: {cleanup_types}")

        started_at = datetime.utcnow()
        results = {
            "cleanup_types": cleanup_types,
            "results": {},
            "start_time": started_at.isoformat(),
            "errors": []
        }

        # Dispatch table: job name -> bound coroutine method (defaults apply).
        dispatch = {
            "expired_content": self.cleanup_expired_content,
            "failed_tasks": self.cleanup_failed_tasks,
            "old_logs": self.cleanup_old_logs,
            "temp_files": self.cleanup_temp_files,
            "orphaned_files": self.cleanup_orphaned_files,
            "statistics": self.update_system_statistics,
        }

        for cleanup_type in cleanup_types:
            job = dispatch.get(cleanup_type)
            if job is None:
                error_msg = f"Unknown cleanup type: {cleanup_type}"
                logger.warning(error_msg)
                results["errors"].append(error_msg)
                continue
            try:
                results["results"][cleanup_type] = await job()
            except Exception as e:
                error_msg = f"Cleanup type '{cleanup_type}' failed: {str(e)}"
                logger.error(error_msg)
                results["errors"].append(error_msg)

        finished_at = datetime.utcnow()
        results["end_time"] = finished_at.isoformat()
        results["duration_seconds"] = (finished_at - started_at).total_seconds()

        # Notify registered observers; a failing callback never aborts cleanup.
        for callback in self.cleanup_callbacks:
            try:
                if asyncio.iscoroutinefunction(callback):
                    await callback(results)
                else:
                    callback(results)
            except Exception as e:
                logger.error(
                    "Cleanup callback failed",
                    callback=str(callback),
                    error=str(e)
                )

        logger.info(
            "Manual cleanup completed",
            cleanup_types=cleanup_types,
            duration_seconds=results["duration_seconds"],
            error_count=len(results["errors"])
        )

        return results
    
    # 私有辅助方法
    
    async def _cleanup_scheduler_loop(self) -> None:
        """Background loop: run the standard cleanup batch on a fixed cadence."""
        logger.info("Starting cleanup scheduler loop")

        while self.is_running:
            try:
                # Sleep first so startup never triggers an immediate sweep.
                # Config is re-read each cycle so cadence changes take effect.
                await asyncio.sleep(self.default_config["cleanup_interval_hours"] * 3600)

                if not self.is_running:
                    break

                logger.info("Running scheduled cleanup tasks")

                outcome = await self.run_manual_cleanup(
                    ["expired_content", "failed_tasks", "old_logs", "orphaned_files"]
                )

                logger.info(
                    "Scheduled cleanup completed",
                    duration_seconds=outcome.get("duration_seconds", 0),
                    error_count=len(outcome.get("errors", []))
                )

            except asyncio.CancelledError:
                # Normal shutdown path: the scheduler cancelled us.
                logger.info("Cleanup scheduler loop cancelled")
                break
            except Exception as exc:
                logger.error(
                    "Cleanup scheduler loop error",
                    error=str(exc),
                    error_type=type(exc).__name__
                )
                await asyncio.sleep(60)  # back off one minute after an error
    
    async def _statistics_update_loop(self) -> None:
        """Background loop: refresh system statistics on a fixed cadence."""
        logger.info("Starting statistics update loop")

        while self.is_running:
            try:
                # Sleep first so startup never triggers an immediate refresh.
                await asyncio.sleep(self.default_config["statistics_update_hours"] * 3600)

                if not self.is_running:
                    break

                logger.info("Updating system statistics")

                snapshot = await self.update_system_statistics()

                logger.info(
                    "System statistics updated",
                    users_total=snapshot["users"]["total"],
                    content_total=snapshot["content"]["total"],
                    storage_mb=snapshot["content"]["total_storage_mb"]
                )

            except asyncio.CancelledError:
                # Normal shutdown path: the scheduler cancelled us.
                logger.info("Statistics update loop cancelled")
                break
            except Exception as exc:
                logger.error(
                    "Statistics update loop error",
                    error=str(exc),
                    error_type=type(exc).__name__
                )
                await asyncio.sleep(300)  # back off five minutes after an error
    
    async def _temp_file_cleanup_loop(self) -> None:
        """Background loop: purge stale temp files every 6 hours."""
        logger.info("Starting temp file cleanup loop")

        while self.is_running:
            try:
                # Fixed 6-hour cadence for temp file sweeps.
                await asyncio.sleep(6 * 3600)

                if not self.is_running:
                    break

                logger.info("Running temp file cleanup")

                temp_results = await self.cleanup_temp_files()

                logger.info(
                    "Temp file cleanup completed",
                    deleted_files=temp_results["deleted_files"],
                    # Bug fix: cleanup_temp_files() returns "freed_space_mb",
                    # not "freed_storage_mb" — the old key raised KeyError on
                    # every successful sweep, sending the loop into the
                    # generic error/back-off path below.
                    freed_space_mb=round(temp_results["freed_space_mb"], 2)
                )

            except asyncio.CancelledError:
                # Normal shutdown path: the scheduler cancelled us.
                logger.info("Temp file cleanup loop cancelled")
                break
            except Exception as e:
                logger.error(
                    "Temp file cleanup loop error",
                    error=str(e),
                    error_type=type(e).__name__
                )
                await asyncio.sleep(3600)  # back off one hour after an error


# Module-level singleton used by the rest of the application.
async_cleanup_service = AsyncCleanupService()


# Public API of this module.
__all__ = ["AsyncCleanupService", "async_cleanup_service"]