"""
数据管理相关的API路由
包括数据上传、数据统计、数据源管理等接口
"""

from fastapi import APIRouter, Depends, HTTPException, status, BackgroundTasks, UploadFile, File
from typing import Dict, Any, List, Optional
from datetime import datetime
import os
import tempfile

from ....core.interfaces import Task, TaskStatus
from ....agents.agent_manager import agent_manager
from ..models import *
from ..dependencies import common_dependencies, validate_batch_size

router = APIRouter(prefix="/api/v1/data", tags=["Data Management"])


@router.post("/upload/file", response_model=DataUploadResponse)
async def upload_file(
    file: UploadFile = File(...),
    background_tasks: BackgroundTasks = BackgroundTasks(),
    deps: Dict[str, Any] = Depends(common_dependencies)
):
    """Single-file upload endpoint.

    Validates the file extension, stages the content in a temp file and
    returns an upload id. Background ingestion is temporarily disabled
    while lightrag is under development, so the temp file is staged only.

    Raises:
        HTTPException 400: unsupported file extension.
        HTTPException 500: any unexpected failure while staging the file.
    """
    try:
        # Whitelist of extensions the ingestion pipeline can handle
        allowed_extensions = {'.txt', '.pdf', '.docx', '.md', '.json', '.csv'}
        file_extension = os.path.splitext(file.filename)[1].lower()

        if file_extension not in allowed_extensions:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=f"File type {file_extension} not supported. Allowed: {allowed_extensions}"
            )

        # Stage the uploaded bytes in a persistent temp file (delete=False so
        # a background worker could pick it up later)
        with tempfile.NamedTemporaryFile(delete=False, suffix=file_extension) as temp_file:
            content = await file.read()
            temp_file.write(content)
            temp_file_path = temp_file.name

        # Ingestion temporarily disabled (lightrag in development); when
        # re-enabled, schedule:
        # orchestrator = agent_manager.get_orchestrator()
        # ingestion_pipeline = orchestrator.rag_controller.ingestion_pipeline
        # background_tasks.add_task(ingestion_pipeline.process_file, temp_file_path)

        upload_id = f"file_upload_{datetime.now().timestamp()}"

        return DataUploadResponse(
            upload_id=upload_id,
            status="processing",
            data_type="uploaded_file",
            message=f"File '{file.filename}' uploaded successfully, processing in background",
            timestamp=datetime.now()
        )

    except HTTPException:
        # Re-raise deliberate 4xx responses untouched; the broad handler
        # below would otherwise convert them into misleading 500s.
        raise
    except Exception as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"File upload failed: {str(e)}"
        )


@router.post("/upload/batch-files", response_model=DataUploadResponse)
async def upload_batch_files(
    files: List[UploadFile] = File(...),
    background_tasks: BackgroundTasks = BackgroundTasks(),
    deps: Dict[str, Any] = Depends(common_dependencies)
):
    """Batch file upload endpoint.

    Validates every file's extension, stages the contents in temp files and
    schedules background ingestion. All staged temp files are removed
    best-effort on any failure.

    Raises:
        HTTPException 413: more than 50 files in one batch.
        HTTPException 400: a file has an unsupported extension.
        HTTPException 500: any unexpected failure.
    """
    temp_files: List[str] = []
    try:
        # Enforce the per-batch upload limit
        if len(files) > 50:
            raise HTTPException(
                status_code=status.HTTP_413_REQUEST_ENTITY_TOO_LARGE,
                detail="Too many files. Maximum 50 files per batch."
            )

        allowed_extensions = {'.txt', '.pdf', '.docx', '.md', '.json', '.csv'}

        # Validate and stage each uploaded file
        for file in files:
            file_extension = os.path.splitext(file.filename)[1].lower()

            if file_extension not in allowed_extensions:
                # Cleanup of already-staged files happens in the handlers below
                raise HTTPException(
                    status_code=status.HTTP_400_BAD_REQUEST,
                    detail=f"File '{file.filename}' type not supported"
                )

            with tempfile.NamedTemporaryFile(delete=False, suffix=file_extension) as temp_file:
                content = await file.read()
                temp_file.write(content)
                temp_files.append(temp_file.name)

        # Hand the staged files to the ingestion pipeline in the background
        orchestrator = agent_manager.get_orchestrator()
        ingestion_pipeline = orchestrator.rag_controller.ingestion_pipeline

        background_tasks.add_task(
            _process_batch_files,
            temp_files,
            ingestion_pipeline
        )

        upload_id = f"batch_upload_{datetime.now().timestamp()}"

        return DataUploadResponse(
            upload_id=upload_id,
            status="processing",
            data_type="batch_files",
            message=f"{len(files)} files uploaded successfully, processing in background",
            timestamp=datetime.now()
        )

    except HTTPException:
        # Remove staged temp files, then surface the intended 4xx as-is
        # (previously these were re-wrapped as 500s, and temp files leaked
        # when the orchestrator lookup failed).
        for temp_path in temp_files:
            try:
                os.unlink(temp_path)
            except OSError:
                pass
        raise
    except Exception as e:
        # Remove staged temp files before reporting the unexpected failure
        for temp_path in temp_files:
            try:
                os.unlink(temp_path)
            except OSError:
                pass
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Batch file upload failed: {str(e)}"
        )


@router.post("/upload/dataset", response_model=DataUploadResponse)
async def upload_dataset(
    request: DataUploadRequest,
    background_tasks: BackgroundTasks = BackgroundTasks(),
    deps: Dict[str, Any] = Depends(common_dependencies)
):
    """Dataset upload endpoint.

    Dispatches on ``request.data_type`` ("huggingface_dataset", "directory"
    or "file_path") and schedules the matching ingestion-pipeline task in
    the background.

    Raises:
        HTTPException 400: missing path or unsupported data_type.
        HTTPException 500: any unexpected failure.
    """
    try:
        orchestrator = agent_manager.get_orchestrator()
        ingestion_pipeline = orchestrator.rag_controller.ingestion_pipeline

        if request.data_type == "huggingface_dataset":
            # data_content is a HuggingFace dataset identifier
            background_tasks.add_task(
                ingestion_pipeline.process_huggingface_dataset,
                request.data_content,
                request.metadata or {}
            )

        elif request.data_type == "directory":
            # data_content is a server-local directory path
            if not os.path.exists(request.data_content):
                raise HTTPException(
                    status_code=status.HTTP_400_BAD_REQUEST,
                    detail=f"Directory does not exist: {request.data_content}"
                )

            background_tasks.add_task(
                ingestion_pipeline.process_directory,
                request.data_content
            )

        elif request.data_type == "file_path":
            # data_content is a server-local file path
            if not os.path.exists(request.data_content):
                raise HTTPException(
                    status_code=status.HTTP_400_BAD_REQUEST,
                    detail=f"File does not exist: {request.data_content}"
                )

            background_tasks.add_task(
                ingestion_pipeline.process_file,
                request.data_content
            )

        else:
            # Previously an unknown data_type fell through and reported
            # "processing" success without scheduling anything.
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=f"Unsupported data_type: {request.data_type}. "
                       f"Use 'huggingface_dataset', 'directory', or 'file_path'"
            )

        upload_id = f"dataset_upload_{datetime.now().timestamp()}"

        return DataUploadResponse(
            upload_id=upload_id,
            status="processing",
            data_type=request.data_type,
            message=f"Dataset upload started for {request.data_type}: {request.data_content}",
            timestamp=datetime.now()
        )

    except HTTPException:
        # Keep deliberate 400s instead of converting them to 500s
        raise
    except Exception as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Dataset upload failed: {str(e)}"
        )


@router.get("/stats/pipeline", response_model=DataStatsResponse)
async def get_pipeline_stats(
    deps: Dict[str, Any] = Depends(common_dependencies)
):
    """Return current statistics from the data ingestion pipeline."""
    try:
        pipeline = agent_manager.get_orchestrator().rag_controller.ingestion_pipeline
        stats = await pipeline.get_pipeline_stats()
        return DataStatsResponse(pipeline_stats=stats, timestamp=datetime.now())
    except Exception as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to get pipeline stats: {str(e)}"
        )


@router.get("/stats/storage")
async def get_storage_stats(
    deps: Dict[str, Any] = Depends(common_dependencies)
):
    """Report statistics for the vector store and graph store."""
    try:
        rag = agent_manager.get_orchestrator().rag_controller
        vector_store = rag.vector_store
        graph_store = rag.graph_store

        # Counting helpers are optional on a store; default to 0 when absent.
        doc_count = (
            await vector_store.get_document_count()
            if hasattr(vector_store, 'get_document_count') else 0
        )
        node_count = (
            await graph_store.get_node_count()
            if hasattr(graph_store, 'get_node_count') else 0
        )
        rel_count = (
            await graph_store.get_relationship_count()
            if hasattr(graph_store, 'get_relationship_count') else 0
        )

        return {
            "vector_store": {
                "type": vector_store.__class__.__name__,
                "document_count": doc_count,
                "collection_info": getattr(vector_store, 'collection_info', {})
            },
            "graph_store": {
                "type": graph_store.__class__.__name__,
                "node_count": node_count,
                "relationship_count": rel_count
            },
            "timestamp": datetime.now().isoformat()
        }

    except Exception as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to get storage stats: {str(e)}"
        )


@router.delete("/clear/{store_type}")
async def clear_data_store(
    store_type: str,
    confirm: bool = False,
    deps: Dict[str, Any] = Depends(common_dependencies)
):
    """Clear a data store.

    Args:
        store_type: "vector", "graph", or "all".
        confirm: must be True — guards against accidental data loss.

    Raises:
        HTTPException 400: confirm not set, or unknown store_type.
        HTTPException 500: any unexpected failure while clearing.
    """
    try:
        if not confirm:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail="Please set confirm=true to proceed with data clearing"
            )

        orchestrator = agent_manager.get_orchestrator()

        if store_type == "vector":
            # clear() is optional on a store implementation
            vector_store = orchestrator.rag_controller.vector_store
            if hasattr(vector_store, 'clear'):
                await vector_store.clear()
            message = "Vector store cleared successfully"

        elif store_type == "graph":
            graph_store = orchestrator.rag_controller.graph_store
            if hasattr(graph_store, 'clear'):
                await graph_store.clear()
            message = "Graph store cleared successfully"

        elif store_type == "all":
            vector_store = orchestrator.rag_controller.vector_store
            graph_store = orchestrator.rag_controller.graph_store

            if hasattr(vector_store, 'clear'):
                await vector_store.clear()
            if hasattr(graph_store, 'clear'):
                await graph_store.clear()
            message = "All data stores cleared successfully"

        else:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=f"Unknown store type: {store_type}. Use 'vector', 'graph', or 'all'"
            )

        return {"message": message, "timestamp": datetime.now().isoformat()}

    except HTTPException:
        # Preserve the deliberate 400 responses above; the broad handler
        # below previously converted them into misleading 500s.
        raise
    except Exception as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to clear data store: {str(e)}"
        )


@router.get("/sources")
async def list_data_sources(
    deps: Dict[str, Any] = Depends(common_dependencies)
):
    """List the data sources known to the ingestion pipeline."""
    try:
        pipeline = agent_manager.get_orchestrator().rag_controller.ingestion_pipeline
        sources = await pipeline.get_data_sources()

        return {
            "data_sources": sources,
            "total_sources": len(sources),
            "timestamp": datetime.now().isoformat()
        }

    except Exception as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to list data sources: {str(e)}"
        )


@router.post("/export")
async def export_data(
    export_format: str = "json",
    store_type: str = "all",
    deps: Dict[str, Any] = Depends(common_dependencies)
):
    """Export data from the selected store(s).

    Args:
        export_format: "json", "csv", or "jsonl".
        store_type: "vector", "graph", or "all".

    Raises:
        HTTPException 400: unsupported export format.
        HTTPException 500: any unexpected failure during export.
    """
    try:
        if export_format not in ["json", "csv", "jsonl"]:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail="Supported formats: json, csv, jsonl"
            )

        orchestrator = agent_manager.get_orchestrator()

        # Renamed from `export_data`, which shadowed this function's name
        exported = {}

        if store_type in ["vector", "all"]:
            # export_data() is optional on a store implementation
            vector_store = orchestrator.rag_controller.vector_store
            if hasattr(vector_store, 'export_data'):
                exported["vector_data"] = await vector_store.export_data()

        if store_type in ["graph", "all"]:
            graph_store = orchestrator.rag_controller.graph_store
            if hasattr(graph_store, 'export_data'):
                exported["graph_data"] = await graph_store.export_data()

        export_id = f"export_{datetime.now().timestamp()}"

        return {
            "export_id": export_id,
            "format": export_format,
            "store_type": store_type,
            "data": exported,
            "timestamp": datetime.now().isoformat()
        }

    except HTTPException:
        # Keep the deliberate 400 instead of re-wrapping it as a 500
        raise
    except Exception as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Data export failed: {str(e)}"
        )


@router.post("/validate")
async def validate_data_integrity(
    deps: Dict[str, Any] = Depends(common_dependencies)
):
    """Run integrity validation across the vector and graph stores."""
    try:
        rag = agent_manager.get_orchestrator().rag_controller

        validation_results = {
            "vector_store": {"status": "unknown", "issues": []},
            "graph_store": {"status": "unknown", "issues": []},
        }

        # A store may optionally expose its own validate() coroutine; keep
        # the "unknown" placeholder when it does not.
        for key, store in (
            ("vector_store", rag.vector_store),
            ("graph_store", rag.graph_store),
        ):
            if hasattr(store, 'validate'):
                validation_results[key] = await store.validate()

        # Overall health requires every store to report "healthy".
        overall_status = (
            "healthy"
            if all(r["status"] == "healthy" for r in validation_results.values())
            else "issues_found"
        )

        return {
            "overall_status": overall_status,
            "validation_results": validation_results,
            "timestamp": datetime.now().isoformat()
        }

    except Exception as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Data validation failed: {str(e)}"
        )


# ================================
# 辅助函数
# ================================

async def _process_batch_files(temp_files: List[str], ingestion_pipeline) -> None:
    """Process staged files sequentially (background task).

    Each temp file is deleted as soon as it has been ingested. On any
    processing failure, every path in the batch gets a best-effort unlink
    (some may already be gone) and the error is reported.

    Args:
        temp_files: paths of the staged temporary files.
        ingestion_pipeline: object exposing ``process_file(path)`` —
            presumably the RAG ingestion pipeline; shape not visible here.
    """
    try:
        for temp_file_path in temp_files:
            await ingestion_pipeline.process_file(temp_file_path)
            # Remove the staged copy immediately after successful ingestion
            os.unlink(temp_file_path)

        print(f"Batch processing completed for {len(temp_files)} files")

    except Exception as e:
        # Best-effort cleanup; was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit — narrowed to OSError (missing file,
        # permissions, etc.)
        for temp_file_path in temp_files:
            try:
                os.unlink(temp_file_path)
            except OSError:
                pass
        print(f"Batch processing failed: {str(e)}")