"""
数据灌入管道 - 完整的ETL流程
将原始文档转换为结构化知识
"""

import asyncio
from typing import List, Dict, Any, Optional
from pathlib import Path
import uuid
from datetime import datetime

from ...core.interfaces import Document, IngestionInterface
from ...core.events import EventEmitter
from ...core.container import container
from ...data_layer.data_manager import DataManager

from .document_parser import DocumentParserFactory
from .entity_extractor import EntityExtractor
from .embedding_generator import EmbeddingGenerator


class IngestionPipeline(IngestionInterface, EventEmitter):
    """Data-ingestion pipeline coordinating the full ETL flow.

    Converts raw documents into structured knowledge:
    parse -> extract entities/relationships -> generate embeddings ->
    persist through the data layer.  All stages report progress and
    errors through the ``EventEmitter`` interface.
    """

    def __init__(self):
        super().__init__()
        self.parser_factory = DocumentParserFactory()
        self.entity_extractor = EntityExtractor()
        self.embedding_generator = EmbeddingGenerator()
        # Resolved lazily from the DI container in initialize().
        self.data_manager: Optional[DataManager] = None
        self._initialized = False

    async def initialize(self):
        """Initialize all pipeline components (idempotent).

        Raises:
            Exception: re-raised after emitting an error event if any
                component fails to initialize.
        """
        if self._initialized:
            return

        try:
            # Bring up the processing components first.
            await self.entity_extractor.initialize()
            await self.embedding_generator.initialize()

            # Resolve the data manager from the DI container.
            self.data_manager = await container.resolve(DataManager)
            await self.data_manager.initialize()

            self._initialized = True

            await self.emit("ingestion_pipeline_initialized", {
                "components": ["parser", "entity_extractor", "embedding_generator", "data_manager"]
            })

        except Exception as e:
            await self.emit_error("ingestion_pipeline_initialization", e)
            raise

    async def process_document(self, document: Document) -> Document:
        """Process a single document: extract entities and embed it.

        Args:
            document: parsed input document.

        Returns:
            A new ``Document`` with the same id/content, metadata enriched
            with entities, relationships and processing stats, and an
            embedding vector attached.
        """
        if not self._initialized:
            await self.initialize()

        try:
            await self.emit("document_processing_started", {
                "document_id": document.id,
                "content_length": len(document.content)
            })

            # 1. Entity / relationship extraction.
            entities, relationships = await self.entity_extractor.extract_entities(
                document.content, document.metadata
            )

            # 2. Embedding generation (on text optimized for the model).
            optimized_content = await self.embedding_generator.optimize_text_for_embedding(
                document.content
            )
            embedding = await self.embedding_generator.generate_embedding(optimized_content)

            # 3. Build the enriched document (original metadata preserved,
            #    extraction results layered on top).
            enhanced_document = Document(
                id=document.id,
                content=document.content,
                metadata={
                    **document.metadata,
                    "entities": [
                        {
                            "id": entity.id,
                            "type": entity.type,
                            "properties": entity.properties
                        } for entity in entities
                    ],
                    "relationships": [
                        {
                            "source": rel.source,
                            "target": rel.target,
                            "type": rel.type,
                            "properties": rel.properties
                        } for rel in relationships
                    ],
                    "processed_at": datetime.now().isoformat(),
                    "embedding_dimension": len(embedding),
                    "entity_count": len(entities),
                    "relationship_count": len(relationships)
                },
                embedding=embedding
            )

            await self.emit("document_processing_completed", {
                "document_id": document.id,
                "entities_count": len(entities),
                "relationships_count": len(relationships),
                "embedding_dimension": len(embedding)
            })

            return enhanced_document

        except Exception as e:
            await self.emit_error("process_document", e)
            raise

    async def batch_process_documents(self, documents: List[Document]) -> List[Document]:
        """Process documents in parallel batches.

        Failed documents are reported via ``emit_error`` and skipped, so
        the returned list may be shorter than the input.

        Args:
            documents: documents to process.

        Returns:
            The successfully processed documents, in completion order.
        """
        if not self._initialized:
            await self.initialize()

        try:
            await self.emit("batch_processing_started", {
                "document_count": len(documents)
            })

            processed_documents: List[Document] = []

            # Process in fixed-size batches to bound memory usage.
            batch_size = 10
            for i in range(0, len(documents), batch_size):
                batch = documents[i:i + batch_size]

                # Run the batch concurrently; collect exceptions instead
                # of aborting the whole batch.
                tasks = [self.process_document(doc) for doc in batch]
                batch_results = await asyncio.gather(*tasks, return_exceptions=True)

                for result in batch_results:
                    if isinstance(result, Exception):
                        await self.emit_error("batch_document_processing", result)
                        continue
                    processed_documents.append(result)

                await self.emit("batch_processing_progress", {
                    "completed": len(processed_documents),
                    "total": len(documents),
                    "batch_number": i // batch_size + 1
                })

            await self.emit("batch_processing_completed", {
                "total_documents": len(documents),
                "successful_documents": len(processed_documents),
                "failed_documents": len(documents) - len(processed_documents)
            })

            return processed_documents

        except Exception as e:
            await self.emit_error("batch_process_documents", e)
            raise

    async def process_file(self, file_path: str, batch_size: int = 10) -> Dict[str, Any]:
        """Parse, process and store a single file.

        Args:
            file_path: path of the file to ingest.
            batch_size: accepted for interface symmetry with
                ``process_directory``; a single file has nothing to
                batch, so it is intentionally unused here.

        Returns:
            A summary dict with storage success flag, document id,
            content length and entity/relationship counts.

        Raises:
            FileNotFoundError: if ``file_path`` does not exist.
        """
        if not self._initialized:
            await self.initialize()

        try:
            path = Path(file_path)
            if not path.exists():
                raise FileNotFoundError(f"File not found: {path}")

            await self.emit("file_processing_started", {
                "file_path": str(path),
                "file_size": path.stat().st_size
            })

            # Read the raw bytes; the parser decides how to decode them.
            raw_content = path.read_bytes()

            # Provenance metadata attached to the parsed document.
            metadata = {
                "id": str(uuid.uuid4()),
                "file_name": path.name,
                "file_extension": path.suffix,
                "file_size": len(raw_content),
                "source_path": str(path),
                "processed_at": datetime.now().isoformat()
            }

            # Parse, enrich, then persist both raw and processed forms.
            document = await self.parser_factory.parse_document(raw_content, metadata)
            processed_document = await self.process_document(document)

            success = await self.data_manager.store_document_complete(
                doc_id=processed_document.id,
                content=processed_document.content,
                raw_content=raw_content,
                metadata=processed_document.metadata,
                embedding=processed_document.embedding
            )

            result = {
                "success": success,
                "document_id": processed_document.id,
                "content_length": len(processed_document.content),
                "entities_count": len(processed_document.metadata.get("entities", [])),
                "relationships_count": len(processed_document.metadata.get("relationships", [])),
                "file_path": str(path)
            }

            await self.emit("file_processing_completed", result)
            return result

        except Exception as e:
            await self.emit_error("process_file", e)
            raise

    async def process_directory(
        self,
        directory_path: str,
        file_patterns: Optional[List[str]] = None,
        batch_size: int = 10
    ) -> Dict[str, Any]:
        """Ingest every matching file in a directory (non-recursive).

        Args:
            directory_path: directory to scan.
            file_patterns: glob patterns to match; defaults to common
                document formats.
            batch_size: number of files processed concurrently.

        Returns:
            Dict with ``successful_files`` (per-file results),
            ``failed_files`` (path + error) and ``total_files``.

        Raises:
            FileNotFoundError: if the directory does not exist.
        """
        if not self._initialized:
            await self.initialize()

        try:
            directory = Path(directory_path)
            if not directory.exists():
                raise FileNotFoundError(f"Directory not found: {directory}")

            if file_patterns is None:
                file_patterns = ["*.pdf", "*.docx", "*.doc", "*.txt", "*.md"]

            # Collect matches; dedupe (overlapping patterns would ingest
            # the same file twice) and sort for deterministic ordering.
            matched = set()
            for pattern in file_patterns:
                matched.update(directory.glob(pattern))
            all_files = sorted(matched)

            await self.emit("directory_processing_started", {
                "directory_path": str(directory),
                "file_count": len(all_files),
                "file_patterns": file_patterns
            })

            results: Dict[str, Any] = {
                "successful_files": [],
                "failed_files": [],
                "total_files": len(all_files)
            }

            for i in range(0, len(all_files), batch_size):
                batch_files = all_files[i:i + batch_size]

                # Process the file batch concurrently; keep exceptions
                # per-file instead of failing the whole directory.
                tasks = [self.process_file(str(p)) for p in batch_files]
                batch_results = await asyncio.gather(*tasks, return_exceptions=True)

                for path, result in zip(batch_files, batch_results):
                    if isinstance(result, Exception):
                        results["failed_files"].append({
                            "file_path": str(path),
                            "error": str(result)
                        })
                    else:
                        results["successful_files"].append(result)

                await self.emit("directory_processing_progress", {
                    "completed_files": len(results["successful_files"]) + len(results["failed_files"]),
                    "total_files": len(all_files),
                    "successful_files": len(results["successful_files"]),
                    "failed_files": len(results["failed_files"])
                })

            await self.emit("directory_processing_completed", results)
            return results

        except Exception as e:
            await self.emit_error("process_directory", e)
            raise

    @staticmethod
    def _extract_sample_text(sample: Dict[str, Any]) -> str:
        """Pull the main text out of a HuggingFace dataset sample.

        Checks well-known field names first; otherwise concatenates
        every string-valued field.
        """
        for field in ("resume", "text", "content"):
            if field in sample:
                return sample[field]
        text_fields = [k for k, v in sample.items() if isinstance(v, str)]
        return " ".join(sample[field] for field in text_fields)

    async def process_huggingface_dataset(
        self,
        dataset_name: str = "opensporks/resumes",
        split: str = "train",
        max_samples: Optional[int] = None
    ) -> Dict[str, Any]:
        """Load a HuggingFace dataset, process and store its samples.

        Args:
            dataset_name: dataset identifier on the Hub.
            split: dataset split to load.
            max_samples: optional cap on samples (``None`` = no limit;
                ``0`` is honored literally as "take nothing").

        Returns:
            Summary dict with totals for processed and stored documents.

        Raises:
            ImportError: if the ``datasets`` library is not installed.
        """
        if not self._initialized:
            await self.initialize()

        try:
            # Import lazily so the library is only required for this path.
            try:
                from datasets import load_dataset
            except ImportError:
                raise ImportError("Please install datasets library: pip install datasets")

            await self.emit("huggingface_dataset_loading_started", {
                "dataset_name": dataset_name,
                "split": split,
                "max_samples": max_samples
            })

            dataset = load_dataset(dataset_name, split=split)

            # `is not None` so an explicit 0 is not treated as "no limit".
            if max_samples is not None:
                dataset = dataset.select(range(min(max_samples, len(dataset))))

            await self.emit("huggingface_dataset_loaded", {
                "dataset_size": len(dataset),
                "features": list(dataset.features.keys())
            })

            # Convert samples to Documents, skipping empty-text samples.
            documents: List[Document] = []
            for i, sample in enumerate(dataset):
                content = self._extract_sample_text(sample)
                if not content.strip():
                    continue

                documents.append(Document(
                    id=f"hf_dataset_{i}",
                    content=content,
                    metadata={
                        "source": "huggingface",
                        "dataset_name": dataset_name,
                        "split": split,
                        "index": i,
                        # Keep any extra sample fields as metadata.
                        **{k: v for k, v in sample.items() if k not in ["resume", "text", "content"]}
                    }
                ))

            processed_documents = await self.batch_process_documents(documents)

            # Persist each processed document; a storage failure for one
            # document does not abort the rest.
            storage_results: List[bool] = []
            for doc in processed_documents:
                try:
                    success = await self.data_manager.store_document_complete(
                        doc_id=doc.id,
                        content=doc.content,
                        raw_content=doc.content.encode('utf-8'),
                        metadata=doc.metadata,
                        embedding=doc.embedding
                    )
                    storage_results.append(success)
                except Exception as e:
                    await self.emit_error("document_storage", e)
                    storage_results.append(False)

            result = {
                "dataset_name": dataset_name,
                "total_samples": len(dataset),
                "processed_documents": len(processed_documents),
                "successfully_stored": sum(storage_results),
                "failed_storage": len(storage_results) - sum(storage_results)
            }

            await self.emit("huggingface_dataset_processing_completed", result)
            return result

        except Exception as e:
            await self.emit_error("process_huggingface_dataset", e)
            raise

    async def get_pipeline_stats(self) -> Dict[str, Any]:
        """Return pipeline status and, when available, storage stats.

        Returns an empty dict on failure (best-effort, never raises).
        """
        try:
            stats: Dict[str, Any] = {
                "initialized": self._initialized,
                "embedding_dimension": self.embedding_generator.get_embedding_dimension() if self._initialized else 0,
                "supported_formats": [
                    "PDF", "DOCX", "DOC", "XLSX", "XLS", "CSV",
                    "TXT", "MD", "JSON", "XML", "HTML"
                ]
            }

            if self.data_manager:
                system_stats = await self.data_manager.get_system_stats()
                stats["data_storage"] = system_stats

            return stats

        except Exception as e:
            await self.emit_error("get_pipeline_stats", e)
            return {}

    async def cleanup(self):
        """Release resources held by the pipeline (best-effort)."""
        try:
            if self.data_manager:
                await self.data_manager.cleanup()

            # Allow the pipeline to be fully re-initialized after cleanup;
            # otherwise initialize() would be a no-op with a cleaned-up
            # data manager.
            self._initialized = False

            await self.emit("ingestion_pipeline_cleanup_completed", {})

        except Exception as e:
            await self.emit_error("ingestion_pipeline_cleanup", e)