"""
文件系统文档存储实现
支持本地文件存储和元数据管理
"""

import asyncio
import json
import hashlib
from typing import List, Dict, Any, Optional
from pathlib import Path
import aiofiles
from datetime import datetime
import uuid

from ...core.interfaces import DocumentStoreInterface
from ...core.events import EventEmitter
from ...core.config import config


class FileDocumentStore(DocumentStoreInterface, EventEmitter):
    """Filesystem-backed document store.

    Document content is written under ``base_path`` and a JSON metadata
    record (path, size, SHA-256 hash, timestamps, caller metadata) is kept
    under ``base_path/metadata``.  Both trees are sharded into up to 256
    subdirectories keyed by the first two hex digits of the MD5 of the
    document id, so no single directory accumulates too many files.
    """

    def __init__(self, base_path: str = "./data/raw"):
        super().__init__()
        # Root directory for document content; metadata lives in a subtree.
        self.base_path = Path(base_path)
        self.metadata_path = self.base_path / "metadata"
        self._ensure_directories()

    def _ensure_directories(self):
        """Create the content and metadata root directories if missing."""
        self.base_path.mkdir(parents=True, exist_ok=True)
        self.metadata_path.mkdir(parents=True, exist_ok=True)

    def _shard_dir(self, root: Path, doc_id: str) -> Path:
        """Return (and create) the shard subdirectory for ``doc_id`` under ``root``.

        MD5 is used purely as a bucketing function here, not for security.
        """
        shard = root / hashlib.md5(doc_id.encode()).hexdigest()[:2]
        shard.mkdir(exist_ok=True)
        return shard

    def _get_file_path(self, doc_id: str, extension: str = "") -> Path:
        """Path where the content for ``doc_id`` is (or would be) stored."""
        return self._shard_dir(self.base_path, doc_id) / f"{doc_id}{extension}"

    def _get_metadata_path(self, doc_id: str) -> Path:
        """Path of the JSON metadata record for ``doc_id``."""
        return self._shard_dir(self.metadata_path, doc_id) / f"{doc_id}.json"

    async def store_document(self, doc_id: str, content: bytes, metadata: Dict[str, Any]) -> str:
        """Persist ``content`` and its metadata; return the stored file path.

        ``metadata`` may carry ``file_extension`` (with or without a leading
        dot) to control the stored file's suffix.  Emits ``document_stored``
        on success; on failure emits via ``emit_error`` and re-raises.
        """
        try:
            # Normalise the optional extension to a ".ext" form.
            file_extension = metadata.get("file_extension", "")
            if file_extension and not file_extension.startswith("."):
                file_extension = "." + file_extension

            # Write the document content.
            file_path = self._get_file_path(doc_id, file_extension)
            async with aiofiles.open(file_path, 'wb') as f:
                await f.write(content)

            # Wrap caller metadata with bookkeeping fields used elsewhere:
            # retrieval verifies content_hash, stats read file_size.
            full_metadata = {
                "id": doc_id,
                "file_path": str(file_path),
                "file_size": len(content),
                "content_hash": hashlib.sha256(content).hexdigest(),
                "stored_at": datetime.now().isoformat(),
                "original_metadata": metadata
            }

            # Write the metadata record.
            metadata_path = self._get_metadata_path(doc_id)
            async with aiofiles.open(metadata_path, 'w', encoding='utf-8') as f:
                await f.write(json.dumps(full_metadata, indent=2, ensure_ascii=False))

            await self.emit("document_stored", {
                "document_id": doc_id,
                "file_path": str(file_path),
                "file_size": len(content)
            })

            return str(file_path)

        except Exception as e:
            await self.emit_error("store_document", e)
            raise

    async def retrieve_document(self, doc_id: str) -> Optional[bytes]:
        """Return the content of ``doc_id``, or None if missing/corrupt.

        Verifies the stored SHA-256 hash; on mismatch emits
        ``document_integrity_error`` (plus ``emit_error``) and returns None.
        """
        try:
            # Resolve the file path through the metadata record.
            metadata = await self.get_document_metadata(doc_id)
            if not metadata:
                return None

            file_path = Path(metadata["file_path"])
            if not file_path.exists():
                await self.emit("document_not_found", {
                    "document_id": doc_id,
                    "expected_path": str(file_path)
                })
                return None

            async with aiofiles.open(file_path, 'rb') as f:
                content = await f.read()

            # Verify content integrity against the hash recorded at store time.
            content_hash = hashlib.sha256(content).hexdigest()
            expected_hash = metadata.get("content_hash")
            if expected_hash and content_hash != expected_hash:
                await self.emit("document_integrity_error", {
                    "document_id": doc_id,
                    "expected_hash": expected_hash,
                    "actual_hash": content_hash
                })
                # The original raised ValueError here only for it to be
                # swallowed by this method's own except clause; report the
                # failure and return None directly instead.
                await self.emit_error(
                    "retrieve_document",
                    ValueError(f"Document integrity check failed for {doc_id}")
                )
                return None

            await self.emit("document_retrieved", {
                "document_id": doc_id,
                "file_size": len(content)
            })

            return content

        except Exception as e:
            await self.emit_error("retrieve_document", e)
            return None

    async def get_document_metadata(self, doc_id: str) -> Optional[Dict[str, Any]]:
        """Return the full metadata record for ``doc_id``, or None if absent."""
        try:
            metadata_path = self._get_metadata_path(doc_id)
            if not metadata_path.exists():
                return None

            async with aiofiles.open(metadata_path, 'r', encoding='utf-8') as f:
                content = await f.read()
                return json.loads(content)

        except Exception as e:
            await self.emit_error("get_document_metadata", e)
            return None

    async def update_document_metadata(self, doc_id: str, metadata_updates: Dict[str, Any]) -> bool:
        """Merge ``metadata_updates`` into the caller-supplied metadata.

        Updates only the ``original_metadata`` section and stamps
        ``updated_at``.  Returns True on success, False if the document has
        no metadata record or the write fails.
        """
        try:
            current_metadata = await self.get_document_metadata(doc_id)
            if not current_metadata:
                return False

            current_metadata["original_metadata"].update(metadata_updates)
            current_metadata["updated_at"] = datetime.now().isoformat()

            metadata_path = self._get_metadata_path(doc_id)
            async with aiofiles.open(metadata_path, 'w', encoding='utf-8') as f:
                await f.write(json.dumps(current_metadata, indent=2, ensure_ascii=False))

            await self.emit("document_metadata_updated", {
                "document_id": doc_id,
                "updates": metadata_updates
            })

            return True

        except Exception as e:
            await self.emit_error("update_document_metadata", e)
            return False

    async def _load_all_metadata(self):
        """Yield every readable metadata record; silently skip corrupt files."""
        for metadata_file in self.metadata_path.rglob("*.json"):
            try:
                async with aiofiles.open(metadata_file, 'r', encoding='utf-8') as f:
                    record = json.loads(await f.read())
            except Exception:
                # Unreadable or malformed metadata file — skip it.
                continue
            yield record

    async def list_documents(self, filter_metadata: Optional[Dict[str, Any]] = None) -> List[str]:
        """Return the ids of stored documents, optionally filtered.

        ``filter_metadata`` is matched against each document's
        ``original_metadata`` via :meth:`_matches_filter`.
        """
        try:
            document_ids = []

            async for metadata in self._load_all_metadata():
                if filter_metadata:
                    original_metadata = metadata.get("original_metadata", {})
                    if not self._matches_filter(original_metadata, filter_metadata):
                        continue

                doc_id = metadata.get("id")
                if doc_id is None:
                    # Malformed record without an id — skip, as the original
                    # loop did when indexing raised.
                    continue
                document_ids.append(doc_id)

            await self.emit("documents_listed", {
                "count": len(document_ids),
                "filter_applied": filter_metadata is not None
            })

            return document_ids

        except Exception as e:
            await self.emit_error("list_documents", e)
            return []

    def _matches_filter(self, metadata: Dict[str, Any], filter_criteria: Dict[str, Any]) -> bool:
        """Return True when ``metadata`` satisfies every filter criterion.

        Matching rules per criterion value type:
        - str: case-insensitive substring match against string values,
          exact string comparison against non-string values;
        - int/float: equality;
        - list: the metadata value must be a member of the list;
        - anything else: equality.
        """
        for key, value in filter_criteria.items():
            if key not in metadata:
                return False

            metadata_value = metadata[key]

            if isinstance(value, str):
                if isinstance(metadata_value, str):
                    if value.lower() not in metadata_value.lower():
                        return False
                else:
                    if str(metadata_value) != value:
                        return False
            elif isinstance(value, (int, float)):
                if metadata_value != value:
                    return False
            elif isinstance(value, list):
                if metadata_value not in value:
                    return False
            else:
                if metadata_value != value:
                    return False

        return True

    async def delete_document(self, doc_id: str) -> bool:
        """Delete a document's content file and metadata record.

        Returns False when no metadata record exists or deletion fails.
        """
        try:
            metadata = await self.get_document_metadata(doc_id)
            if not metadata:
                return False

            # Remove the content file at its recorded path.
            file_path = Path(metadata["file_path"])
            if file_path.exists():
                file_path.unlink()

            # Remove the metadata record.
            metadata_path = self._get_metadata_path(doc_id)
            if metadata_path.exists():
                metadata_path.unlink()

            await self.emit("document_deleted", {
                "document_id": doc_id,
                "file_path": str(file_path)
            })

            return True

        except Exception as e:
            await self.emit_error("delete_document", e)
            return False

    async def get_storage_stats(self) -> Dict[str, Any]:
        """Aggregate document count, total size, and per-extension counts."""
        try:
            stats = {
                "total_documents": 0,
                "total_size_bytes": 0,
                "file_types": {},
                "storage_path": str(self.base_path)
            }

            async for metadata in self._load_all_metadata():
                stats["total_documents"] += 1
                stats["total_size_bytes"] += metadata.get("file_size", 0)

                # Count documents per original file extension.
                file_extension = metadata.get("original_metadata", {}).get("file_extension", "unknown")
                stats["file_types"][file_extension] = stats["file_types"].get(file_extension, 0) + 1

            stats["total_size_human"] = self._format_file_size(stats["total_size_bytes"])

            return stats

        except Exception as e:
            await self.emit_error("get_storage_stats", e)
            return {}

    def _format_file_size(self, size_bytes: int) -> str:
        """Render a byte count as a human-readable string (e.g. ``1.5 MB``)."""
        size = float(size_bytes)
        for unit in ('B', 'KB', 'MB', 'GB', 'TB'):
            if size < 1024.0:
                return f"{size:.1f} {unit}"
            size /= 1024.0
        return f"{size:.1f} PB"

    async def cleanup_orphaned_files(self) -> Dict[str, int]:
        """Remove content files without metadata and metadata without content.

        Pairing is done on the file stem (the document id), so a content
        file keeps its pairing regardless of its stored extension.
        """
        try:
            orphaned_files = 0
            orphaned_metadata = 0

            # Map doc id -> actual content path.  The metadata tree lives
            # under base_path and must be excluded here: the previous
            # implementation counted metadata JSON files as content, so
            # orphaned metadata was never detected.
            content_files: Dict[str, Path] = {}
            for file_path in self.base_path.rglob("*"):
                if not file_path.is_file() or file_path.name.startswith('.'):
                    continue
                if self.metadata_path in file_path.parents:
                    continue
                content_files[file_path.stem] = file_path

            # Collect ids that have a metadata record.
            metadata_ids = {p.stem for p in self.metadata_path.rglob("*.json")}

            # Delete content files with no metadata, at their real on-disk
            # path (the previous implementation rebuilt the path without the
            # stored extension, so such files were never actually removed).
            for doc_id in set(content_files) - metadata_ids:
                try:
                    content_files[doc_id].unlink()
                    orphaned_files += 1
                except Exception:
                    continue

            # Delete metadata records with no matching content file.
            for doc_id in metadata_ids - set(content_files):
                try:
                    metadata_path = self._get_metadata_path(doc_id)
                    if metadata_path.exists():
                        metadata_path.unlink()
                        orphaned_metadata += 1
                except Exception:
                    continue

            result = {
                "orphaned_files_removed": orphaned_files,
                "orphaned_metadata_removed": orphaned_metadata
            }

            await self.emit("cleanup_completed", result)
            return result

        except Exception as e:
            await self.emit_error("cleanup_orphaned_files", e)
            return {"orphaned_files_removed": 0, "orphaned_metadata_removed": 0}