"""数据存储服务"""
import json
import aiofiles
from typing import Optional, List, Dict, Any
from pathlib import Path
from datetime import datetime
from fastapi import UploadFile
from app.core.config import settings


class StorageService:
    """File-backed storage for uploaded documents and their vector data.

    Raw uploads are saved under ``settings.UPLOADS_DIR`` and all chunk /
    document data lives in a single JSON file at
    ``settings.VECTOR_STORE_PATH`` with a flat layout: top-level ``chunks``,
    ``documents`` and ``metadata`` keys.
    """

    def __init__(self):
        """Initialize the storage service from application settings."""
        self.vector_store_path = settings.VECTOR_STORE_PATH
        self.uploads_dir = settings.UPLOADS_DIR

    @staticmethod
    def _empty_store() -> Dict[str, Any]:
        """Return a fresh, empty vector-store structure."""
        return {"chunks": [], "documents": [], "metadata": {}}

    @staticmethod
    def _refresh_metadata(store: Dict[str, Any]) -> None:
        """Recompute the store's summary counters and last-updated timestamp.

        Creates the ``metadata`` mapping if it is missing, so callers never
        hit a KeyError on a freshly created or hand-edited store file.
        """
        metadata = store.setdefault("metadata", {})
        metadata["total_documents"] = len(store.get("documents", []))
        metadata["total_chunks"] = len(store.get("chunks", []))
        metadata["last_updated"] = datetime.now().isoformat()

    async def save_uploaded_file(self, file: UploadFile, document_id: str) -> str:
        """Persist an uploaded raw file to the uploads directory.

        Args:
            file: The uploaded file.
            document_id: Document ID used to name the stored file.

        Returns:
            Path of the stored file relative to the project root, using
            forward slashes regardless of platform.
        """
        # Ensure the target directory exists before writing (first run,
        # or the directory was cleaned out externally).
        self.uploads_dir.mkdir(parents=True, exist_ok=True)
        file_path = self.uploads_dir / f"{document_id}.txt"

        # Write asynchronously so large uploads don't block the event loop.
        content = await file.read()
        async with aiofiles.open(file_path, "wb") as f:
            await f.write(content)

        # Normalize to a forward-slash relative path for portability.
        relative_path = file_path.relative_to(settings.BASE_DIR)
        return str(relative_path).replace("\\", "/")

    async def load_vector_store(self) -> Dict[str, Any]:
        """Load the entire vector store file.

        Returns:
            The vector store data. If the store file does not exist yet
            (first run), an empty store structure is returned instead of
            raising FileNotFoundError.
        """
        try:
            async with aiofiles.open(self.vector_store_path, "r", encoding="utf-8") as f:
                content = await f.read()
        except FileNotFoundError:
            # First run: no store file on disk yet — start empty so that
            # add/list/delete operations work without special-casing.
            return self._empty_store()
        return json.loads(content)

    async def update_vector_store(self, data: Dict[str, Any]) -> None:
        """Overwrite the vector store file with ``data``.

        Args:
            data: Complete vector store data to persist.
        """
        async with aiofiles.open(self.vector_store_path, "w", encoding="utf-8") as f:
            await f.write(json.dumps(data, ensure_ascii=False, indent=2))

    async def add_document(self, document_data: Dict[str, Any]) -> None:
        """Append a new document's chunks and metadata to the store.

        Args:
            document_data: Document data containing ``chunks`` and
                ``document_metadata`` keys.
        """
        store = await self.load_vector_store()

        chunks = document_data.get("chunks", [])
        document_metadata = document_data.get("document_metadata", {})

        # Flat layout: chunks of every document share one array, with one
        # metadata entry per document alongside.
        store.setdefault("chunks", []).extend(chunks)
        store.setdefault("documents", []).append(document_metadata)

        self._refresh_metadata(store)
        await self.update_vector_store(store)

    async def load_document(self, document_id: str) -> Optional[Dict[str, Any]]:
        """Load a document's data (its document metadata).

        Args:
            document_id: Document ID.

        Returns:
            The document metadata, or None if the document does not exist.
        """
        return await self.get_document_metadata(document_id)

    async def list_documents(self) -> List[Dict[str, Any]]:
        """List summary metadata for all stored documents.

        Returns:
            One summary dict per document (id, filename, upload time,
            chunk count).
        """
        store = await self.load_vector_store()
        return [
            {
                "document_id": doc["document_id"],
                "filename": doc["filename"],
                "upload_time": doc["upload_time"],
                "chunks_count": doc["chunks_count"],
            }
            for doc in store.get("documents", [])
        ]

    async def delete_document(self, document_id: str) -> bool:
        """Remove a document from the store and delete its raw file.

        Args:
            document_id: Document ID.

        Returns:
            True if the document existed and was deleted, False otherwise.
        """
        store = await self.load_vector_store()

        # Drop the document's metadata entry, remembering how many we had
        # so we can tell whether anything was actually removed.
        original_count = len(store.get("documents", []))
        store["documents"] = [
            doc for doc in store.get("documents", [])
            if doc["document_id"] != document_id
        ]

        # Unknown document: nothing removed, leave the store untouched.
        if len(store["documents"]) == original_count:
            return False

        # Drop every chunk belonging to this document (flat layout).
        store["chunks"] = [
            chunk for chunk in store.get("chunks", [])
            if chunk["document_id"] != document_id
        ]

        self._refresh_metadata(store)
        await self.update_vector_store(store)

        # Remove the raw uploaded file; missing_ok avoids the TOCTOU race
        # of a separate exists() check followed by unlink().
        file_path = self.uploads_dir / f"{document_id}.txt"
        file_path.unlink(missing_ok=True)

        return True

    async def get_all_chunks(self) -> List[Dict[str, Any]]:
        """Return all text chunks from the flat store.

        Returns:
            List of chunk dicts (possibly empty).
        """
        store = await self.load_vector_store()
        return store.get("chunks", [])

    async def get_document_metadata(self, document_id: str) -> Optional[Dict[str, Any]]:
        """Find a document's metadata by ID.

        Args:
            document_id: Document ID.

        Returns:
            The matching metadata dict, or None if not found.
        """
        store = await self.load_vector_store()
        return next(
            (
                doc for doc in store.get("documents", [])
                if doc["document_id"] == document_id
            ),
            None,
        )

