"""
文件管理模块 - 文件上传、存储和元数据管理
"""
import os
import shutil
import uuid
import logging
from datetime import datetime
from typing import List, Dict, Any, Optional
from pathlib import Path
import json

from ..config.settings import FILES_DIR
from ..config.database import db_manager, FileMetadata, VectorChunk
from ..utils.file_handlers import FileHandlerFactory
from ..utils.text_processor import document_processor
from ..core.vectorizer import batch_vectorizer
from ..core.milvus_client import milvus_client

logger = logging.getLogger(__name__)

class FileManager:
    """File manager.

    Handles the full lifecycle of uploaded files: on-disk storage under
    UUID-derived names, processing (text extraction, chunking,
    vectorization), persistence of metadata and chunks to the relational
    database, optional vector insertion into Milvus, and listing /
    detail / deletion / statistics operations.
    """

    def __init__(self):
        # Storage root for uploaded files; created eagerly so later writes
        # do not fail on a missing directory.
        self.files_dir = FILES_DIR
        self.files_dir.mkdir(parents=True, exist_ok=True)

    def generate_file_id(self) -> str:
        """Return a unique file id (a random UUID4 as a string)."""
        return str(uuid.uuid4())

    def save_uploaded_file(self, uploaded_file, filename: Optional[str] = None) -> Dict[str, Any]:
        """
        Save an uploaded file to disk under a UUID-derived name.

        Args:
            uploaded_file: upload object exposing ``.file`` (binary stream)
                and ``.filename`` (FastAPI ``UploadFile``-style — assumed,
                confirm against callers).
            filename: explicit original filename; falls back to
                ``uploaded_file.filename`` when omitted.

        Returns:
            Dict with ``file_id``, ``original_filename``, stored
            ``filename``, ``file_path``, ``file_size``, ``file_type``
            (the lowercase extension) and ``upload_time``.

        Raises:
            Exception: any error during the write is logged and re-raised.
        """
        try:
            file_id = self.generate_file_id()
            original_filename = filename or uploaded_file.filename

            # Preserve the original extension so downstream handlers can
            # detect the file type from the stored name.
            file_ext = Path(original_filename).suffix.lower()

            new_filename = f"{file_id}{file_ext}"
            file_path = self.files_dir / new_filename

            # Stream the upload to disk instead of buffering it in memory.
            with open(file_path, "wb") as buffer:
                shutil.copyfileobj(uploaded_file.file, buffer)

            file_size = os.path.getsize(file_path)

            file_info = {
                "file_id": file_id,
                "original_filename": original_filename,
                "filename": new_filename,
                "file_path": str(file_path),
                "file_size": file_size,
                "file_type": file_ext,
                "upload_time": datetime.now()
            }

            logger.info(f"文件保存成功: {original_filename} -> {file_path}")
            return file_info

        except Exception as e:
            logger.error(f"文件保存失败: {e}")
            raise

    def process_file(self, file_info: Dict[str, Any]) -> Dict[str, Any]:
        """
        Run the full processing pipeline for a stored file:
        metadata extraction -> chunking -> unique chunk ids ->
        vectorization -> database persistence -> (optional) Milvus
        insertion -> status update.

        Args:
            file_info: dict produced by :meth:`save_uploaded_file`.

        Returns:
            Dict with a ``success`` flag; on success also
            ``processing_info`` (chunk/vector counts, embedding
            model/dimension), on failure an ``error`` message.
            ``file_info`` is echoed back in both cases.
        """
        try:
            file_path = file_info["file_path"]

            logger.info(f"开始处理文件: {file_info['original_filename']}")

            # 1. Extract file-level metadata (title, author, mime type, ...).
            file_metadata = FileHandlerFactory.get_metadata(file_path)
            logger.debug(f"文件元数据: {file_metadata}")

            # 2. Document processing (text extraction + chunking).
            document_result = document_processor.process_file(file_path, file_metadata)
            logger.info(f"文档处理结果: success={document_result['success']}, chunks={len(document_result.get('chunks', []))}")

            if not document_result["success"]:
                error_msg = document_result.get("error", "文档处理失败")
                logger.error(f"文档处理失败: {error_msg}")
                return {
                    "success": False,
                    "error": error_msg,
                    "file_info": file_info
                }

            # Log per-chunk details for debugging.
            chunks = document_result.get('chunks', [])
            logger.debug(f"分块详情: 共{len(chunks)}个分块")
            for i, chunk in enumerate(chunks):
                logger.debug(f"分块 {i}: ID={chunk['chunk_id']}, 内容长度={chunk['content_length']}")

            # 3. Rewrite chunk ids so they are unique per file.
            self._ensure_unique_chunk_ids(document_result, file_info["file_id"])

            # 4. Vectorization.
            vectorized_result = batch_vectorizer.process_document_chunks(document_result)
            logger.info(f"向量化处理结果: success={vectorized_result['success']}, vectors={vectorized_result.get('vector_count', 0)}")

            if not vectorized_result["success"]:
                error_msg = vectorized_result.get("error", "向量化失败")
                logger.error(f"向量化失败: {error_msg}")
                return {
                    "success": False,
                    "error": error_msg,
                    "file_info": file_info
                }

            # Log per-chunk embedding details for debugging.
            vectorized_chunks = vectorized_result.get('chunks', [])
            logger.debug(f"向量化详情: 共{len(vectorized_chunks)}个向量化分块")
            for i, chunk in enumerate(vectorized_chunks):
                has_embedding = 'embedding' in chunk and chunk['embedding'] is not None
                logger.debug(f"向量化分块 {i}: ID={chunk['chunk_id']}, 有向量={has_embedding}, 向量维度={len(chunk.get('embedding', [])) if has_embedding else 0}")

            # 5. Persist metadata and chunks to the relational database.
            db_result = self._save_to_database(file_info, file_metadata, vectorized_result)
            logger.info(f"数据库保存结果: success={db_result['success']}")

            if not db_result["success"]:
                error_msg = db_result.get("error", "数据库保存失败")
                logger.error(f"数据库保存失败: {error_msg}")
                return {
                    "success": False,
                    "error": error_msg,
                    "file_info": file_info
                }

            # 6. Insert vectors into Milvus when it is reachable. A Milvus
            #    failure is deliberately non-fatal: the data is already in
            #    the relational database.
            try:
                milvus_stats = milvus_client.get_collection_stats()
                milvus_available = milvus_stats is not None
                logger.debug(f"Milvus可用性检查: {milvus_available}")
            except Exception as e:
                logger.warning(f"Milvus连接检查失败: {e}")
                milvus_available = False

            if milvus_available:
                logger.info("开始保存到Milvus")
                milvus_result = self._save_to_milvus(file_info, vectorized_result)
                logger.info(f"Milvus保存结果: success={milvus_result['success']}")

                if not milvus_result["success"]:
                    error_msg = milvus_result.get("error", "Milvus保存失败")
                    logger.warning(f"Milvus保存失败，但文件已成功保存到数据库: {error_msg}")
            else:
                logger.info("Milvus不可用，跳过向量库保存")

            # 7. Mark the file as fully processed.
            self._update_file_status(file_info["file_id"], "completed")

            result = {
                "success": True,
                "file_info": file_info,
                "processing_info": {
                    "chunk_count": len(vectorized_result["chunks"]),
                    "vector_count": vectorized_result["vector_count"],
                    "embedding_model": vectorized_result["embedding_model"],
                    "embedding_dimension": vectorized_result["embedding_dimension"]
                }
            }

            logger.info(f"文件处理完成: {file_info['original_filename']}")
            return result

        except Exception as e:
            logger.error(f"文件处理失败: {e}")
            return {
                "success": False,
                "error": str(e),
                "file_info": file_info
            }

    def _save_to_database(self, file_info: Dict[str, Any], file_metadata: Dict[str, Any],
                         vectorized_result: Dict[str, Any]) -> Dict[str, Any]:
        """Persist the file record and its vector chunks to the database.

        Returns:
            ``{"success": True}`` on commit, otherwise
            ``{"success": False, "error": ...}``.
        """
        # FIX: session must be bound before the try block — previously a
        # failure inside get_session() made the except clause raise
        # UnboundLocalError instead of reporting the real error.
        session = None
        try:
            session = db_manager.get_session()

            # 1. File-level metadata record.
            file_record = FileMetadata(
                file_id=file_info["file_id"],
                filename=file_info["original_filename"],
                file_path=file_info["file_path"],
                file_size=file_info["file_size"],
                file_type=file_info["file_type"],
                mime_type=file_metadata.get("mime_type", ""),
                is_processed=True,
                is_vectorized=True,
                chunk_count=len(vectorized_result["chunks"]),
                vector_count=vectorized_result["vector_count"],
                title=file_metadata.get("title", ""),
                author=file_metadata.get("author", ""),
                created_time=file_metadata.get("created_time"),
                modified_time=file_metadata.get("modified_time"),
                upload_time=file_info["upload_time"],
                process_time=datetime.now(),
                status="completed",
                tags=json.dumps(file_metadata.get("tags", []), ensure_ascii=False),
                category=file_metadata.get("category", "")
            )
            session.add(file_record)

            # 2. One row per vector chunk.
            for chunk in vectorized_result["chunks"]:
                chunk_record = VectorChunk(
                    chunk_id=chunk["chunk_id"],
                    file_id=file_info["file_id"],
                    chunk_index=chunk["chunk_index"],
                    content=chunk["content"],
                    content_length=chunk["content_length"],
                    vector_id=str(chunk.get("vector_id", "")),
                    embedding_model=chunk.get("embedding_model", ""),
                    page_number=chunk.get("page_number"),
                    start_position=chunk.get("start_position", 0),
                    end_position=chunk.get("end_position", 0),
                    chunk_type=chunk.get("metadata", {}).get("chunk_type", "text"),
                    language=chunk.get("metadata", {}).get("language", "")
                )
                session.add(chunk_record)

            session.commit()
            return {"success": True}

        except Exception as e:
            logger.error(f"数据库保存失败: {e}")
            if session:
                session.rollback()
            return {"success": False, "error": str(e)}
        finally:
            # Always release the session, on success and failure alike.
            if session:
                session.close()

    def _save_to_milvus(self, file_info: Dict[str, Any], vectorized_result: Dict[str, Any]) -> Dict[str, Any]:
        """Insert the vectorized chunks into the Milvus collection.

        Returns:
            ``{"success": True, "inserted_count": n}`` on success,
            otherwise ``{"success": False, "error": ...}``.
        """
        try:
            logger.info("开始保存向量数据到Milvus")

            # Build one insert payload per chunk.
            vectors_data = []
            chunks = vectorized_result.get("chunks", [])

            logger.debug(f"准备保存 {len(chunks)} 个向量块到Milvus")

            for chunk in chunks:
                vector_data = {
                    "embedding": chunk.get("embedding"),
                    "content": chunk.get("content", ""),
                    "file_id": file_info["file_id"],
                    "chunk_id": chunk.get("chunk_id", ""),
                    "chunk_index": chunk.get("chunk_index", 0),
                    "file_type": file_info.get("file_type", ""),
                    "filename": file_info.get("original_filename", ""),
                    "metadata": chunk.get("metadata", {})
                }
                vectors_data.append(vector_data)

            if not vectors_data:
                logger.warning("没有向量数据需要保存到Milvus")
                return {
                    "success": False,
                    "error": "没有向量数据需要保存"
                }

            logger.debug(f"调用Milvus客户端插入 {len(vectors_data)} 条向量数据")
            inserted_ids = milvus_client.insert_vectors(vectors_data)

            if inserted_ids:
                logger.info(f"成功保存 {len(inserted_ids)} 条向量数据到Milvus")
                # Flush so the freshly inserted vectors become searchable.
                collection = milvus_client.get_collection()
                if collection:
                    collection.flush()
                    logger.debug("Milvus集合已刷新")

                return {
                    "success": True,
                    "inserted_count": len(inserted_ids)
                }
            else:
                error_msg = "向量插入失败，未返回插入ID"
                logger.error(error_msg)
                return {
                    "success": False,
                    "error": error_msg
                }

        except Exception as e:
            error_msg = f"保存向量数据到Milvus失败: {e}"
            logger.error(error_msg)
            logger.exception(e)
            return {
                "success": False,
                "error": error_msg
            }

    def _ensure_unique_chunk_ids(self, document_result: Dict[str, Any], file_id: str):
        """Rewrite each chunk's ``chunk_id`` as ``<file_id>_chunk_<index>``
        so ids are unique across files (mutates ``document_result`` in place)."""
        try:
            chunks = document_result.get("chunks", [])
            for i, chunk in enumerate(chunks):
                # Zero-padded index keeps ids sortable in lexical order.
                chunk["chunk_id"] = f"{file_id}_chunk_{i:06d}"
        except Exception as e:
            logger.error(f"确保chunk_id唯一性失败: {e}")

    def _update_file_status(self, file_id: str, status: str, error_message: Optional[str] = None):
        """Update the stored status (and optional error message) of a file.

        Best-effort: failures are logged, never raised.
        """
        # FIX: bind session before the try so the except clause cannot
        # raise UnboundLocalError when get_session() fails.
        session = None
        try:
            session = db_manager.get_session()

            file_record = session.query(FileMetadata).filter(FileMetadata.file_id == file_id).first()
            if file_record:
                file_record.status = status
                if error_message:
                    file_record.error_message = error_message
                if status == "completed":
                    file_record.process_time = datetime.now()

                session.commit()

        except Exception as e:
            logger.error(f"更新文件状态失败: {e}")
            if session:
                session.rollback()
        finally:
            if session:
                session.close()

    def get_file_list(self, page: int = 1, page_size: int = 20,
                     status: Optional[str] = None, file_type: Optional[str] = None) -> Dict[str, Any]:
        """
        Return a paginated list of stored files.

        Args:
            page: 1-based page number.
            page_size: items per page.
            status: optional status filter (e.g. "completed").
            file_type: optional file-extension filter.

        Returns:
            Dict with ``files`` (list of summary dicts) and ``pagination``
            (page, page_size, total, total_pages). On error, an empty list
            with zeroed pagination.
        """
        # FIX: bind session before the try so the error path cannot raise
        # UnboundLocalError; finally guarantees the session is released.
        session = None
        try:
            session = db_manager.get_session()

            query = session.query(FileMetadata)

            if status:
                query = query.filter(FileMetadata.status == status)

            if file_type:
                query = query.filter(FileMetadata.file_type == file_type)

            total = query.count()

            # Newest uploads first, then apply offset/limit pagination.
            offset = (page - 1) * page_size
            files = query.order_by(FileMetadata.upload_time.desc()).offset(offset).limit(page_size).all()

            file_list = []
            for file in files:
                file_dict = {
                    "file_id": file.file_id,
                    "filename": file.filename,
                    "file_size": file.file_size,
                    "file_type": file.file_type,
                    "status": file.status,
                    "chunk_count": file.chunk_count,
                    "vector_count": file.vector_count,
                    "upload_time": file.upload_time.isoformat() if file.upload_time else None,
                    "process_time": file.process_time.isoformat() if file.process_time else None,
                    "title": file.title,
                    "author": file.author
                }
                file_list.append(file_dict)

            return {
                "files": file_list,
                "pagination": {
                    "page": page,
                    "page_size": page_size,
                    "total": total,
                    # Ceiling division without importing math.
                    "total_pages": (total + page_size - 1) // page_size
                }
            }

        except Exception as e:
            logger.error(f"获取文件列表失败: {e}")
            return {"files": [], "pagination": {"page": page, "page_size": page_size, "total": 0, "total_pages": 0}}
        finally:
            if session:
                session.close()

    def get_file_detail(self, file_id: str) -> Dict[str, Any]:
        """
        Return full details of one file, including its ordered chunks.

        Args:
            file_id: 文件ID

        Returns:
            ``{"success": True, "file_detail": {...}}`` or
            ``{"success": False, "error": ...}``.
        """
        # FIX: session bound before the try, released in finally — the old
        # code leaked the session on the "file not found" early return and
        # could raise UnboundLocalError if get_session() failed.
        session = None
        try:
            session = db_manager.get_session()

            file_record = session.query(FileMetadata).filter(FileMetadata.file_id == file_id).first()

            if not file_record:
                return {"success": False, "error": "文件不存在"}

            # Chunks in document order.
            chunks = session.query(VectorChunk).filter(VectorChunk.file_id == file_id).order_by(VectorChunk.chunk_index).all()

            file_detail = {
                "file_id": file_record.file_id,
                "filename": file_record.filename,
                "file_path": file_record.file_path,
                "file_size": file_record.file_size,
                "file_type": file_record.file_type,
                "mime_type": file_record.mime_type,
                "status": file_record.status,
                "chunk_count": file_record.chunk_count,
                "vector_count": file_record.vector_count,
                "title": file_record.title,
                "author": file_record.author,
                "created_time": file_record.created_time.isoformat() if file_record.created_time else None,
                "modified_time": file_record.modified_time.isoformat() if file_record.modified_time else None,
                "upload_time": file_record.upload_time.isoformat() if file_record.upload_time else None,
                "process_time": file_record.process_time.isoformat() if file_record.process_time else None,
                "tags": json.loads(file_record.tags) if file_record.tags else [],
                "category": file_record.category,
                "error_message": file_record.error_message,
                "chunks": [
                    {
                        "chunk_id": chunk.chunk_id,
                        "chunk_index": chunk.chunk_index,
                        "content": chunk.content,
                        "content_length": chunk.content_length,
                        "page_number": chunk.page_number,
                        "chunk_type": chunk.chunk_type,
                        "language": chunk.language
                    }
                    for chunk in chunks
                ]
            }

            return {"success": True, "file_detail": file_detail}

        except Exception as e:
            logger.error(f"获取文件详情失败: {e}")
            return {"success": False, "error": str(e)}
        finally:
            if session:
                session.close()

    def delete_file(self, file_id: str) -> Dict[str, Any]:
        """
        Delete a file: its on-disk copy, its Milvus vectors, and its
        database records (file metadata + vector chunks).

        Args:
            file_id: 文件ID

        Returns:
            ``{"success": True}`` or ``{"success": False, "error": ...}``.
        """
        # FIX: session bound before the try and released in finally — the
        # old code leaked the session on the "file not found" early return
        # and could raise UnboundLocalError if get_session() failed.
        session = None
        try:
            session = db_manager.get_session()

            file_record = session.query(FileMetadata).filter(FileMetadata.file_id == file_id).first()

            if not file_record:
                return {"success": False, "error": "文件不存在"}

            # 1. Remove the physical file if it still exists.
            if os.path.exists(file_record.file_path):
                os.remove(file_record.file_path)
                logger.info(f"物理文件已删除: {file_record.file_path}")

            # 2. Remove the vectors from Milvus.
            milvus_client.delete_vectors(file_id)

            # 3. Remove the database records (file + its chunks).
            session.delete(file_record)
            session.query(VectorChunk).filter(VectorChunk.file_id == file_id).delete()

            session.commit()

            logger.info(f"文件删除成功: {file_id}")
            return {"success": True}

        except Exception as e:
            logger.error(f"删除文件失败: {e}")
            if session:
                session.rollback()
            return {"success": False, "error": str(e)}
        finally:
            if session:
                session.close()

    def get_system_stats(self) -> Dict[str, Any]:
        """Return aggregate statistics: file counts by status and type,
        chunk counts, and Milvus collection stats. Empty dict on error."""
        # FIX: session bound before the try and released in finally (the
        # old except clause could hit UnboundLocalError).
        session = None
        try:
            session = db_manager.get_session()

            # File counts.
            total_files = session.query(FileMetadata).count()
            processed_files = session.query(FileMetadata).filter(FileMetadata.status == "completed").count()
            failed_files = session.query(FileMetadata).filter(FileMetadata.status == "failed").count()

            # Chunk counts.
            total_chunks = session.query(VectorChunk).count()

            # Per-type file counts.
            from sqlalchemy import func
            file_types = session.query(
                FileMetadata.file_type,
                func.count(FileMetadata.file_id).label('count')
            ).group_by(FileMetadata.file_type).all()

            # Milvus stats; may be None when Milvus is unavailable (see
            # process_file), so guard the .get() below — previously this
            # crashed and discarded the database stats as well.
            milvus_stats = milvus_client.get_collection_stats()

            return {
                "files": {
                    "total": total_files,
                    "processed": processed_files,
                    "failed": failed_files,
                    "processing_rate": processed_files / total_files if total_files > 0 else 0
                },
                "vectors": {
                    "total_chunks": total_chunks,
                    "milvus_entities": (milvus_stats or {}).get("num_entities", 0)
                },
                "file_types": [
                    {"type": ft.file_type, "count": ft.count}
                    for ft in file_types
                ],
                "milvus": milvus_stats
            }

        except Exception as e:
            logger.error(f"获取系统统计信息失败: {e}")
            return {}
        finally:
            if session:
                session.close()

# Module-level singleton shared by the rest of the application.
file_manager = FileManager()










