import uuid
from typing import List, Optional, Dict
import threading
import chromadb
from models.document import Document, DocumentSegment, Knowledge, ProcessStatus
from models.task import Task, TaskStatus
from services.minio_service import MinioService
from services.embedding_service import EmbeddingService
from services.text_splitter import TextSplitter, SplitConfig
from config import CHROMA_CONFIG
from minio.error import MinioException
import pandas as pd
import json
import io
from pathlib import Path
from chromadb import PersistentClient
from services.chroma_service import ChromaService
from datetime import datetime
from models import db
from flask import current_app
from chromadb.config import Settings

class DocumentService:
    """Service layer for the document lifecycle.

    Coordinates three backing stores:
      * MinIO        — raw uploaded file bytes,
      * ChromaDB     — per-segment embedding vectors,
      * relational DB — Document / Task / Knowledge records and progress.

    Uploads return immediately with a task id; embedding happens in a
    background thread that re-enters the Flask application context.
    """

    def __init__(self):
        self.minio_service = MinioService()
        self.embedding_service = EmbeddingService()
        # On-disk Chroma client; anonymized telemetry explicitly disabled.
        self.chroma_client = PersistentClient(
            path=str(Path(CHROMA_CONFIG['path']).resolve()),
            settings=Settings(anonymized_telemetry=False)
        )
        self.chroma_service = ChromaService()

    def _get_collection(self, collection_name: str):
        """Return the Chroma collection with the given name, creating it if absent."""
        return self.chroma_client.get_or_create_collection(name=collection_name)

    def process_document(self, file_content: bytes, filename: str, knowledge_id: int,
                        split_config: Optional[SplitConfig] = None,
                        document_columns: Optional[List[str]] = None,
                        metadata_columns: Optional[List[str]] = None) -> dict:
        """Accept an uploaded document and start asynchronous vectorization.

        Creates the Document and Task rows, stores the raw bytes in MinIO,
        spawns a worker thread, and returns immediately.

        Args:
            file_content: Raw bytes of the uploaded file.
            filename: Original filename (extension selects the processing path).
            knowledge_id: Target knowledge base id.
            split_config: Optional splitter configuration for unstructured files.
            document_columns: For CSV/Excel, columns to embed as content.
            metadata_columns: For CSV/Excel, columns stored as vector metadata.

        Returns:
            dict with ``success``, ``document_id`` and ``task_id``.

        Raises:
            Exception: wrapping any failure, after rolling back the session.
        """
        try:
            knowledge = Knowledge.query.get_or_404(knowledge_id)
            # Capture plain values rather than ORM objects: the worker thread
            # runs in its own session and must not share detached instances.
            knowledge_id = knowledge.id
            collection_name = knowledge.collection_name
            embedding_model = knowledge.embedding_model

            # BUG FIX: the object key previously embedded a literal
            # "(unknown)" placeholder instead of the uploaded filename, so
            # download_document resolved a nameless path. Restored
            # `{filename}` — NOTE(review): confirm against MinIO contents.
            document = Document(
                knowledge_id=knowledge_id,
                filename=filename,
                minio_path=f"{uuid.uuid4()}/{filename}",
                status=ProcessStatus.PROCESSING
            )
            db.session.add(document)
            db.session.flush()  # populate document.id before commit
            document_id = document.id

            # Create the tracking task record.
            task_id = str(uuid.uuid4())
            task = Task(
                id=task_id,
                type='document_processing',
                status=TaskStatus.PENDING,
                document_id=document_id
            )
            db.session.add(task)
            db.session.commit()

            # Persist the raw file to MinIO.
            self.minio_service.upload_file(
                document.minio_path,
                io.BytesIO(file_content),
                len(file_content)
            )

            # The worker needs the real app object (not the proxy) to push
            # its own application context.
            app = current_app._get_current_object()

            # Launch async processing, passing ids instead of ORM objects.
            threading.Thread(target=self._async_process_document, args=(
                app,
                task_id,
                file_content,
                document_id,
                knowledge_id,
                collection_name,
                embedding_model,
                split_config,
                document_columns,
                metadata_columns
            )).start()

            return {
                "success": True,
                "document_id": document_id,
                "task_id": task_id
            }

        except Exception as e:
            db.session.rollback()
            raise Exception(f"处理文档失败: {str(e)}")

    def _async_process_document(self, app, task_id: str, file_content: bytes,
                              document_id: int, knowledge_id: int, collection_name: str,
                              embedding_model: str, split_config: Optional[SplitConfig] = None,
                              document_columns: Optional[List[str]] = None,
                              metadata_columns: Optional[List[str]] = None):
        """Worker-thread entry point: split, embed and store the document.

        Runs inside a fresh application context; re-fetches all ORM objects
        by id, dispatches on file extension (structured CSV/Excel vs.
        unstructured text/PDF), and records success or failure on the task.
        """
        with app.app_context():
            # BUG FIX: pre-bind so the except-handler cannot hit a NameError
            # when the initial queries themselves raise.
            task = None
            document = None
            try:
                # Re-fetch objects inside this thread's session.
                task = Task.query.get(task_id)
                document = Document.query.get(document_id)
                knowledge = Knowledge.query.get(knowledge_id)

                if not all([task, document, knowledge]):
                    print(f"One or more objects not found: task={task_id}, document={document_id}, knowledge={knowledge_id}")
                    return

                task.status = TaskStatus.PROCESSING
                db.session.commit()

                if document.filename.lower().endswith(('.csv', '.xlsx', '.xls')):
                    result = self._process_structured_document(
                        task_id=task_id,
                        file_content=file_content,
                        document=document,
                        knowledge=knowledge,
                        document_columns=document_columns,
                        metadata_columns=metadata_columns
                    )
                else:
                    result = self._process_unstructured_document(
                        task_id=task_id,
                        file_content=file_content,
                        document=document,
                        knowledge=knowledge,
                        split_config=split_config
                    )

                # Mark both records complete.
                document.status = ProcessStatus.COMPLETED
                task.status = TaskStatus.COMPLETED
                db.session.commit()

            except Exception as e:
                # Discard any half-applied changes before recording failure;
                # a failed commit otherwise leaves the session unusable.
                db.session.rollback()
                if task is not None:
                    task.status = TaskStatus.FAILED
                    task.error_message = str(e)
                if document is not None:
                    document.status = ProcessStatus.FAILED
                db.session.commit()
                print(f"异步处理文档失败: {str(e)}")

    def _process_structured_document(self, task_id: str, file_content: bytes, document: Document,
                                   knowledge: Knowledge, document_columns: Optional[List[str]] = None,
                                   metadata_columns: Optional[List[str]] = None) -> dict:
        """Embed a CSV/Excel file row by row, with per-row progress tracking.

        Each row becomes one vector: selected ``document_columns`` (or the
        whole row) are serialized to JSON as the embedded text; selected
        ``metadata_columns`` are attached as Chroma metadata.

        Returns:
            dict with ``success`` and ``segments_count``.

        Raises:
            Exception: wrapping any failure.
        """
        try:
            # Parse the tabular file.
            file_obj = io.BytesIO(file_content)
            if document.filename.lower().endswith('.csv'):
                df = pd.read_csv(file_obj)
            else:
                df = pd.read_excel(file_obj)

            # Record the total item count for progress reporting.
            task = Task.query.get(task_id)
            task.total_items = len(df)
            db.session.commit()

            vector_collection = self._get_collection(knowledge.collection_name)

            for idx, row in df.iterrows():
                # Build the content payload: keep numeric/bool values as-is,
                # stringify everything else for stable JSON serialization.
                if document_columns:
                    doc_content = {col: str(row[col]) if not isinstance(row[col], (int, float, bool)) else row[col]
                                 for col in document_columns if col in row}
                else:
                    doc_content = row.to_dict()

                # Base metadata links each vector back to its document row.
                metadata = {
                    "document_id": str(document.id),
                    "segment_index": int(idx)
                }

                # Attach requested metadata columns (same scalar-preserving rule).
                if metadata_columns:
                    for col in metadata_columns:
                        if col in row:
                            val = row[col]
                            metadata[col] = str(val) if not isinstance(val, (int, float, bool)) else val

                # Serialize to JSON (non-ASCII preserved for CJK content).
                doc_content_str = json.dumps(doc_content, ensure_ascii=False)

                # Embed the serialized row.
                embedding = self.embedding_service.get_embeddings(
                    doc_content_str,
                    model_name=knowledge.embedding_model
                )

                # Store in the vector collection under a deterministic id.
                segment_id = f"{document.id}_{idx}"
                vector_collection.add(
                    ids=[segment_id],
                    embeddings=[embedding],
                    metadatas=[metadata],
                    documents=[doc_content_str]
                )

                # # Persist segment rows to MySQL (intentionally disabled)
                # segment = DocumentSegment(
                #     document_id=document.id,
                #     segment_index=idx,
                #     text=doc_content_str,
                #     chroma_id=segment_id
                # )
                # db.session.add(segment)

                # Commit per row so clients can poll live progress.
                task.processed_items = idx + 1
                db.session.commit()

            return {"success": True, "segments_count": len(df)}

        except Exception as e:
            raise Exception(f"处理结构化文档失败: {str(e)}")

    def _process_unstructured_document(self, task_id: str, file_content: bytes,
                                     document: Document, knowledge: Knowledge,
                                     split_config: Optional[SplitConfig] = None) -> dict:
        """Split and embed a text/PDF file, with per-segment progress tracking.

        Returns:
            dict with ``success`` and ``segments_count``.

        Raises:
            Exception: if required DB rows are missing, no text could be
            extracted from a PDF, or any processing step fails.
        """
        try:
            # Re-fetch fresh ORM objects for this session.
            task = Task.query.get(task_id)
            document = Document.query.get(document.id)
            knowledge = Knowledge.query.get(knowledge.id)

            if not all([task, document, knowledge]):
                raise Exception("无法获取必要的数据库对象")

            # Only PDF gets special extraction; everything else is text.
            file_type = 'pdf' if document.filename.lower().endswith('.pdf') else 'text'

            # Split into chunks with the configured (or default) splitter.
            splitter = TextSplitter(split_config or SplitConfig())
            segments = splitter.process_file(file_content, file_type)

            print(f"Split into {len(segments)} segments")

            if not segments:
                # Fallback for text files: decode the whole payload as one
                # segment (latin1 never fails, so it is the last resort).
                if file_type == 'text':
                    try:
                        content = file_content.decode('utf-8')
                        segments = [content]
                    except UnicodeDecodeError:
                        content = file_content.decode('latin1')
                        segments = [content]
                else:
                    raise Exception("No text content extracted")

            vector_collection = self._get_collection(knowledge.collection_name)

            # Record total count for progress reporting.
            task = Task.query.get(task_id)
            task.total_items = len(segments)
            db.session.commit()

            for idx, text in enumerate(segments):
                # Embed each chunk.
                embedding = self.embedding_service.get_embeddings(
                    text,
                    model_name=knowledge.embedding_model
                )

                # Deterministic per-segment id.
                segment_id = f"{document.id}_{idx}"

                # Store vector + linkage metadata.
                vector_collection.add(
                    ids=[segment_id],
                    embeddings=[embedding],
                    metadatas=[{
                        "document_id": str(document.id),
                        "segment_index": idx
                    }],
                    documents=[text]
                )

                # # Persist segment rows to MySQL (intentionally disabled)
                # segment = DocumentSegment(
                #     document_id=document.id,
                #     segment_index=idx,
                #     text=text,
                #     chroma_id=segment_id
                # )
                # db.session.add(segment)

                # Commit per segment so clients can poll live progress.
                task.processed_items = idx + 1
                db.session.commit()

            return {"success": True, "segments_count": len(segments)}

        except Exception as e:
            raise Exception(f"处理非结构化文档失败: {str(e)}")

    def delete_document(self, document_id: int) -> dict:
        """Delete a document: its MinIO object, vectors, tasks and DB row.

        MinIO deletion is best-effort (a warning is printed on failure);
        everything else is transactional and rolled back on error.
        """
        try:
            document = Document.query.get_or_404(document_id)

            # Best-effort removal of the stored file.
            try:
                self.minio_service.delete_file(document.minio_path)
            except Exception as e:
                print(f"Warning: Failed to delete file from MinIO: {str(e)}")

            knowledge = document.knowledge
            # Previous per-segment deletion path (intentionally disabled):
            # segments = DocumentSegment.query.filter_by(document_id=document_id).all()
            # segment_ids = [segment.chroma_id for segment in segments]
            #
            # if segment_ids:
            #     try:
            #         self.chroma_service.batch_delete_records(
            #             collection_name=knowledge.collection_name,
            #             ids=segment_ids
            #         )
            #     except Exception as e:
            #         print(f"Warning: Failed to delete vectors: {str(e)}")

            # Remove all vectors tagged with this document id.
            self.chroma_service.delete_records_by_document_id(
                collection_name=knowledge.collection_name,
                document_id=f"{document_id}"
            )

            # Remove associated task records.
            tasks = Task.query.filter_by(document_id=document_id).all()
            for task in tasks:
                db.session.delete(task)

            # Remove the document row itself.
            db.session.delete(document)
            db.session.commit()

            return {"success": True, "message": "文档删除成功"}
        except Exception as e:
            db.session.rollback()
            raise Exception(f"删除文档失败: {str(e)}")

    def get_documents(self, knowledge_id: int) -> list:
        """Return all documents of a knowledge base as plain dicts."""
        try:
            documents = Document.query.filter_by(knowledge_id=knowledge_id).all()
            return [{
                'id': doc.id,
                'filename': doc.filename or '',
                'knowledge_name': doc.knowledge.name if doc.knowledge else '',
                'status': doc.status.value if doc.status else 'unknown',
                'created_at': doc.created_at.isoformat() if doc.created_at else None
            } for doc in documents]
        except Exception as e:
            raise Exception(f"获取文档列表失败: {str(e)}")

    def get_collections(self, limit: int = 20) -> List[str]:
        """List available Chroma collection names.

        Args:
            limit: Maximum number of names to return (default 20).

        Returns:
            List[str]: collection names.
        """
        try:
            collections = self.chroma_client.list_collections()
            # BUG FIX: chromadb < 0.6 returns Collection objects while
            # >= 0.6 returns plain names; normalize either way so the
            # declared List[str] contract actually holds.
            return [getattr(c, 'name', c) for c in collections[:limit]]
        except Exception as e:
            raise Exception(f"Failed to get collections: {str(e)}")

    def download_document(self, document_id):
        """Download a document's raw bytes from MinIO.

        Looks up the Document row, resolves its stored object path and
        delegates to MinioService.download_file.

        Returns:
            Whatever MinioService.download_file returns
            (file object, filename, mimetype).

        Raises:
            Exception: if the document, its path, or the download fails.
        """
        doc = Document.query.get(document_id)
        if not doc:
            raise Exception("文档未找到")
        storage_path = getattr(doc, 'minio_path', None)
        # (debug print of storage_path removed)
        if not storage_path:
            raise Exception("文档存储路径未找到")
        minio_service = MinioService()
        ret = minio_service.download_file(storage_path)
        if not ret:
            raise Exception("Minio下载文件失败")
        return ret

    def get_task_status(self, task_id: str) -> dict:
        """Return the current status/progress snapshot of a task (404 if absent)."""
        task = Task.query.get_or_404(task_id)
        return {
            "status": task.status,
            "progress": task.progress,
            "total_items": task.total_items,
            "processed_items": task.processed_items,
            "error_message": task.error_message,
            "created_at": task.created_at.isoformat(),
            "updated_at": task.updated_at.isoformat()
        }

    def create_empty_document(self, knowledge_id: int) -> Dict:
        """Create an empty, already-completed document in a knowledge base.

        Returns:
            Dict describing the new document.

        Raises:
            ValueError: on any failure, after rolling back the session.
        """
        try:
            knowledge = Knowledge.query.get_or_404(knowledge_id)

            document = Document(
                filename='空文档',
                knowledge_id=knowledge_id,
                status=ProcessStatus.COMPLETED
            )

            db.session.add(document)
            db.session.commit()

            return {
                'id': document.id,
                'filename': document.filename,
                'knowledge_name': knowledge.name,
                'status': document.status.value,
                'created_at': document.created_at.isoformat() if document.created_at else None
            }

        except Exception as e:
            db.session.rollback()
            print(f"创建空文档失败: {str(e)}")  # error log
            raise ValueError(f"创建空文档失败: {str(e)}")