import uuid
from langchain_chroma import Chroma
from langchain_community.document_loaders import WebBaseLoader, TextLoader, UnstructuredWordDocumentLoader, PyPDFLoader
from langchain_ollama import OllamaEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter

import os
import json
import pandas as pd
import xml.etree.ElementTree as ET
from langchain_community.vectorstores import Chroma
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.schema import Document
# Lazily import the doctr library to avoid initializing it at module load time;
# it is imported only where it is actually needed.

from langchain_community.document_loaders import (
    UnstructuredWordDocumentLoader,
    UnstructuredPowerPointLoader,
    UnstructuredEPubLoader,
    UnstructuredMarkdownLoader,
    UnstructuredRTFLoader,
    PyPDFLoader,
    TextLoader,
    CSVLoader,
    WebBaseLoader
)

from tool.config.config import get_config

config = get_config("config.toml")

class DoctrOCRDocumentLoader:
    """Image-to-text loader backed by the doctr OCR toolkit.

    doctr is imported lazily inside the methods so that importing this
    module never pulls in the OCR stack.  If doctr cannot be set up,
    the loader degrades gracefully: ``doctr_available`` is False and
    ``load`` returns a placeholder document instead of raising.
    """

    def __init__(self, use_gpu: bool = False) -> None:
        # Build the PyTorch-backed OCR pipeline; any failure (missing
        # package, weights download, ...) disables OCR instead of crashing.
        try:
            from doctr.io import DocumentFile
            from doctr.models import ocr_predictor

            model = ocr_predictor(pretrained=True, assume_straight_pages=True)
            self.device = "cuda" if use_gpu else "cpu"
            self.ocr_model = model.to(self.device)
            self.doctr_available = True
        except Exception as exc:
            print(f"Warning: Failed to initialize doctr OCR: {exc}")
            print("Image OCR functionality will be disabled.")
            self.doctr_available = False

    def load(self, image_path: str):
        """OCR the image at ``image_path``.

        Returns a single-element list of ``Document`` (one output line per
        recognized text line).  Raises ``FileNotFoundError`` if the path
        does not exist; other failures yield a placeholder document.
        """
        if not os.path.exists(image_path):
            raise FileNotFoundError(f"Image not found: {image_path}")

        if not self.doctr_available:
            print(f"Warning: Doctr OCR is not available. Cannot process image: {image_path}")
            return [Document(page_content="[无法处理图像：OCR功能不可用]")]

        try:
            from doctr.io import DocumentFile

            print(f"Processing image with doctr: {image_path}")
            pages = DocumentFile.from_images(image_path)
            result = self.ocr_model(pages)

            # Flatten page 0 in reading order: words joined by spaces,
            # one output line per recognized OCR line.
            text_lines = [
                " ".join(word.value for word in line.words)
                for block in result.pages[0].blocks
                for line in block.lines
            ]
            full_text = "\n".join(text_lines)

            print(f"OCR result with doctr:\n{full_text}")
            return [Document(page_content=full_text)]
        except Exception as exc:
            print(f"Error processing image with doctr: {exc}")
            return [Document(page_content=f"[图像处理错误：{str(exc)}]")]

# Single shared OCR loader instance, reused by every DocLoader so the
# (heavy) OCR model is initialized at most once per process.
ocr_loader = DoctrOCRDocumentLoader()

# Registry of per-user DocLoader instances, keyed by user name.
docloaders = {}

base_url = f"http://{config['LLM']['SERVER_IP']}:{config['LLM']['SERVER_PORT']}"
model = config['LLM']['EMBEDDING_MODEL']
print(f"Using Ollama server at: {base_url}:{model}")
local_embeddings = OllamaEmbeddings(model=model, base_url=base_url)
def regist_docloader(user):
    """Ensure a DocLoader (with its own Chroma directory) exists for *user*.

    Idempotent: a user already present in ``docloaders`` is left untouched.
    """
    if user not in docloaders:
        user_vec_db = os.path.join(config["FILE_SYSTEM"]["CHROMA_DB"], user)
        # exist_ok=True avoids the check-then-create race of the previous
        # os.path.exists() + os.makedirs() sequence.
        os.makedirs(user_vec_db, exist_ok=True)
        docloaders[user] = DocLoader(persist_directory=user_vec_db, embeddings_model=local_embeddings)

class DocLoader:
    """Per-user document ingestion pipeline backed by a Chroma vector store.

    Files are loaded through a format-specific loader (see ``loader_map``),
    split into ~500-character chunks, and indexed with ``file_id`` /
    ``source`` / ``doctype`` metadata so chunks can later be removed per file.
    """

    def __init__(self, persist_directory: str, embeddings_model) -> None:
        self.persist_directory = persist_directory
        self.embeddings_model = embeddings_model
        self.textspliter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
        self.vectostore = Chroma(
            persist_directory=self.persist_directory,
            embedding_function=self.embeddings_model
        )
        # Shared module-level OCR instance (avoids re-loading the OCR model).
        self.ocr_loader = ocr_loader

        # Dispatch table: lowercase file extension -> loader method.
        self.loader_map = {
            '.doc': self.WordLoader,
            '.docx': self.WordLoader,
            '.pdf': self.PDFLoader,
            '.txt': self.TextLoaderFunc,
            '.md': self.MarkdownLoader,
            '.epub': self.EpubLoader,
            '.csv': self.CSVLoader,
            '.ppt': self.PowerPointLoader,
            '.pptx': self.PowerPointLoader,
            '.html': self.WebLoaderFromPath,
            '.htm': self.WebLoaderFromPath,
            '.url': self.WebLoaderFromPath,
            '.json': self.JsonLoader,
            '.xml': self.XMLLoader,
            '.xls': self.ExcelLoader,
            '.xlsx': self.ExcelLoader,
            '.rtf': self.RTFLoader,
            '.jpg': self.ImageLoader,
            '.jpeg': self.ImageLoader,
            '.png': self.ImageLoader,
            '.bmp': self.ImageLoader
        }

    def add_documents_to_vectorstore(self, file: str, doctype: str = "wrongQeustion") -> None:
        """Load *file*, tag every document with metadata, split and index it.

        Raises:
            ValueError: if the file extension has no registered loader.

        NOTE: the default ``doctype`` keeps the historical (misspelled)
        value "wrongQeustion" because already-indexed chunks carry it;
        changing it would split existing data across two doctype values.
        """
        ext = os.path.splitext(file)[1].lower()
        if ext not in self.loader_map:
            raise ValueError(f"Unsupported file format: {ext}")

        print(f"Loading file: {file}")
        # NOTE(review): WebBaseLoader fetches via requests, which rejects
        # the file:// scheme — local .html/.htm/.url files probably fail
        # here; confirm and consider reading local HTML from disk instead.
        data = self.loader_map[ext](f"file://{file}" if ext in ['.html', '.htm', '.url'] else file)

        # One shared file_id per ingestion so all chunks of this file group together.
        file_id = str(uuid.uuid4())
        for doc in data:
            doc.metadata['file_id'] = file_id
            doc.metadata['source'] = file
            doc.metadata['doctype'] = doctype

        all_splits = self.textspliter.split_documents(data)
        if not all_splits:
            # Chroma rejects an empty batch — e.g. an empty or all-whitespace file.
            print(f"No content extracted from {file}; nothing added to vectorstore.")
            return

        self.vectostore.add_documents(all_splits)
        print(f"Added {len(all_splits)} chunks to vectorstore from {file}.")

    def delete_documents_by_file(self, file: str) -> None:
        """Delete every indexed chunk whose 'source' metadata equals *file*."""
        print(f"Deleting documents from: {file}")
        # NOTE(review): this relies on Chroma forwarding the `where` kwarg to
        # the underlying collection delete; some langchain Chroma versions
        # only accept `ids` — verify against the installed version.
        self.vectostore.delete(where={"source": file})

    # ====== Loader implementations ====== #
    def WordLoader(self, file_path: str):
        """Load .doc/.docx via unstructured."""
        return UnstructuredWordDocumentLoader(file_path).load()

    def PDFLoader(self, file_path: str):
        """Load a PDF, one Document per page."""
        return PyPDFLoader(file_path).load()

    def TextLoaderFunc(self, file_path: str):
        """Load a plain-text file (UTF-8)."""
        return TextLoader(file_path, encoding="utf-8").load()

    def MarkdownLoader(self, file_path: str):
        """Load a Markdown file via unstructured."""
        return UnstructuredMarkdownLoader(file_path).load()

    def EpubLoader(self, file_path: str):
        """Load an EPUB e-book via unstructured."""
        return UnstructuredEPubLoader(file_path).load()

    def PowerPointLoader(self, file_path: str):
        """Load .ppt/.pptx via unstructured."""
        return UnstructuredPowerPointLoader(file_path).load()

    def CSVLoader(self, file_path: str):
        """Load a CSV, one Document per row."""
        return CSVLoader(file_path).load()

    def WebLoaderFromPath(self, file_url: str):
        """Fetch and load an HTML page from a URL."""
        return WebBaseLoader(file_url).load()

    def RTFLoader(self, file_path: str):
        """Load an RTF file via unstructured."""
        return UnstructuredRTFLoader(file_path).load()

    def JsonLoader(self, file_path: str):
        """Load a JSON file: one Document per list item, or one for a dict."""
        with open(file_path, "r", encoding="utf-8") as f:
            data = json.load(f)
        documents = []
        if isinstance(data, list):
            for item in data:
                text = json.dumps(item, ensure_ascii=False, indent=2)
                documents.append(Document(page_content=text, metadata={}))
        elif isinstance(data, dict):
            text = json.dumps(data, ensure_ascii=False, indent=2)
            documents.append(Document(page_content=text, metadata={}))
        # Scalar top-level JSON (string/number/bool/null) yields no documents.
        return documents

    def ExcelLoader(self, file_path: str):
        """Load a spreadsheet: one Document per row, non-empty cells joined."""
        df = pd.read_excel(file_path)
        documents = []
        for _, row in df.iterrows():
            text = " ".join(str(cell) for cell in row if pd.notna(cell))
            documents.append(Document(page_content=text, metadata={}))
        return documents

    def XMLLoader(self, file_path: str):
        """Concatenate all element text (document order) into one Document."""
        root = ET.parse(file_path).getroot()
        # Element.iter() walks depth-first in document order — the same
        # visit order as an explicit recursion; tail text is ignored,
        # matching the previous behavior.
        texts = [node.text.strip() for node in root.iter() if node.text and node.text.strip()]
        return [Document(page_content="\n".join(texts), metadata={})]

    def ImageLoader(self, file_path: str):
        """OCR an image through the shared doctr loader."""
        return self.ocr_loader.load(file_path)