import json
from abc import ABC, abstractmethod
from pathlib import Path
from typing import List, Dict, Any, Tuple
from langchain_core.documents import Document
from langchain_community.document_loaders import (
    TextLoader, UnstructuredMarkdownLoader, PDFPlumberLoader,
    Docx2txtLoader, UnstructuredExcelLoader
)
import comtypes  # 处理.doc文件
import os
import tempfile
import shutil
from uuid import uuid4
import logging

logger = logging.getLogger("DocumentLoaderStrategy")

# 抽象策略接口 - 新增分片处理方法
# Abstract strategy interface, including chunk-merge handling.
class DocumentLoaderStrategy(ABC):
    """Strategy contract for loading one family of document formats.

    Concrete subclasses implement ``load`` and ``get_file_type``.
    ``merge_chunks`` ships with a binary-safe default that text-oriented
    strategies may override.
    """

    @abstractmethod
    def load(self, file_path: str) -> List[Document]:
        """Load the document at *file_path* and return a list of Documents."""
        ...

    @abstractmethod
    def get_file_type(self) -> List[str]:
        """Return the supported file extensions (e.g. [".txt", ".md"])."""
        ...

    def merge_chunks(self, chunk_files: List[str], output_path: str) -> None:
        """Concatenate chunk files into *output_path* (binary-safe default)."""
        with open(output_path, 'wb') as merged:
            for part_path in chunk_files:
                with open(part_path, 'rb') as part:
                    shutil.copyfileobj(part, merged)


# TXT/Markdown strategy - multi-encoding support with a binary repair fallback.
class TextMarkdownStrategy(DocumentLoaderStrategy):
    def load(self, file_path: str) -> List[Document]:
        """Load a .txt or .md file.

        .md files go through UnstructuredMarkdownLoader. .txt files are tried
        with common Chinese/Unicode encodings; if every attempt fails, the raw
        bytes are decoded with replacement characters as a last resort.

        Raises:
            ValueError: if the file cannot be loaded at all.
        """
        ext = Path(file_path).suffix.lower()
        try:
            if ext == ".md":
                loader = UnstructuredMarkdownLoader(
                    file_path,
                    mode="single",
                    strategy="fast"
                )
                return loader.load()

            # .txt: try common encodings in order of likelihood.
            # FIX: "latin-1" was removed from this list. latin-1 decodes ANY
            # byte sequence, so it always "succeeded", silently produced
            # mojibake for non-Latin files, and made the binary repair
            # fallback below unreachable dead code.
            encodings_to_try = ["utf-8", "gbk", "gb2312", "utf-16"]
            for encoding in encodings_to_try:
                try:
                    loader = TextLoader(
                        file_path,
                        encoding=encoding
                    )
                    docs = loader.load()
                    logger.info(f"成功加载TXT文件（编码：{encoding}）：{file_path}")
                    return docs
                except UnicodeDecodeError:
                    continue  # try the next encoding
                except Exception as e:
                    # loaders may wrap decode errors in other exception types
                    logger.warning(f"编码{encoding}加载失败：{str(e)}")
                    continue

            # All encodings failed: repair by decoding raw bytes with
            # replacement, turning undecodable sequences into '?'.
            logger.warning(f"所有编码尝试失败，使用二进制模式修复：{file_path}")
            with open(file_path, 'rb') as f:
                content = f.read()
            content = content.decode('utf-8', errors='replace').replace('\ufffd', '?')
            return [Document(
                page_content=content,
                metadata={"source": file_path, "encoding": "repaired"}
            )]

        except Exception as e:
            # Log the full stack trace for troubleshooting, then surface a
            # ValueError (chained, so the root cause is preserved).
            logger.error(f"TXT/Markdown文件加载失败：{file_path}，错误详情：{str(e)}",
                         exc_info=True)
            raise ValueError(f"Error loading {file_path}：{str(e)}") from e

    def get_file_type(self) -> List[str]:
        """Supported extensions for this strategy."""
        return [".txt", ".md"]

    def merge_chunks(self, chunk_files: List[str], output_path: str) -> None:
        """Merge text chunks into one UTF-8 file, replacing undecodable bytes."""
        with open(output_path, 'w', encoding='utf-8') as outfile:
            for chunk_path in chunk_files:
                with open(chunk_path, 'r', encoding='utf-8', errors='replace') as infile:
                    outfile.write(infile.read())


# DOCX strategy
class DocxStrategy(DocumentLoaderStrategy):
    """Loads .docx files via Docx2txtLoader."""

    def load(self, file_path: str) -> List[Document]:
        """Extract the text content of a .docx file."""
        return Docx2txtLoader(file_path).load()

    def get_file_type(self) -> List[str]:
        """Supported extensions for this strategy."""
        return [".docx"]


# DOC strategy (requires Microsoft Word via COM automation; Windows only)
class DocStrategy(DocumentLoaderStrategy):
    """Converts legacy .doc files to .docx through Word, then loads the
    result with Docx2txtLoader."""

    def __init__(self):
        self._check_comtypes_client()  # fail fast if comtypes.client is unusable

    def _check_comtypes_client(self):
        """Verify that comtypes.client exists and exposes CreateObject.

        Raises:
            ImportError: when the comtypes installation is broken/incomplete.
        """
        try:
            # Import the client submodule directly rather than via the
            # comtypes package attribute.
            from comtypes import client
            assert hasattr(client, 'CreateObject'), "comtypes.client缺少CreateObject方法"
        except ImportError:
            raise ImportError(
                "comtypes库的client模块缺失，请执行：\n"
                "pip uninstall -y comtypes && pip install comtypes==1.1.14"
            )
        except AssertionError as e:
            raise ImportError(f"comtypes.client不完整：{str(e)}，请重新安装库")

    def load(self, file_path: str) -> List[Document]:
        """Convert *file_path* (.doc) to a temporary .docx and load that.

        FIX: the conversion now writes into a private temp directory. The
        previous implementation saved ``<name>.docx`` next to the source
        file, so an existing sibling .docx was silently overwritten and then
        deleted. Word COM handles and the temp directory are now released in
        ``finally``, so a failed conversion no longer leaks a hidden Word
        process or the temp file.
        """
        word = None
        doc = None
        temp_dir = tempfile.mkdtemp()
        try:
            from comtypes import client
            word = client.CreateObject('Word.Application')
            word.Visible = False

            file_path = os.path.abspath(file_path)
            doc = word.Documents.Open(file_path)

            base_name = os.path.splitext(os.path.basename(file_path))[0]
            temp_docx_path = os.path.join(temp_dir, base_name + ".docx")
            doc.SaveAs(temp_docx_path, FileFormat=12)  # 12 = wdFormatXMLDocument (.docx)

            # Word keeps the saved file open (and possibly locked) until the
            # document is closed, so release it before reading the .docx.
            doc.Close()
            doc = None
            word.Quit()
            word = None

            return Docx2txtLoader(temp_docx_path).load()

        except Exception as e:
            logger.error(f".doc文件处理失败：{file_path}，错误：{str(e)}")
            raise
        finally:
            # Safety net: release COM objects and temp files even on failure.
            try:
                if doc is not None:
                    doc.Close()
            except Exception:
                pass
            try:
                if word is not None:
                    word.Quit()
            except Exception:
                pass
            shutil.rmtree(temp_dir, ignore_errors=True)

    def get_file_type(self) -> List[str]:
        """Supported extensions for this strategy."""
        return [".doc"]


# PDF strategy
class PdfStrategy(DocumentLoaderStrategy):
    """Loads .pdf files via PDFPlumberLoader."""

    def load(self, file_path: str) -> List[Document]:
        """Extract the text content of a PDF file."""
        return PDFPlumberLoader(file_path).load()

    def get_file_type(self) -> List[str]:
        """Supported extensions for this strategy."""
        return [".pdf"]


# XLSX strategy
class XlsxStrategy(DocumentLoaderStrategy):
    """Loads .xlsx workbooks via UnstructuredExcelLoader."""

    def load(self, file_path: str) -> List[Document]:
        """Load every sheet of the workbook as a single document."""
        excel_loader = UnstructuredExcelLoader(
            file_path,
            mode="single",
            sheet_name=None,  # None = load all sheets
        )
        return excel_loader.load()

    def get_file_type(self) -> List[str]:
        """Supported extensions for this strategy."""
        return [".xlsx"]


# JSON strategy - parses JSON, repairs mojibake, and splits over-long content.
class JsonStrategy(DocumentLoaderStrategy):
    """Loads .json files into Documents, splitting any entry whose text
    exceeds the downstream (Milvus) content-length limit."""

    def __init__(self):
        # Max characters per Document; kept below Milvus' field limit.
        self.max_single_content_length = 150000
        self.encoding = "utf-8"

    @staticmethod
    def fix_garbled_text(text: str) -> str:
        """Repair mojibake caused by UTF-8 bytes being decoded as latin-1.

        FIX: the original definition was missing ``self`` (broken as an
        instance method) and was duplicated verbatim as a nested function
        inside ``load``; it is now a single @staticmethod.
        """
        if not text:
            return text
        try:
            # Round-trip: if the text is UTF-8 mis-read as latin-1, this restores it.
            return text.encode('latin-1').decode('utf-8')
        except (UnicodeEncodeError, UnicodeDecodeError):
            # Restoration failed: try a GBK round-trip before giving up.
            try:
                return text.encode('gbk', errors='replace').decode('gbk', errors='replace')
            except Exception:
                return text  # unrepairable: return the original string

    def load(self, file_path: str) -> List[Document]:
        """Parse *file_path* as JSON and return one Document per content chunk."""

        def unescape_json(data):
            """Recursively resolve unicode escapes and repair mojibake in strings."""
            if isinstance(data, str):
                try:
                    # Resolve \\uXXXX-style escapes first...
                    processed = data.encode('utf-8').decode('unicode_escape')
                    # ...then repair any resulting mojibake.
                    return self.fix_garbled_text(processed)
                except UnicodeDecodeError:
                    processed = data.encode('utf-8', errors='replace').decode('unicode_escape', errors='replace')
                    return self.fix_garbled_text(processed)
                except UnicodeEncodeError:
                    return self.fix_garbled_text(data)
            elif isinstance(data, list):
                return [unescape_json(item) for item in data]
            elif isinstance(data, dict):
                return {k: unescape_json(v) for k, v in data.items()}
            return data

        def json_parser(path: str) -> List[Any]:
            """Load the JSON file and normalise the top level to a list."""
            with open(path, 'r', encoding=self.encoding) as f:
                data = json.load(f)
            data = unescape_json(data)
            if isinstance(data, list):
                return data
            return [data]  # dicts and primitives are wrapped in a list

        raw_data = json_parser(file_path)
        documents = []

        for i, item in enumerate(raw_data):
            # Extract the item's text, split into chunks if over-long.
            chunks = self._extract_and_split_content(item)
            for chunk_idx, chunk_content in enumerate(chunks):
                documents.append(Document(
                    page_content=chunk_content,
                    metadata={
                        "source": file_path,
                        "item_index": i,
                        "chunk_index": chunk_idx,
                        "total_chunks": len(chunks),
                        "file_type": "json",
                        "content_length": len(chunk_content)
                    }
                ))

        logger.info(f"JSON文件加载完成：{file_path}，生成{len(documents)}个文档")
        return documents

    def _extract_and_split_content(self, data: Any) -> List[str]:
        """Extract a text representation of *data* and split it if over-long."""
        if isinstance(data, dict):
            # Prefer well-known content fields before dumping the whole dict.
            content_fields = ["content", "text", "output", "description", "内容", "文本"]
            for field in content_fields:
                if field in data and isinstance(data[field], str):
                    content = data[field]
                    break
            else:
                # No obvious content field: serialise the whole dict.
                content = json.dumps(data, ensure_ascii=False, indent=2)
        elif isinstance(data, list):
            # Recurse into lists and flatten the chunk lists.
            items_content = []
            for item in data:
                items_content.extend(self._extract_and_split_content(item))
            return items_content
        else:
            content = str(data)

        if len(content) > self.max_single_content_length:
            logger.warning(f"JSON内容超长（{len(content)}字符），进行分割")
            # Hard-split at the maximum length.
            return [content[i:i + self.max_single_content_length]
                    for i in range(0, len(content), self.max_single_content_length)]
        return [content]

    def get_file_type(self) -> List[str]:
        """Supported extensions for this strategy."""
        return [".json"]

    def merge_chunks(self, chunk_files: List[str], output_path: str) -> None:
        """Merge JSON chunk files as UTF-8 text."""
        with open(output_path, 'w', encoding='utf-8') as outfile:
            for chunk_path in chunk_files:
                with open(chunk_path, 'r', encoding='utf-8') as infile:
                    outfile.write(infile.read())

# Strategy factory - maps extensions to strategies, with chunk merge-and-load.
class DocumentLoaderFactory:
    """Builds all loader strategies and dispatches by file extension."""

    def __init__(self):
        self.strategies: List[DocumentLoaderStrategy] = [
            TextMarkdownStrategy(),
            DocxStrategy(),
            PdfStrategy(),
            XlsxStrategy(),
            JsonStrategy()
        ]
        # FIX: DocStrategy raises ImportError when comtypes/Word is not
        # usable, which previously made this whole module unimportable.
        # Degrade gracefully: skip .doc support and warn instead.
        try:
            self.strategies.append(DocStrategy())
        except ImportError as e:
            logger.warning(f".doc支持不可用：{str(e)}")

        # Extension -> strategy lookup table.
        self.type_strategy_map: Dict[str, DocumentLoaderStrategy] = {}
        for strategy in self.strategies:
            for file_type in strategy.get_file_type():
                self.type_strategy_map[file_type] = strategy

    def get_strategy(self, file_path: str) -> DocumentLoaderStrategy:
        """Return the loader strategy matching *file_path*'s extension.

        Raises:
            ValueError: for unsupported extensions.
        """
        ext = Path(file_path).suffix.lower()
        if ext not in self.type_strategy_map:
            raise ValueError(f"不支持的文件类型：{ext}，支持类型：{list(self.type_strategy_map.keys())}")
        return self.type_strategy_map[ext]

    def load_document(self, file_path: str) -> List[Document]:
        """Unified loading entry point: pick a strategy and load the file.

        Raises:
            FileNotFoundError: when *file_path* does not exist.
            ValueError: for unsupported extensions.
        """
        if not Path(file_path).exists():
            raise FileNotFoundError(f"文件不存在：{file_path}")
        strategy = self.get_strategy(file_path)
        # FIX: removed a stray full-width brace '｝' from this log message.
        logger.info(f"执行解析策略:{strategy}")
        return strategy.load(file_path)

    def merge_and_load_chunks(self,
                              chunk_files: List[Tuple[str, int]],  # (chunk path, chunk index)
                              original_filename: str) -> List[Document]:
        """Merge uploaded chunk files in order, then load the merged file.

        Args:
            chunk_files: (path, index) tuples for each chunk.
            original_filename: original file name (determines the file type).

        Returns:
            The Documents loaded from the merged file.

        Raises:
            ValueError: for unsupported extensions.
        """
        # Restore the original order by chunk index.
        sorted_chunks = sorted(chunk_files, key=lambda x: x[1])
        sorted_chunk_paths = [path for path, _ in sorted_chunks]

        ext = Path(original_filename).suffix.lower()
        if ext not in self.type_strategy_map:
            raise ValueError(f"不支持的分片文件类型：{ext}")

        # Merge into a uniquely-named temp file, load, then clean up.
        temp_dir = tempfile.mkdtemp()
        try:
            temp_path = os.path.join(temp_dir, f"{uuid4()}{ext}")

            strategy = self.type_strategy_map[ext]
            strategy.merge_chunks(sorted_chunk_paths, temp_path)

            return strategy.load(temp_path)

        finally:
            # Always remove the temp directory, even if loading failed.
            shutil.rmtree(temp_dir, ignore_errors=True)


# Module-level singleton factory shared by all importers of this module.
loader_factory = DocumentLoaderFactory()
