import os
import re
import gc
import io
import tempfile
import requests
import chardet
import logging
from datetime import datetime
from typing import List, Dict, Optional, Tuple, Any
from pathlib import Path
from urllib.parse import urlparse
from bs4 import BeautifulSoup
import fitz  # PyMuPDF，替代pdfplumber处理PDF
from tqdm import tqdm
from docx import Document as DocxDocument
from docx.opc.exceptions import PackageNotFoundError

from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import Docx2txtLoader, TextLoader
from langchain.schema import Document
from starlette.datastructures import UploadFile

from tools.small_chat_tools import Knowledge
from core.exception import ApiException

# 初始化日志
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)


class RAGFileProcessor:
    """RAG file-processing utility with robust text-encoding handling.

    Loads local documents (PDF / DOCX / Markdown / TXT / HTML) or remote
    HTML pages, cleans the extracted text, splits it into overlapping
    chunks and stores the chunks in a ``Knowledge`` store.
    """

    def __init__(self, knowledge: Optional[Knowledge] = None):
        """Initialize the processor.

        Args:
            knowledge: target knowledge store; a fresh ``Knowledge()``
                instance is created when omitted.
        """
        self.knowledge = knowledge or Knowledge()
        # Lowercase extension -> human-readable format label (the label is
        # surfaced to callers as ``file_type`` in result payloads).
        self.supported_extensions = {
            '.pdf': 'PDF文件',
            '.docx': 'Word文档',
            '.md': 'Markdown文件',
            '.txt': '文本文件',
            '.html': 'HTML页面',
            '.htm': 'HTML页面'
        }

        # Default splitter; process_and_store_document builds a custom one
        # when the caller overrides chunk_size / chunk_overlap.
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=500,
            chunk_overlap=50,
            length_function=len,
            separators=["\n\n", "\n", " ", ""]
        )

        # One shared HTTP session so page fetches reuse pooled connections.
        self.session = requests.Session()
        adapter = requests.adapters.HTTPAdapter(
            pool_connections=10,
            pool_maxsize=10,
            pool_block=False
        )
        self.session.mount('http://', adapter)
        self.session.mount('https://', adapter)
        self.session.headers.update({
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.9"
        })

    def close(self) -> None:
        """Release the pooled HTTP connections held by the shared session."""
        self.session.close()

    def is_supported_file(self, file_path: str) -> Tuple[bool, Optional[str]]:
        """Return ``(is_supported, format_label_or_None)`` for *file_path*."""
        format_name = self.supported_extensions.get(Path(file_path).suffix.lower())
        return format_name is not None, format_name

    @staticmethod
    def _clean_text(text: str) -> str:
        """Normalize whitespace: collapse 3+ newlines to a blank line and
        runs of spaces/tabs to a single space, then strip the ends."""
        text = re.sub(r'\n{3,}', '\n\n', text)
        text = re.sub(r'[ \t]+', ' ', text)
        return text.strip()

    @staticmethod
    def _decode_bytes(raw_content: bytes) -> Tuple[str, str]:
        """Decode raw bytes using chardet-detected encoding.

        Returns:
            ``(decoded_text, encoding_actually_used)``.
        """
        encoding = chardet.detect(raw_content)['encoding'] or 'utf-8'
        try:
            # errors='replace' never raises for a known codec; the real risk
            # is LookupError when chardet reports a codec name Python's
            # registry does not know (e.g. some exotic legacy charsets).
            return raw_content.decode(encoding, errors='replace'), encoding
        except (LookupError, UnicodeDecodeError):
            return raw_content.decode('utf-8', errors='ignore'), 'utf-8'

    def _read_text_file(self, file_path: str) -> Tuple[str, str]:
        """Read *file_path* as bytes and decode; return ``(text, encoding)``."""
        with open(file_path, 'rb') as f:
            return self._decode_bytes(f.read())

    def _clean_html_content(self, html_content: str) -> str:
        """Strip boilerplate tags from HTML and return cleaned plain text."""
        soup = BeautifulSoup(html_content, 'lxml')

        # Drop non-content tags (scripts, chrome, navigation, media, ads).
        for tag in soup.find_all([
            'script', 'style', 'iframe', 'nav', 'footer',
            'aside', 'ad', 'banner', 'noscript', 'form',
            'header', 'menu', 'svg', 'img'
        ]):
            tag.decompose()

        text = soup.get_text(separator='\n', strip=True)
        return self._clean_text(text)

    def _load_pdf(self, file_path: str) -> List[Document]:
        """Extract one Document per non-empty PDF page via PyMuPDF."""
        documents: List[Document] = []
        with fitz.open(file_path) as pdf:
            # PyMuPDF may report title as None — guard before .strip().
            title = (pdf.metadata.get("title") or "").strip() or Path(file_path).name
            for page_num, page in enumerate(pdf, 1):
                text = page.get_text().strip()
                if not text:
                    continue
                documents.append(Document(
                    page_content=self._clean_text(text),
                    metadata={
                        "source": file_path,
                        "page": page_num,
                        "total_pages": len(pdf),
                        "title": title
                    }
                ))
        return documents

    def _load_docx(self, file_path: str) -> List[Document]:
        """Parse DOCX paragraphs with python-docx; fall back to
        Docx2txtLoader when python-docx cannot open the file."""
        try:
            docx = DocxDocument(file_path)
            paragraphs = [p.text.strip() for p in docx.paragraphs if p.text.strip()]
            raw_title = docx.core_properties.title
            title = raw_title.strip() if raw_title else Path(file_path).name
            return [Document(
                page_content=self._clean_text("\n".join(paragraphs)),
                metadata={"source": file_path, "title": title}
            )]
        except Exception as e:  # includes PackageNotFoundError for corrupt files
            logger.warning(
                "python-docx failed for %s (%s: %s); falling back to Docx2txtLoader",
                file_path, type(e).__name__, e
            )
            if isinstance(e, (KeyboardInterrupt, SystemExit)):
                raise
            documents = Docx2txtLoader(file_path).load()
            for doc in documents:
                doc.page_content = self._clean_text(doc.page_content)
            return documents

    def _load_markdown(self, file_path: str) -> List[Document]:
        """Load a Markdown file; the first H1 heading becomes the title."""
        content, _encoding = self._read_text_file(file_path)
        title_match = re.search(r'^# (.+)$', content, re.MULTILINE)
        title = title_match.group(1).strip() if title_match else Path(file_path).name
        return [Document(
            page_content=self._clean_text(content),
            metadata={"source": file_path, "title": title}
        )]

    def _load_txt(self, file_path: str) -> List[Document]:
        """Load a plain-text file, recording the detected encoding."""
        content, encoding = self._read_text_file(file_path)
        return [Document(
            page_content=self._clean_text(content),
            metadata={"source": file_path, "title": Path(file_path).name, "encoding": encoding}
        )]

    def _load_html_file(self, file_path: str) -> List[Document]:
        """Load a local HTML file and keep only its cleaned plain text."""
        html_content, encoding = self._read_text_file(file_path)
        return [Document(
            page_content=self._clean_html_content(html_content),
            metadata={"source": file_path, "title": Path(file_path).name, "encoding": encoding}
        )]

    def load_document(self, file_path: str) -> List[Document]:
        """Load *file_path* into LangChain Documents based on its extension.

        Returns:
            Documents with non-empty page content only.

        Raises:
            ApiException: when parsing fails or the format is unsupported.
        """
        ext = Path(file_path).suffix.lower()
        loaders = {
            '.pdf': self._load_pdf,
            '.docx': self._load_docx,
            '.md': self._load_markdown,
            '.txt': self._load_txt,
            '.html': self._load_html_file,
            '.htm': self._load_html_file,
        }
        try:
            loader = loaders.get(ext)
            if loader is None:
                raise ValueError(f"不支持的文件格式: {ext}")
            documents = loader(file_path)
            # Drop documents that are empty after cleaning.
            return [doc for doc in documents if doc.page_content.strip()]
        except Exception as e:
            logger.error(f"加载文档失败 {file_path}: {str(e)}", exc_info=True)
            raise ApiException(f"加载文档失败: {str(e)}") from e

    def fetch_and_process_html(self, url: str, metadata: Optional[Dict[str, Any]] = None) -> Tuple[
        bool, Dict[str, Any]]:
        """Download *url*, clean the HTML, split it and store it.

        Args:
            url: http(s) URL to fetch.
            metadata: extra metadata merged into every chunk.

        Returns:
            ``(success, payload)`` — statistics on success, an ``error``
            message on failure.
        """
        try:
            if not re.match(r'^https?://', url, re.IGNORECASE):
                return False, {"error": "URL必须以http://或https://开头"}

            # Stream the body so huge pages do not block in one read.
            response = self.session.get(url, stream=True, timeout=30)
            response.raise_for_status()

            # Collect chunks in a list and join once — repeated bytes +=
            # is quadratic on large downloads.
            chunks: List[bytes] = []
            total_size = int(response.headers.get("content-length", 0))
            with tqdm(total=total_size, unit="B", unit_scale=True, desc="下载网页", mininterval=0.5) as pbar:
                for chunk in response.iter_content(chunk_size=32768):
                    if chunk:
                        chunks.append(chunk)
                        pbar.update(len(chunk))
            raw_content = b"".join(chunks)

            # requests falls back to ISO-8859-1 when the Content-Type header
            # carries no charset — the classic mojibake source for Chinese
            # pages — so in that case trust chardet over the header default.
            encoding = response.encoding
            if not encoding or encoding.lower() in ('iso-8859-1', 'latin-1'):
                encoding = chardet.detect(raw_content)['encoding'] or 'utf-8'
            try:
                html_content = raw_content.decode(encoding, errors='replace')
            except (LookupError, UnicodeDecodeError):
                html_content = raw_content.decode('utf-8', errors='ignore')
                encoding = 'utf-8'

            cleaned_content = self._clean_html_content(html_content)
            if len(cleaned_content) < 100:
                return False, {"error": "HTML页面有效内容过少（<100字符）"}

            # Prefer <title>; fall back to the hostname.
            soup = BeautifulSoup(html_content, 'lxml')
            title = (soup.title.string.strip() if (soup.title and soup.title.string) else urlparse(url).netloc)

            base_metadata = {
                "source": url,
                "title": title,
                "file_type": "html",
                "downloaded_at": datetime.now().isoformat(),
                "status_code": response.status_code,
                "final_url": response.url,
                "encoding": encoding,
                "content_length": len(cleaned_content)
            }
            if metadata:
                base_metadata.update(metadata)

            raw_document = Document(page_content=cleaned_content, metadata=base_metadata)
            split_documents = self.text_splitter.split_documents([raw_document])
            if not split_documents:
                return False, {"error": "HTML内容分割后为空"}

            texts = [doc.page_content for doc in split_documents]
            metadatas = [doc.metadata for doc in split_documents]
            success, inserted_ids = self.knowledge.add_texts(texts=texts, metadatas=metadatas)

            if not success:
                return False, {"error": "HTML内容存储到知识库失败"}
            return True, {
                "url": url,
                "title": title,
                "split_chunks": len(split_documents),
                "inserted_count": len(inserted_ids),
                "total_in_knowledge": self.knowledge.get_document_count()
            }

        except requests.exceptions.Timeout:
            return False, {"error": "请求超时（30秒）"}
        except requests.exceptions.HTTPError as e:
            return False, {"error": f"HTTP错误: {str(e)}"}
        except Exception as e:
            logger.error(f"处理网页失败 {url}: {str(e)}", exc_info=True)
            return False, {"error": f"处理网页时出错: {str(e)}"}

    def process_and_store_document(
            self,
            file_path: str,
            metadata: Optional[Dict[str, Any]] = None,
            chunk_size: Optional[int] = None,
            chunk_overlap: Optional[int] = None
    ) -> Tuple[bool, Dict[str, Any]]:
        """Load, split and store a local document in the knowledge base.

        Args:
            file_path: path to a supported local file.
            metadata: extra metadata merged into every chunk.
            chunk_size: optional splitter override (both overrides must be
                given to take effect).
            chunk_overlap: optional splitter override; 0 is valid.

        Returns:
            ``(success, payload)`` with statistics or an ``error`` message.
        """
        supported, format_name = self.is_supported_file(file_path)
        if not supported:
            return False, {"error": f"不支持的文件格式: {Path(file_path).suffix}"}

        try:
            raw_documents = self.load_document(file_path)
            if not raw_documents:
                return False, {"error": "文档加载后无有效内容"}

            file_name = os.path.basename(file_path)
            base_metadata = {
                "source": file_name,
                "file_path": file_path,
                "file_type": format_name,
                "file_size": os.path.getsize(file_path),
                "processed_at": datetime.now().isoformat()
            }
            if metadata:
                base_metadata.update(metadata)

            # Merge shared metadata into each page-level document.
            for doc in raw_documents:
                doc.metadata = {**doc.metadata, **base_metadata}

            # Honour caller-supplied chunking. Compare against None (not
            # truthiness) so chunk_overlap=0 is a usable override.
            splitter = self.text_splitter
            if chunk_size is not None and chunk_overlap is not None:
                splitter = RecursiveCharacterTextSplitter(
                    chunk_size=chunk_size,
                    chunk_overlap=chunk_overlap,
                    length_function=len,
                    separators=["\n\n", "\n", " ", ""]
                )

            split_documents = splitter.split_documents(raw_documents)
            if not split_documents:
                return False, {"error": "文档分割后无有效内容"}

            texts = [doc.page_content for doc in split_documents]
            metadatas = [doc.metadata for doc in split_documents]
            success, inserted_ids = self.knowledge.add_texts(texts=texts, metadatas=metadatas)

            if not success:
                return False, {"error": "文档存储到知识库失败"}
            return True, {
                "message": "文档处理成功",
                "file_name": file_name,
                "file_type": format_name,
                "raw_pages": len(raw_documents),
                "split_chunks": len(split_documents),
                "inserted_count": len(inserted_ids),
                "total_in_knowledge": self.knowledge.get_document_count()
            }

        except Exception as e:
            logger.error(f"处理文档失败 {file_path}: {str(e)}", exc_info=True)
            return False, {"error": f"处理文档时出错: {str(e)}"}

    def process_uploaded_file(
            self,
            file: UploadFile,
            metadata: Optional[Dict[str, Any]] = None
    ) -> Tuple[bool, Dict[str, Any]]:
        """Persist an uploaded file to a temp path, then process and store it.

        The temp file is always removed, even on failure.
        """
        supported, format_name = self.is_supported_file(file.filename)
        if not supported:
            return False, {
                "error": f"不支持的文件格式: {Path(file.filename).suffix}",
                "supported_formats": list(self.supported_extensions.items())
            }

        temp_file_path = None
        try:
            suffix = Path(file.filename).suffix
            with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as temp_file:
                # Copy in 1 MiB chunks so large uploads need not fit in
                # memory all at once.
                while True:
                    chunk = file.file.read(1024 * 1024)
                    if not chunk:
                        break
                    temp_file.write(chunk)
                temp_file_path = temp_file.name

            return self.process_and_store_document(
                file_path=temp_file_path,
                metadata=metadata
            )

        except Exception as e:
            logger.error(f"处理上传文件失败 {file.filename}: {str(e)}", exc_info=True)
            return False, {"error": f"处理上传文件时出错: {str(e)}"}

        finally:
            if temp_file_path and os.path.exists(temp_file_path):
                try:
                    os.unlink(temp_file_path)
                except Exception as e:
                    logger.warning(f"清理临时文件失败 {temp_file_path}: {str(e)}")

    def process_and_store_directory(
            self,
            dir_path: str,
            recursive: bool = False,
            metadata: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """Process every supported file in *dir_path*.

        Args:
            dir_path: directory to scan.
            recursive: descend into subdirectories when True.
            metadata: extra metadata merged into every chunk of every file.

        Returns:
            Aggregate statistics, including per-file failure details.
        """
        if not os.path.isdir(dir_path):
            return {"error": f"目录不存在: {dir_path}"}

        total_files = 0
        processed_files = 0
        failed_files = []
        total_chunks = 0
        total_inserted = 0

        for root, _, files in os.walk(dir_path):
            for file in files:
                file_path = os.path.join(root, file)
                supported, _ = self.is_supported_file(file_path)
                if not supported:
                    continue

                total_files += 1
                try:
                    success, result = self.process_and_store_document(
                        file_path=file_path,
                        metadata=metadata
                    )
                    if success:
                        processed_files += 1
                        total_chunks += result.get("split_chunks", 0)
                        total_inserted += result.get("inserted_count", 0)
                    else:
                        failed_files.append({
                            "file_path": file_path,
                            "error": result.get("error", "未知错误")
                        })
                except Exception as e:
                    failed_files.append({
                        "file_path": file_path,
                        "error": str(e)
                    })
                # Periodically reclaim memory during long batch runs.
                if total_files % 20 == 0:
                    gc.collect()

            # os.walk yields the top directory first; stopping after the
            # first iteration gives non-recursive behaviour.
            if not recursive:
                break

        return {
            "total_files_found": total_files,
            "processed_successfully": processed_files,
            "failed_files": len(failed_files),
            "failed_details": failed_files,
            "total_chunks_created": total_chunks,
            "total_inserted_to_knowledge": total_inserted,
            "final_knowledge_count": self.knowledge.get_document_count()
        }


def create_rag_processor() -> RAGFileProcessor:
    """Factory: build a RAGFileProcessor wired to a default Knowledge store."""
    processor = RAGFileProcessor()
    return processor