# Standard library
import os
import re
from datetime import timedelta
from functools import lru_cache

# Third-party
import nltk
import torch
import whisper
from bs4 import BeautifulSoup
from langchain.docstore.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_unstructured import UnstructuredLoader
from unstructured.cleaners.core import clean, remove_punctuation, replace_unicode_quotes
from unstructured.partition.auto import partition
from unstructured.partition.text import partition_text
from unstructured.staging.base import convert_to_isd


# Text preprocessing utilities shared by the document and video pipelines.
class TextPreprocessor:
    @staticmethod
    def clean_text(text):
        """Normalize raw extracted text.

        Steps: normalize unicode quotes, strip HTML markup (best effort),
        remove URLs and e-mail addresses, and collapse whitespace runs.
        Returns the cleaned string.
        """
        # Normalize curly/unicode quotation marks to plain ASCII quotes.
        text = replace_unicode_quotes(text)
        # If the text is HTML markup, keep only the visible text. Plain text
        # passes through BeautifulSoup unchanged; on any parser failure we
        # simply keep the string as-is (best effort, not an error).
        try:
            text = BeautifulSoup(text, "html.parser").get_text()
        except Exception:
            pass
        # Remove URLs.
        text = re.sub(r'http\S+|www\S+', '', text)
        # Remove e-mail addresses.
        text = re.sub(r'\S+@\S+', '', text)
        # Collapse whitespace runs into single spaces and trim the ends.
        text = re.sub(r'\s+', ' ', text).strip()
        return text

    @staticmethod
    def is_high_quality(text):
        """Heuristic quality filter.

        Rejects text shorter than 10 characters (after stripping) or text
        where more than 30% of characters are neither alphanumeric, CJK
        ideographs, nor whitespace. Returns True for acceptable text.
        """
        if len(text.strip()) < 10:
            return False
        # Ratio of "symbol" characters to total length; max(1, ...) guards
        # against division by zero on empty input.
        non_text_chars = re.findall(r'[^a-zA-Z0-9\u4e00-\u9fff\s]', text)
        if len(non_text_chars) / max(1, len(text)) > 0.3:
            return False
        return True

    @staticmethod
    def enhance_text(text):
        """Split *text* into sentences, drop fragments of <=5 characters,
        and rejoin the survivors with single spaces."""
        sentences = nltk.sent_tokenize(text)
        sentences = [s for s in sentences if len(s) > 5]
        return " ".join(sentences)


@lru_cache(maxsize=1)
def _get_whisper_model(name='large-v2'):
    """Load the Whisper ASR model once and cache it.

    The original code reloaded the multi-gigabyte model on every call,
    which dominated runtime when transcribing many videos.
    """
    return whisper.load_model(name)


# Use Whisper to produce SRT-style segments (with timestamps) from a video.
def video_to_srt_segments(video_path):
    """Transcribe *video_path* into a list of cleaned, timestamped Documents.

    Each Whisper segment is cleaned and quality-filtered via
    TextPreprocessor; segments that fail the filter are skipped.
    Timestamps are stored as H:MM:SS strings in the Document metadata.
    """
    model = _get_whisper_model()
    result = model.transcribe(video_path, fp16=False)
    segments = result.get("segments", [])
    preprocessor = TextPreprocessor()
    docs = []

    for seg in segments:
        start = str(timedelta(seconds=int(seg["start"])))
        end = str(timedelta(seconds=int(seg["end"])))
        content = seg["text"].strip()

        # Clean the transcript and drop low-quality segments.
        cleaned_content = preprocessor.clean_text(content)
        if not preprocessor.is_high_quality(cleaned_content):
            continue

        enhanced_content = preprocessor.enhance_text(cleaned_content)
        if not enhanced_content:
            continue

        doc = Document(
            page_content=f"[讲解视频] {enhanced_content}",
            metadata={"start_time": start, "end_time": end, "source": os.path.basename(video_path)}
        )
        docs.append(doc)
        print(enhanced_content)

    return docs


# Load a Word document and convert it into section-grouped Documents.
def process_docx_file(docx_path):
    """Parse a .docx file into LangChain Documents grouped by section heading.

    Text elements are cleaned and quality-filtered; image elements become
    inline "[图片: ...]" placeholder strings in the section body. A short
    text line (<10 words) is treated as a section heading that flushes the
    current section and starts a new one.

    NOTE(review): the "images" metadata key (listing ALL images in the file)
    is attached only to the final flushed section, not to earlier ones —
    confirm whether that asymmetry is intentional.
    """
    # Directory for extracted images, created next to the source document.
    output_dir = os.path.join(os.path.dirname(docx_path), "extracted_images")
    os.makedirs(output_dir, exist_ok=True)

    # Partition the document into typed elements (text, titles, images, ...).
    # NOTE(review): recent `unstructured` releases renamed this kwarg to
    # `extract_image_block_output_dir` — verify against the pinned version.
    elements = partition(
        filename=docx_path,
        extract_images_into_dir=output_dir,  # directory for extracted images
        strategy="hi_res"  # high-resolution strategy for richer extraction
    )

    preprocessor = TextPreprocessor()
    processed_docs = []
    current_section = ""   # heading of the section currently being built
    current_content = []   # text/image placeholder lines for that section

    for element in elements:
        element_type = type(element).__name__
        text = str(element).strip()

        if not text:
            continue

        # Image elements: store a textual placeholder instead of the pixels.
        if element_type == "Image":
            image_filename = element.metadata.filename
            image_path = os.path.join(output_dir, image_filename)
            # NOTE(review): `metadata.text` may not exist on image metadata in
            # every unstructured version — confirm, or this could raise.
            image_alt_text = element.metadata.text  # alt text, if available

            # Placeholder carries the file name, plus alt text when present.
            if image_alt_text:
                image_text = f"[图片: {os.path.basename(image_path)} - 描述: {image_alt_text}]"
            else:
                image_text = f"[图片: {os.path.basename(image_path)}]"

            current_content.append(image_text)
            continue

        # Text elements: clean, quality-filter, and sentence-normalize.
        cleaned_text = preprocessor.clean_text(text)
        if not preprocessor.is_high_quality(cleaned_text):
            continue

        enhanced_text = preprocessor.enhance_text(cleaned_text)
        if not enhanced_text:
            continue

        # Heading heuristic: short lines start a new section; flush the
        # accumulated content of the previous section first.
        if len(enhanced_text.split()) < 10:  # word-count threshold; tunable
            if current_section and current_content:
                processed_docs.append(Document(
                    page_content="\n".join(current_content),
                    metadata={"section": current_section, "source": os.path.basename(docx_path)}
                ))
                current_content = []
            current_section = enhanced_text
        else:
            current_content.append(enhanced_text)

    # Flush the final section, attaching the names of all extracted images.
    if current_section and current_content:
        processed_docs.append(Document(
            page_content="\n".join(current_content),
            metadata={"section": current_section, "source": os.path.basename(docx_path),
                      "images": [e.metadata.filename for e in elements if type(e).__name__ == "Image"]}
        ))

    return processed_docs

# Load all chapter documents and video transcript segments.
def load_all_documents(base_path=r"/Tools/retrieval_qa_chatglm/Knowledge_Base_Constructing/input",
                       chapters=('cp07', 'cp08', 'cp09')):
    """Collect Documents from every .docx and video file under *base_path*.

    Parameters
    ----------
    base_path : str
        Root directory containing one sub-directory per chapter.
        Defaults to the original hard-coded input path.
    chapters : sequence of str
        Chapter sub-directory names to process.

    Returns
    -------
    list[Document]
        All successfully processed fragments. A failure in any single
        file is reported and skipped rather than aborting the whole run.
    """
    print("Loading all documents...")
    all_docs = []

    for chapter in chapters:
        chapter_path = os.path.join(base_path, chapter)
        # Robustness fix: a missing chapter directory previously crashed the
        # run inside os.listdir; now it is reported and skipped.
        if not os.path.isdir(chapter_path):
            print(f"  跳过缺失的章节目录: {chapter_path}")
            continue
        print(f"正在处理章节: {chapter}")

        # Load Word documents in the chapter directory.
        docx_files = [f for f in os.listdir(chapter_path) if f.endswith('.docx')]
        for docx_file in docx_files:
            docx_path = os.path.join(chapter_path, docx_file)
            try:
                print(f"  正在处理文档: {docx_file}")
                docx_docs = process_docx_file(docx_path)
                all_docs.extend(docx_docs)
            except Exception as e:
                print(f"    处理文档时出错: {e}")

        # Load video files (recursively) and transcribe them to SRT segments.
        for root, _, files in os.walk(chapter_path):
            for file in files:
                if file.endswith(('.mp4', '.avi', '.mkv')):
                    video_path = os.path.join(root, file)
                    try:
                        print(f"  正在处理视频: {file}")
                        srt_docs = video_to_srt_segments(video_path)
                        all_docs.extend(srt_docs)
                    except Exception as e:
                        print(f"    处理视频时出错: {e}")

    print(f"✅ 文档加载完成！共处理 {len(all_docs)} 个文档片段")
    return all_docs


# Build and persist the FAISS knowledge base from all loaded documents.
def build_vector_store(
        output_path=r"/Tools/retrieval_qa_chatglm/Knowledge_Base_Constructing/output/tfjs_knowledge3_faiss",
        embedding_model_path=r"E:\projects\PycharmProjects\cnsoft\Intelligent Education Agent\Knowledge_Base_Constructing\local_model\BAAI\bge-small-zh-v1.5",
        *, verbose=False):
    """Load all documents, split them into chunks, embed them, and save a
    FAISS index to *output_path*.

    Parameters
    ----------
    output_path : str
        Directory where the FAISS index is saved (previously hard-coded).
    embedding_model_path : str
        Local path of the HuggingFace embedding model (previously hard-coded).
    verbose : bool, keyword-only
        When True, dump every preprocessed document's full text to stdout
        (this was previously an unconditional debug print).

    Returns
    -------
    FAISS
        The built vector store (also persisted to disk).
    """
    all_docs = load_all_documents()

    # Chunking tuned for mixed Chinese/English text: prefer paragraph and
    # sentence boundaries (。！？) before falling back to spaces/characters.
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=800,
        chunk_overlap=100,
        separators=["\n\n", "\n", "。", "！", "？", " ", ""]
    )
    split_docs = splitter.split_documents(all_docs)

    # Optional debug dump of the preprocessed text.
    if verbose:
        print("打印预处理文本内容")
        for doc in all_docs:
            print(doc.page_content)

    # Local BGE embedding model; use the GPU when one is available.
    embedding_model = HuggingFaceEmbeddings(
        model_name=embedding_model_path,
        model_kwargs={'device': 'cuda' if torch.cuda.is_available() else 'cpu'}
    )

    # Build and persist the vector store.
    vector_store = FAISS.from_documents(split_docs, embedding_model)
    vector_store.save_local(output_path)

    # Summary statistics; max(1, ...) guards the empty-corpus case.
    doc_count = len(all_docs)
    chunk_count = len(split_docs)
    avg_chunk_size = sum(len(doc.page_content) for doc in split_docs) / max(1, chunk_count)

    print(f"✅ 知识库构建成功！")
    print(f"  - 原始文档片段数: {doc_count}")
    print(f"  - 向量化分块数: {chunk_count}")
    print(f"  - 平均分块大小: {avg_chunk_size:.2f} 字符")

    return vector_store


if __name__ == "__main__":
    # Script entry point: build and persist the FAISS knowledge base.
    build_vector_store()