import os
import re
import yaml
from datetime import datetime
from typing import List, Tuple, Dict, Optional, Callable
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_milvus import Milvus
from langchain_ollama import OllamaLLM, OllamaEmbeddings
from langchain_core.prompts import PromptTemplate

# LLM and embedding model configuration (remote Ollama server).
llm = OllamaLLM(model="qwen2.5:14b", base_url="http://192.168.7.3:11434")
embedder = OllamaEmbeddings(model="nomic-embed-text:latest", base_url="http://192.168.7.3:11434")

# Milvus vector store that holds the chunked knowledge base.
# NOTE(review): the database name appears to be encoded in the URI path
# ("/ipp_air_general") — confirm this matches the langchain_milvus
# connection convention for the deployed Milvus version.
vectorstore = Milvus(
    embedding_function=embedder,
    collection_name="knowledge",
    connection_args={"uri": "http://192.168.6.20:19530/ipp_air_general"},
    auto_id=True
)

CHUNK_SIZE = 500  # maximum characters per chunk
OVERLAP_RATIO = 0.2  # overlap between adjacent chunks (20% of CHUNK_SIZE)


def normalize_metadata(metadata):
    """Coerce every metadata value to a type the vector store supports.

    Strings, ints and floats pass through untouched; datetimes and any
    other object are replaced by their ``str()`` representation.  The
    dict is modified in place and also returned for convenience.
    """
    for key in metadata:
        value = metadata[key]
        # str/int/float are already supported; everything else (datetime,
        # list, dict, ...) is stringified.
        if not isinstance(value, (str, int, float)):
            metadata[key] = str(value)
    return metadata


def extract_keywords(llm, text, top_k=3):
    """Extract the ``top_k`` most important keywords from ``text`` via the LLM.

    Args:
        llm: a LangChain LLM (anything exposing ``invoke(str) -> str``).
        text: source text to analyze.
        top_k: number of keywords requested from the model.

    Returns:
        A comma-separated keyword string, or "无" ("none") when the model
        returns an empty response.
    """
    prompt = PromptTemplate(
        input_variables=["text", "top_k"],
        template="请从以下文本中提取 {top_k} 个最重要的关键词，并用逗号分隔：{text},请仅输出关键词,严禁输出多余的内容"
    )
    # BUGFIX: ``llm(...)`` (__call__) is deprecated in recent LangChain;
    # ``invoke()`` is the supported entry point and returns the completion
    # string directly for LLM-type runnables.
    response = llm.invoke(prompt.format(text=text, top_k=top_k)).strip()
    print(f"提取关键词: {response}")
    return response if response else "无"


def extract_entities(llm, text):
    """Extract named entities (pollutants, regulations, places, ...) from ``text``.

    Args:
        llm: a LangChain LLM (anything exposing ``invoke(str) -> str``).
        text: source text to analyze.

    Returns:
        A comma-separated entity string, or "无" ("none") when the model
        returns an empty response.
    """
    prompt = PromptTemplate(
        input_variables=["text"],
        template="请从以下文本中提取命名实体（如污染物名称、法规、地名等），并用逗号分隔：{text}，请仅输出实体, 严禁输出多余的内容"
    )
    # BUGFIX: ``llm(...)`` (__call__) is deprecated in recent LangChain;
    # use the supported ``invoke()`` API instead.
    response = llm.invoke(prompt.format(text=text)).strip()
    print(f"提取实体: {response}")
    return response if response else "无"


def parse_markdown(file_path: str) -> Tuple[Dict, List[Dict]]:
    """Parse a Markdown file into (front_matter_metadata, chunked_blocks).

    Extracts the YAML front matter, splits the body on level-2..5
    headings, tracks the heading hierarchy to build a ``title_path``
    ("A > B > C") per section, and derives ``root_id`` / ``parent_id`` /
    ``section_id`` from each section's own path.  Sections are then
    chunked via ``split_text_with_metadata`` and de-duplicated.

    Fixes over the previous revision:
      * removed the ``global root_id, parent_id, section_id`` hack that
        leaked loop state into module scope;
      * the hierarchy ids attached to an emitted section are now derived
        from that section's OWN ``title_path`` (previously they came from
        the heading currently being scanned, i.e. the *next* section);
      * the chunking loop no longer rebinds ``metadata``, so the returned
        file-level metadata is really the YAML front matter.

    Returns:
        (metadata, final_blocks): the file-level metadata dict and a list
        of chunk dicts, each ``{"text": str, "metadata": dict}``.
    """
    with open(file_path, "r", encoding="utf-8") as f:
        content = f.read()

    # Parse the YAML front matter delimited by a leading "---" pair.
    metadata = {}
    yaml_match = re.match(r"---\n(.*?)\n---", content, re.DOTALL)
    if yaml_match:
        metadata = yaml.safe_load(yaml_match.group(1)) or {}
        content = content[yaml_match.end():].strip()

    pattern = re.compile(r"^(#{2,5}) (.+)$", re.MULTILINE)  # level 2-5 headings
    matches = list(pattern.finditer(content))
    if not matches:
        return metadata, []  # no headings found: nothing to chunk

    metadata = normalize_metadata(metadata)

    def _ids_from_path(path: str) -> Dict[str, str]:
        # Derive root/parent/section identifiers from an "A > B > C" path.
        parts = path.split(" > ")
        return {
            "root_id": parts[0] if parts else "",
            "parent_id": " > ".join(parts[:-1]) if len(parts) > 1 else "",
            "section_id": parts[-1] if parts else "",
        }

    title_hierarchy = {}
    blocks = []
    seen_titles = set()  # title paths already emitted, to avoid duplicates
    last_title = None
    last_body = []
    last_title_path = ""

    def _flush_pending():
        # Emit the pending section (if any) exactly once, with ids derived
        # from its own title_path.
        if last_title and last_body and last_title_path not in seen_titles:
            full_text = f"{last_title}\n" + "\n".join(last_body)
            blocks.append({
                "text": full_text,
                "metadata": {
                    **metadata,
                    "title": last_title,
                    "title_path": last_title_path,
                    **_ids_from_path(last_title_path),
                },
            })
            seen_titles.add(last_title_path)

    for i, match in enumerate(matches):
        level = len(match.group(1))  # heading level = number of '#'
        title = match.group(2).strip()
        start = match.end()
        end = matches[i + 1].start() if i + 1 < len(matches) else len(content)
        body = content[start:end].strip()

        title_hierarchy[level] = title
        for deeper in range(level + 1, 6):  # drop stale deeper headings
            title_hierarchy.pop(deeper, None)
        title_path = " > ".join(
            title_hierarchy[lvl] for lvl in sorted(title_hierarchy)
        )

        # Emit the previous section before starting the new one.
        _flush_pending()

        if body:  # headings with no body do not start a section
            last_title = title
            last_body = [body]
            last_title_path = title_path

    _flush_pending()  # emit the final pending section

    # Chunk each section and de-duplicate identical (text, metadata) pairs.
    final_blocks = []
    seen_keys = set()
    for position, block in enumerate(blocks):
        text = str(block["text"])
        # Separate name so the file-level ``metadata`` is not clobbered.
        block_meta = {k: str(v) for k, v in block["metadata"].items()}
        block_meta["position"] = str(position)

        for chunk in split_text_with_metadata(text, block_meta, CHUNK_SIZE, OVERLAP_RATIO):
            key = (chunk["text"], tuple(chunk["metadata"].items()))
            if key not in seen_keys:
                seen_keys.add(key)
                final_blocks.append(chunk)

    return metadata, final_blocks



def split_text_with_metadata(
        text: str,
        metadata: Dict,
        chunk_size: int,
        overlap_ratio: float,
        dynamic_metadata_fn: Optional[Callable] = None
) -> List[Dict]:
    """Split ``text`` into overlapping chunks and enrich each with metadata.

    Each unique chunk gets a copy of ``metadata`` plus LLM-extracted
    ``keywords`` and ``entities``; ``dynamic_metadata_fn(index, chunk_text)``
    may contribute additional per-chunk fields.  Duplicate chunk texts are
    skipped so identical content is stored only once.
    """
    # Clamp the overlap to a sane range: non-negative, at most half a chunk.
    overlap = max(0, min(int(chunk_size * overlap_ratio), chunk_size // 2))

    splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=overlap,
        separators=["\n\n", "。", "！", "？", "\n", "，", "；"]
    )

    results = []
    seen = set()  # chunk texts already emitted
    for index, doc in enumerate(splitter.create_documents([text])):
        snippet = doc.page_content.strip()
        if snippet in seen:
            continue
        seen.add(snippet)

        chunk_meta = dict(metadata)
        if dynamic_metadata_fn:
            chunk_meta.update(dynamic_metadata_fn(index, snippet))

        chunk_meta["keywords"] = extract_keywords(llm, snippet)
        chunk_meta["entities"] = extract_entities(llm, snippet)

        results.append({"text": snippet, "metadata": chunk_meta})

    return results


def assign_position(blocks: List[Dict]) -> List[Dict]:
    """Assign each block a unique, sequential ``position`` in its metadata.

    The value is stored as a string because Milvus expects ``position``
    as ``str``.  Blocks are mutated in place and the list is returned.
    """
    position = 0
    for block in blocks:
        block["metadata"]["position"] = str(position)
        position += 1
    return blocks

def store_in_milvus(blocks: List[Dict]):
    """Persist every block's text and metadata into the Milvus collection."""
    texts, metadatas = [], []
    for block in blocks:
        texts.append(block["text"])
        metadatas.append(block["metadata"])
    # Embedding happens inside add_texts via the configured embedder.
    vectorstore.add_texts(texts, metadatas)
    print(f"成功存入 {len(blocks)} 条数据到 Milvus")



if __name__ == "__main__":
    # Ingest every Markdown file found in the knowledge-manual directory.
    source_dir = r"C:\Desktop\各类知识\知识手册"
    markdown_files = [
        name for name in os.listdir(source_dir) if name.lower().endswith(".md")
    ]
    for filename in markdown_files:
        input_path = os.path.join(source_dir, filename)
        print(f"正在处理文件: {input_path}")
        metadata, blocks = parse_markdown(input_path)
        store_in_milvus(assign_position(blocks))
