import os
import re
import hashlib
import glob  # recursive Markdown file discovery
from markdown_it import MarkdownIt
from qdrant_client import QdrantClient, models
from sentence_transformers import SentenceTransformer
from tqdm import tqdm

class SmartMarkdownProcessor:
    """Hierarchically chunk Markdown files and index them into Qdrant.

    Every file produces one "root" point carrying the whole document, plus
    heading-aware "child" chunks. Child vectors are computed over the chunk
    text prefixed with its parent-heading breadcrumb, so retrieval keeps the
    section context. Children link back to the root via ``root_doc_id``.
    """

    def __init__(self, model_name="baai/bge-base-zh-v1.5", max_chunk_size=500, min_chunk_size=50, overlap=30):
        """
        Args:
            model_name: SentenceTransformer model used for all embeddings.
            max_chunk_size: split a chunk once its text exceeds this length.
            min_chunk_size: retained for backward compatibility; boundary
                flushes no longer drop short sections (see
                ``_hierarchical_chunking``).
            overlap: number of trailing characters carried into the next
                chunk as context after a size-triggered split.
        """
        self.encoder = SentenceTransformer(model_name)
        self.max_chunk_size = max_chunk_size
        self.min_chunk_size = min_chunk_size
        self.overlap = overlap
        self.qdrant_client = QdrantClient(host="localhost", port=6333)
        self.collection_name = "mall_review"
        # Take the vector size from the model so the collection always matches.
        self.vector_size = self.encoder.get_sentence_embedding_dimension()
        self.md_parser = MarkdownIt()

    def create_collection(self):
        """Create the collection (cosine distance) if it does not exist yet."""
        if not self.qdrant_client.collection_exists(self.collection_name):
            self.qdrant_client.create_collection(
                collection_name=self.collection_name,
                vectors_config=models.VectorParams(
                    size=self.vector_size,
                    distance=models.Distance.COSINE
                )
            )
            print(f"集合已创建: {self.collection_name}")

    def process_directory(self, directory_path):
        """Index every matching Markdown file under *directory_path*.

        For each file, uploads one root point (the whole document) followed
        by its hierarchical child chunks.
        """
        md_files = self._find_markdown_files(directory_path)
        print(f"发现 {len(md_files)} 个Markdown文件")

        for file_path in tqdm(md_files, desc="处理中"):
            relative_path = os.path.relpath(file_path, directory_path)
            with open(file_path, "r", encoding="utf-8") as f:
                content = f.read()

            # Skip empty files: embedding empty text would only add a
            # meaningless root point.
            if not content.strip():
                continue

            # Stable 64-bit ID derived from the relative path (16 hex chars).
            root_doc_id = int(hashlib.sha256(relative_path.encode()).hexdigest()[:16], 16)

            # Top-level parent block: the whole document as a single point.
            # NOTE(review): the encoder will truncate very long documents to
            # its max sequence length — the root vector is approximate.
            root_vector = self.encoder.encode(content).tolist()
            top_chunk = models.PointStruct(
                id=root_doc_id,
                vector=root_vector,
                payload={
                    "content": content,
                    "file_path": relative_path,
                    "type": "root",  # marks the parent block
                },
            )

            # Parent/child structured chunks, with the root point first.
            chunks = self._hierarchical_chunking(content, relative_path, root_doc_id)
            chunks.insert(0, top_chunk)

            self._upload_points(chunks)

    def _find_markdown_files(self, directory_path):
        """Return all ``*_cr.md`` review files under the directory (recursive).

        Matches on the file's basename (``endswith``) rather than a substring
        of the whole path, so a directory whose name happens to contain
        ``_cr.md`` cannot pull in unrelated ``.md`` files.
        """
        return [
            f for f in glob.glob(os.path.join(directory_path, "**", "*.md"), recursive=True)
            if os.path.isfile(f) and os.path.basename(f).endswith("_cr.md")
        ]

    def _hierarchical_chunking(self, content, file_path, root_doc_id):
        """AST-based parent/child chunking over the markdown-it token stream.

        Tracks a breadcrumb of parent headings. A chunk is flushed when a
        heading at the same or a higher level starts a new section, or when
        the accumulated text exceeds ``max_chunk_size`` (keeping ``overlap``
        trailing characters as context for the next chunk).
        """
        tokens = self.md_parser.parse(content)
        chunks = []
        current_chunk = {"text": "", "parent_headings": [], "level": 0}

        n = len(tokens)
        for i, token in enumerate(tokens):
            if token.type == "heading_open":
                # Heading level from the tag name: "h2" -> 2.
                try:
                    level = int(token.tag[1])
                except (IndexError, ValueError):
                    level = 1  # malformed tag: treat as h1

                # The heading text lives in the following inline token.
                if i + 1 < n and hasattr(tokens[i + 1], "content"):
                    heading_text = tokens[i + 1].content.strip()
                else:
                    heading_text = "Untitled"

                # A same-or-higher-level heading starts a new section: flush
                # whatever text has accumulated so it is not attributed to
                # the new heading. (Previously, sections shorter than
                # min_chunk_size were neither flushed nor reset and bled
                # into the next section under the wrong breadcrumb.)
                if level <= current_chunk["level"] and current_chunk["text"].strip():
                    chunks.append(self._finalize_chunk(current_chunk, file_path, root_doc_id))
                    current_chunk["text"] = ""

                # Update the breadcrumb: drop deeper headings, push this one.
                current_chunk["parent_headings"] = (
                    current_chunk["parent_headings"][: level - 1] + [heading_text]
                )
                current_chunk["level"] = level

            elif token.type == "inline":
                # Only accumulate non-empty text.
                if hasattr(token, "content") and token.content.strip():
                    current_chunk["text"] += token.content + " "

                    # Dynamic split once the chunk exceeds the max size.
                    if len(current_chunk["text"]) > self.max_chunk_size:
                        chunks.append(self._finalize_chunk(current_chunk, file_path, root_doc_id))
                        # Carry a tail of the text over as overlap context.
                        current_chunk = {
                            "text": current_chunk["text"][-self.overlap:],
                            "parent_headings": current_chunk["parent_headings"][:],
                            "level": current_chunk["level"],
                        }

        # Flush the trailing chunk, if any.
        if current_chunk["text"].strip():
            chunks.append(self._finalize_chunk(current_chunk, file_path, root_doc_id))
        return chunks

    def _finalize_chunk(self, chunk_data, file_path, root_doc_id):
        """Embed a chunk (breadcrumb + text) and build its Qdrant point."""
        # Prefix the heading breadcrumb so the embedding carries section
        # context even for short chunk bodies.
        context = " > ".join(chunk_data["parent_headings"]) + "\n" + chunk_data["text"]
        vector = self.encoder.encode(context).tolist()

        # Deterministic ID from path + breadcrumb + text prefix.
        # NOTE(review): two chunks sharing headings and the same first 50
        # characters would collide and overwrite each other on upsert.
        unique_id = int(hashlib.sha256(
            f"{file_path}_{chunk_data['parent_headings']}_{chunk_data['text'][:50]}".encode()
        ).hexdigest()[:16], 16)

        return models.PointStruct(
            id=unique_id,
            vector=vector,
            payload={
                "content": chunk_data["text"],
                "file_path": file_path,
                "parent_headings": chunk_data["parent_headings"],
                "heading_level": chunk_data["level"],
                "chunk_size": len(chunk_data["text"]),
                "root_doc_id": root_doc_id,  # link back to the parent block
                "type": "child",  # marks a child block
            },
        )

    def _upload_points(self, points):
        """Batch-upsert points to Qdrant (wait=False: async for throughput)."""
        if not points:
            return
        self.qdrant_client.upsert(
            collection_name=self.collection_name,
            points=points,
            wait=False,
        )

if __name__ == "__main__":
    processor = SmartMarkdownProcessor(
        model_name="baai/bge-base-zh-v1.5",
        max_chunk_size=500,   # 推荐值[3](@ref)
        min_chunk_size=100,
        overlap=50
    )
    processor.create_collection()
    processor.process_directory("C:\\Users\\zhang\\mall")
    print("导入完成！访问控制台：http://localhost:6333/dashboard")