import os
import re
import hashlib
import glob  # used by _find_sql_files for recursive *.sql discovery
from markdown_it import MarkdownIt
from qdrant_client import QdrantClient, models
from sentence_transformers import SentenceTransformer
from tqdm import tqdm
from services.sql_segment import segment_sql_script

class SmartSqlProcessor:
    """Segment SQL files into parent/child chunks, embed them, and index them in Qdrant.

    Each table discovered by the SQL parser becomes one "root" point (the
    table's parent block); the table's record sections become "child" points
    that link back to the root via the ``root_doc_id`` payload field.
    """

    def __init__(self, model_name="baai/bge-base-zh-v1.5", max_chunk_size=500, min_chunk_size=50, overlap=30):
        """Set up the embedding model, Qdrant connection, and chunking parameters.

        Args:
            model_name: SentenceTransformer model used for embeddings.
            max_chunk_size: Maximum characters per child chunk.
            min_chunk_size: Fragments shorter than this (produced when an
                oversized child is split) are discarded.
            overlap: Characters shared between consecutive pieces of a split child.
        """
        self.encoder = SentenceTransformer(model_name)
        self.max_chunk_size = max_chunk_size
        self.min_chunk_size = min_chunk_size
        self.overlap = overlap
        self.qdrant_client = QdrantClient(host="192.168.109.128", port=6333)
        self.collection_name = "mall_sql"
        self.vector_size = self.encoder.get_sentence_embedding_dimension()
        self.sql_parser = segment_sql_script

    def create_collection(self):
        """Create the target collection (cosine distance) if it does not exist yet."""
        if not self.qdrant_client.collection_exists(self.collection_name):
            self.qdrant_client.create_collection(
                collection_name=self.collection_name,
                vectors_config=models.VectorParams(
                    size=self.vector_size,
                    distance=models.Distance.COSINE,
                ),
            )
            print(f"集合已创建: {self.collection_name}")

    def delete_file_chunks(self, file_path):
        """Delete every point whose payload ``file_path`` equals *file_path*."""
        delete_filter = models.Filter(
            must=[models.FieldCondition(
                key="file_path",
                match=models.MatchValue(value=str(file_path)),
            )]
        )
        result = self.qdrant_client.delete(
            collection_name=self.collection_name,
            points_selector=models.FilterSelector(filter=delete_filter),
        )
        # Report the outcome of the synchronous delete.
        if result.status == models.UpdateStatus.COMPLETED:
            print(f"✅ 成功删除文件 {file_path} 关联的子块")
        else:
            print(f"❌ 删除操作失败: {result.status}")

    def process_directory(self, directory_path):
        """Re-index every ``*.sql`` file under *directory_path* (recursively)."""
        sql_files = self._find_sql_files(directory_path)
        print(f"发现 {len(sql_files)} 个sql文件")

        for file_path in tqdm(sql_files, desc="处理中"):
            relative_path = os.path.relpath(file_path, directory_path)
            with open(file_path, "r", encoding="utf-8") as f:
                content = f.read()

            # Purge stale points for this file. Current payloads store the
            # path as passed to _hierarchical_chunking (file_path); the
            # relative-path delete additionally covers points written by
            # earlier runs that stored relative paths.
            self.delete_file_chunks(relative_path)
            self.delete_file_chunks(file_path)

            # Build the parent/child chunk structure and upload it.
            chunks = self._hierarchical_chunking(content, file_path)
            self._upload_points(chunks)

    def _find_sql_files(self, directory_path):
        """Return all regular ``*.sql`` files under *directory_path*, recursively."""
        # The glob pattern already restricts matches to the .sql extension;
        # the original's extra '".sql" in f' test was redundant.
        pattern = os.path.join(directory_path, "**", "*.sql")
        return [f for f in glob.glob(pattern, recursive=True) if os.path.isfile(f)]

    def _hierarchical_chunking(self, content, file_path):
        """Build the root (parent) and child chunk points for one SQL file.

        Args:
            content: Full text of the SQL file.
            file_path: Path stored in each point's payload.

        Returns:
            List of ``models.PointStruct`` — one root point per table plus
            its child points.
        """
        # sql_parser returns (table -> record sections, table -> parent block).
        block_structure, parent_blocks = self.sql_parser(content)
        chunks = []
        file_name = os.path.basename(file_path)
        step = self.max_chunk_size - self.overlap  # stride between split pieces

        for table, children in block_structure.items():
            # Deterministic id derived from the table name so re-imports
            # overwrite the same root point instead of duplicating it.
            root_doc_id = int(hashlib.sha256(table.encode()).hexdigest()[:16], 16)
            root_vector = self.encoder.encode(parent_blocks[table]).tolist()
            chunks.append(models.PointStruct(
                id=root_doc_id,
                vector=root_vector,
                payload={
                    "content": parent_blocks[table],
                    "table_name": table,
                    "file_path": file_path,
                    "file_name": file_name,
                    "title": file_name,
                    "heading_level": 0,  # top-level heading
                    "type": "root",  # marks the parent block
                    "chunk_size": len(parent_blocks[table]),
                },
            ))

            for child in children:
                if len(child) > self.max_chunk_size:
                    # Split oversized children into overlapping windows. The
                    # final window is produced by the range itself — the
                    # original appended a second, duplicated tail chunk after
                    # the loop, which is removed here.
                    pieces = [child[i:i + self.max_chunk_size]
                              for i in range(0, len(child), step)]
                    # Drop fragments below the minimum size. The original's
                    # `continue` ran after the append, so it never filtered.
                    pieces = [p for p in pieces if len(p) >= self.min_chunk_size]
                else:
                    pieces = [child]

                for piece in pieces:
                    chunk_data = {
                        "text": piece + "\n",
                        "parent_headings": [table],
                        "level": 2,  # start at level 2 so the root keeps level 0
                        "table_name": table,
                    }
                    chunks.append(self._finalize_chunk(chunk_data, file_path, root_doc_id))

        return chunks

    def _finalize_chunk(self, chunk_data, file_path, root_doc_id):
        """Embed a child chunk with its parent-heading context and build its point."""
        # Prefix the parent headings so the embedding carries table context.
        context = " > ".join(chunk_data["parent_headings"]) + "\n" + chunk_data["text"]
        vector = self.encoder.encode(context).tolist()

        file_name = os.path.basename(file_path)

        # Deterministic id from path + headings + leading text to keep ids
        # stable across re-imports while avoiding collisions between chunks.
        unique_id = int(hashlib.sha256(
            f"{file_path}_{chunk_data['parent_headings']}_{chunk_data['text'][:50]}".encode()
        ).hexdigest()[:16], 16)

        return models.PointStruct(
            id=unique_id,
            vector=vector,
            payload={
                "content": chunk_data["text"],
                "file_path": file_path,
                "file_name": file_name,
                "title": chunk_data["parent_headings"][-1] if chunk_data["parent_headings"] else "Untitled",
                "parent_headings": chunk_data["parent_headings"],
                "table_name": chunk_data.get("table_name", ""),
                "heading_level": chunk_data["level"],
                "chunk_size": len(chunk_data["text"]),
                "root_doc_id": root_doc_id,  # link back to the parent point
                "type": "child",  # marks the child block
            },
        )

    def _upload_points(self, points):
        """Upsert the points in one batch; wait=True blocks until the write lands."""
        self.qdrant_client.upsert(
            collection_name=self.collection_name,
            points=points,
            wait=True,  # synchronous write to avoid silent data loss
        )

if __name__ == "__main__":
    # Entry point: index every SQL file under the hard-coded directory.
    processor = SmartSqlProcessor(
        model_name="baai/bge-base-zh-v1.5",
        max_chunk_size=200,   # smaller chunks than the class default, tuned for SQL records
        min_chunk_size=50,
        overlap=25
    )
    processor.create_collection()
    # NOTE(review): directory and Qdrant host are hard-coded — consider
    # moving them to configuration.
    processor.process_directory("C:\\Users\\zhang\\mall\\document\\sql")
    print("导入完成！访问控制台：http://192.168.109.128:6333/dashboard")