#!/usr/bin/env python3
"""
Markdown file vectorization script.

Embeds Markdown content with the mxbai-embed-large model (via an
Ollama-compatible HTTP endpoint) and stores the vectors in Milvus.
"""

import os
import json
import re
import hashlib
from typing import List, Dict, Any, Optional
from datetime import datetime
import logging
from pathlib import Path

# Import third-party dependencies; fail fast with install instructions
# if they are missing.
try:
    import requests
    from pymilvus import (
        connections, Collection, FieldSchema, CollectionSchema, DataType,
        utility, MilvusException
    )
except ImportError:
    print("请安装必要的依赖包：")
    print("pip install pymilvus requests")
    exit(1)

# Configure logging: timestamped INFO-level messages for the whole script.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
# Module-level logger used by MDVectorizer and main().
logger = logging.getLogger(__name__)

class MDVectorizer:
    """Vectorize Markdown files and store the embeddings in Milvus.

    Text is split into overlapping chunks, each chunk is embedded via an
    Ollama-compatible HTTP endpoint, and the vectors (plus metadata) are
    inserted into a Milvus collection that supports cosine-similarity search.
    """

    def __init__(self,
                 embedding_model: str = "mxbai-embed-large",
                 embedding_url: str = "http://localhost:11434/api/embeddings",
                 milvus_host: str = "localhost",
                 milvus_port: int = 19530,
                 collection_name: str = "md_documents"):
        """
        Initialize the vectorizer and connect to Milvus.

        Args:
            embedding_model: Name of the embedding model.
            embedding_url: URL of the embedding HTTP service.
            milvus_host: Milvus host address.
            milvus_port: Milvus port.
            collection_name: Name of the Milvus collection to use or create.
        """
        self.embedding_model = embedding_model
        self.embedding_url = embedding_url
        self.milvus_host = milvus_host
        self.milvus_port = milvus_port
        self.collection_name = collection_name
        self.collection = None

        # Text chunking parameters.
        self.chunk_size = 1000    # characters per chunk
        self.chunk_overlap = 200  # characters shared between adjacent chunks

        # Establish the Milvus connection and make sure the collection exists.
        self._connect_milvus()
        self._create_collection()

    def _connect_milvus(self):
        """Open the default connection to the Milvus server.

        Raises:
            Exception: re-raised when the connection cannot be established.
        """
        try:
            connections.connect(
                alias="default",
                host=self.milvus_host,
                port=self.milvus_port
            )
            logger.info(f"成功连接到Milvus: {self.milvus_host}:{self.milvus_port}")
        except Exception as e:
            logger.error(f"连接Milvus失败: {e}")
            raise

    def _create_collection(self):
        """Create the Milvus collection (with its index) or reuse an existing one.

        Raises:
            Exception: re-raised when creation or loading fails.
        """
        try:
            # Reuse the collection when it already exists.
            if utility.has_collection(self.collection_name):
                logger.info(f"集合 {self.collection_name} 已存在")
                self.collection = Collection(self.collection_name)
            else:
                # Build the schema for a new collection.
                fields = [
                    FieldSchema(name="id", dtype=DataType.VARCHAR, max_length=64, is_primary=True),
                    FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=65535),
                    FieldSchema(name="feature", dtype=DataType.FLOAT_VECTOR, dim=1024),  # mxbai-embed-large dimension
                    FieldSchema(name="file_path", dtype=DataType.VARCHAR, max_length=512),
                    FieldSchema(name="file_name", dtype=DataType.VARCHAR, max_length=256),
                    FieldSchema(name="chunk_index", dtype=DataType.INT64),
                    FieldSchema(name="chunk_size", dtype=DataType.INT64),
                    FieldSchema(name="metadata", dtype=DataType.VARCHAR, max_length=2048),
                    FieldSchema(name="created_time", dtype=DataType.VARCHAR, max_length=32)
                ]

                schema = CollectionSchema(fields, "MD文档向量化集合")
                self.collection = Collection(self.collection_name, schema)

                # Index the vector field for cosine-similarity search.
                index_params = {
                    "metric_type": "COSINE",
                    "index_type": "IVF_FLAT",
                    "params": {"nlist": 128}
                }
                self.collection.create_index("feature", index_params)
                logger.info(f"成功创建集合 {self.collection_name}")

            # Load the collection into memory so it can be searched.
            self.collection.load()

        except Exception as e:
            logger.error(f"创建集合失败: {e}")
            raise

    def get_embedding(self, text: str) -> Optional[List[float]]:
        """
        Get the embedding vector for a piece of text.

        Args:
            text: Text to embed.

        Returns:
            The embedding as a list of floats, or None on any failure
            (non-200 response, missing "embedding" field, network error).
            NOTE: the original annotation claimed List[float] but every
            failure path returns None, hence Optional.
        """
        try:
            payload = {
                "model": self.embedding_model,
                "prompt": text
            }

            response = requests.post(
                self.embedding_url,
                json=payload,
                headers={"Content-Type": "application/json"},
                timeout=30
            )

            if response.status_code == 200:
                result = response.json()
                if "embedding" in result:
                    return result["embedding"]
                logger.error(f"响应中没有embedding字段: {result}")
                return None
            logger.error(f"嵌入请求失败: {response.status_code} - {response.text}")
            return None

        except Exception as e:
            logger.error(f"获取嵌入向量失败: {e}")
            return None

    def chunk_text(self, text: str) -> List[str]:
        """
        Split text into overlapping chunks, preferring sentence boundaries.

        Args:
            text: Raw text.

        Returns:
            List of text chunks, each stripped of surrounding whitespace.
        """
        if len(text) <= self.chunk_size:
            return [text]

        chunks = []
        start = 0

        while start < len(text):
            end = start + self.chunk_size

            # For non-final chunks, try to cut at a sentence boundary,
            # checked in priority order: 。 ！ ？ then newline.
            if end < len(text):
                sentence_end = text.rfind('。', start, end)
                if sentence_end == -1:
                    sentence_end = text.rfind('！', start, end)
                if sentence_end == -1:
                    sentence_end = text.rfind('？', start, end)
                if sentence_end == -1:
                    sentence_end = text.rfind('\n', start, end)

                if sentence_end != -1 and sentence_end > start:
                    end = sentence_end + 1

            chunk = text[start:end].strip()
            if chunk:
                chunks.append(chunk)

            # Advance, keeping chunk_overlap characters of context; fall back
            # to a non-overlapping step when overlap would not make progress.
            start = end - self.chunk_overlap if end - self.chunk_overlap > start else end

        return chunks

    def extract_metadata(self, file_path: str, text: str) -> Dict[str, Any]:
        """
        Extract lightweight metadata from a Markdown file's content.

        Args:
            file_path: File path (currently unused; kept for interface
                compatibility and future use).
            text: File content.

        Returns:
            Metadata dict: size/line counts, Markdown headers (up to 10),
            and a keyword-based content-type heuristic.
        """
        metadata = {
            "file_size": len(text),
            "char_count": len(text),
            "line_count": text.count('\n'),
            "language": "zh-CN"  # content is assumed to be Chinese
        }

        # Collect Markdown headers ("# ...", "## ...", etc.).
        headers = re.findall(r'^#+\s+(.+)$', text, re.MULTILINE)
        if headers:
            metadata["headers"] = headers[:10]  # keep at most 10 headers
            metadata["first_header"] = headers[0]

        # Keyword heuristics for the kind of content in the file.
        if "问答" in text or "Q:" in text:
            metadata["content_type"] = "qa"
        elif "表结构" in text or "字段" in text:
            metadata["content_type"] = "database_schema"
        else:
            metadata["content_type"] = "general"

        return metadata

    def process_md_file(self, file_path: str) -> bool:
        """
        Process one Markdown file: read, chunk, embed and insert into Milvus.

        Args:
            file_path: Path to the Markdown file.

        Returns:
            True on success, False on any failure (details are logged).
        """
        try:
            logger.info(f"开始处理文件: {file_path}")

            with open(file_path, 'r', encoding='utf-8') as f:
                content = f.read()

            if not content.strip():
                logger.warning(f"文件内容为空: {file_path}")
                return False

            metadata = self.extract_metadata(file_path, content)

            chunks = self.chunk_text(content)
            logger.info(f"文件 {file_path} 分割为 {len(chunks)} 个块")

            entities = []
            for i, chunk in enumerate(chunks):
                # Deterministic id per (file, chunk index, chunk prefix).
                chunk_id = hashlib.md5(f"{file_path}_{i}_{chunk[:100]}".encode()).hexdigest()

                embedding = self.get_embedding(chunk)
                if embedding is None:
                    # Skip chunks whose embedding failed; the rest still insert.
                    logger.error(f"无法获取块 {i} 的嵌入向量")
                    continue

                entity = {
                    "id": chunk_id,
                    "text": chunk,
                    "feature": embedding,
                    "file_path": file_path,
                    "file_name": os.path.basename(file_path),
                    "chunk_index": i,
                    "chunk_size": len(chunk),
                    "metadata": json.dumps(metadata, ensure_ascii=False),
                    "created_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                }
                entities.append(entity)

                logger.info(f"处理块 {i+1}/{len(chunks)} - 文本长度: {len(chunk)} 字符")

            if entities:
                try:
                    # Column-based insert format expected by pymilvus, in the
                    # same order as the collection schema fields.
                    data = [
                        [entity["id"] for entity in entities],
                        [entity["text"] for entity in entities],
                        [entity["feature"] for entity in entities],
                        [entity["file_path"] for entity in entities],
                        [entity["file_name"] for entity in entities],
                        [entity["chunk_index"] for entity in entities],
                        [entity["chunk_size"] for entity in entities],
                        [entity["metadata"] for entity in entities],
                        [entity["created_time"] for entity in entities]
                    ]

                    self.collection.insert(data)
                    logger.info(f"成功插入 {len(entities)} 个向量到Milvus")

                    # Flush so the inserted data is persisted.
                    self.collection.flush()

                except Exception as e:
                    logger.error(f"插入数据到Milvus失败: {e}")
                    return False

            return True

        except Exception as e:
            logger.error(f"处理文件 {file_path} 时出错: {e}")
            return False

    def process_directory(self, directory_path: str) -> Dict[str, int]:
        """
        Process every Markdown file under a directory (recursively).

        Args:
            directory_path: Root directory scanned for *.md files.

        Returns:
            Counts dict with keys "success", "failed" and "total".
        """
        results = {"success": 0, "failed": 0, "total": 0}

        md_files = list(Path(directory_path).glob("**/*.md"))
        results["total"] = len(md_files)

        logger.info(f"找到 {len(md_files)} 个MD文件")

        for md_file in md_files:
            if self.process_md_file(str(md_file)):
                results["success"] += 1
            else:
                results["failed"] += 1

        return results

    def search_similar(self, query_text: str, top_k: int = 10) -> List[Dict]:
        """
        Search the collection for chunks similar to a query string.

        Args:
            query_text: Query text.
            top_k: Maximum number of results to return.

        Returns:
            List of result dicts (id, score, text, file info, metadata);
            an empty list on any failure.
        """
        try:
            query_embedding = self.get_embedding(query_text)
            if query_embedding is None:
                logger.error("无法获取查询文本的嵌入向量")
                return []

            search_params = {"metric_type": "COSINE", "params": {"nprobe": 16}}

            results = self.collection.search(
                data=[query_embedding],
                anns_field="feature",
                param=search_params,
                limit=top_k,
                output_fields=["text", "file_path", "file_name", "chunk_index", "metadata"]
            )

            # Flatten Milvus hit objects into plain dicts.
            formatted_results = []
            for hits in results:
                for hit in hits:
                    formatted_results.append({
                        "id": hit.id,
                        "score": hit.score,
                        "text": hit.entity.get("text"),
                        "file_path": hit.entity.get("file_path"),
                        "file_name": hit.entity.get("file_name"),
                        "chunk_index": hit.entity.get("chunk_index"),
                        "metadata": json.loads(hit.entity.get("metadata", "{}"))
                    })

            return formatted_results

        except Exception as e:
            logger.error(f"搜索失败: {e}")
            return []

    def get_collection_stats(self) -> Dict[str, Any]:
        """
        Get basic statistics about the collection.

        Returns:
            Stats dict (entity count, collection name, index/metric type),
            or an empty dict on failure.
        """
        try:
            return {
                "total_entities": self.collection.num_entities,
                "collection_name": self.collection_name,
                "index_type": "IVF_FLAT",
                "metric_type": "COSINE"
            }
        except Exception as e:
            logger.error(f"获取统计信息失败: {e}")
            return {}


def main():
    """Script entry point: vectorize a directory of Markdown files.

    Returns:
        Process exit code: 0 on success, 1 on failure.
    """
    # Runtime configuration.
    config = {
        "embedding_model": "mxbai-embed-large",
        "embedding_url": "http://localhost:11434/api/embeddings",  # Ollama default endpoint
        "milvus_host": "localhost",
        "milvus_port": 19530,
        "collection_name": "md_documents",
        "md_directory": "/home/dockerAI/lang-rag/python-data/md_output"
    }

    try:
        # Build the vectorizer (connects to Milvus during construction).
        vectorizer = MDVectorizer(
            embedding_model=config["embedding_model"],
            embedding_url=config["embedding_url"],
            milvus_host=config["milvus_host"],
            milvus_port=config["milvus_port"],
            collection_name=config["collection_name"]
        )

        # Vectorize every Markdown file in the configured directory.
        logger.info("开始处理MD文件向量化...")
        results = vectorizer.process_directory(config["md_directory"])

        # Report the outcome.
        logger.info("处理完成!")
        logger.info(f"总文件数: {results['total']}")
        logger.info(f"成功处理: {results['success']}")
        logger.info(f"处理失败: {results['failed']}")

        # Report collection statistics.
        stats = vectorizer.get_collection_stats()
        logger.info(f"集合统计: {stats}")

        # Demonstrate a similarity search. Use a distinct name so the
        # file-count results above are not shadowed.
        print("\n=== 搜索示例 ===")
        query = "数据库表结构"
        search_results = vectorizer.search_similar(query, top_k=3)

        print(f"查询: {query}")
        print(f"找到 {len(search_results)} 个相关结果:")

        for i, result in enumerate(search_results, 1):
            print(f"\n{i}. 相似度: {result['score']:.4f}")
            print(f"   文件: {result['file_name']}")
            print(f"   文本片段: {result['text'][:200]}...")

    except Exception as e:
        logger.error(f"程序执行失败: {e}")
        return 1

    return 0


if __name__ == "__main__":
    exit(main())
