#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import re
import pickle
import numpy as np
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

try:
    from sentence_transformers import SentenceTransformer
    import faiss
    SENTENCE_TRANSFORMERS_AVAILABLE = True
except ImportError:
    SENTENCE_TRANSFORMERS_AVAILABLE = False
    print("⚠️ 请先安装依赖: pip install sentence-transformers faiss-cpu")

def _load_embedding_model():
    """Load the sentence-embedding model, preferring a local copy.

    Falls back to downloading 'paraphrase-multilingual-MiniLM-L12-v2'
    from the Hugging Face hub when ./models/sentence-transformers does
    not exist on disk.
    """
    model_path = "./models/sentence-transformers"
    if os.path.exists(model_path):
        print(f"🔍 使用本地模型: {model_path}")
        return SentenceTransformer(model_path)
    print("🔍 使用在线模型: paraphrase-multilingual-MiniLM-L12-v2")
    return SentenceTransformer('paraphrase-multilingual-MiniLM-L12-v2')

def build_rag_index():
    """Build and persist the RAG index from the spec markdown.

    Pipeline: read the markdown spec, split it into chunks, embed each
    chunk, build an exact-L2 FAISS index, then save both the chunk list
    (data/rag_chunks.pkl) and the index (data/rag_index.faiss).

    Returns:
        bool: True on success; False when dependencies, the source
        document, or usable chunks are missing.
    """
    if not SENTENCE_TRANSFORMERS_AVAILABLE:
        print("❌ 缺少必要依赖，无法构建索引")
        return False

    print("=== RAG索引构建器 ===")

    # 1. Read the markdown spec document
    md_file = "config/自动测试用例生成格式规范.md"
    if not os.path.exists(md_file):
        print(f"❌ Markdown文件不存在: {md_file}")
        return False

    print(f"📖 正在读取文档: {md_file}")
    with open(md_file, 'r', encoding='utf-8') as f:
        content = f.read()

    # 2. Segment the document into chunks
    print("🔪 正在分段文档...")
    chunks = segment_document(content)
    print(f"✅ 成功分段，共 {len(chunks)} 个片段")

    # Guard: an empty chunk list would make model.encode() return an
    # empty 1-D array and embeddings.shape[1] raise IndexError below,
    # so fail early with a clear message instead.
    if not chunks:
        print("❌ 文档分段结果为空，无法构建索引")
        return False

    # 3. Embed the chunks
    print("🔢 正在向量化...")
    model = _load_embedding_model()
    embeddings = model.encode(chunks, show_progress_bar=True)
    print(f"✅ 向量化完成，维度: {embeddings.shape}")

    # 4. Build the FAISS index (IndexFlatL2 = exact L2 search)
    print("🔍 正在构建faiss索引...")
    index = faiss.IndexFlatL2(embeddings.shape[1])
    index.add(np.array(embeddings).astype('float32'))
    print(f"✅ 索引构建完成，包含 {index.ntotal} 个向量")

    # 5. Persist the chunk list and the index side by side
    print("💾 正在保存索引...")
    os.makedirs("data", exist_ok=True)
    with open('data/rag_chunks.pkl', 'wb') as f:
        pickle.dump(chunks, f)
    faiss.write_index(index, 'data/rag_index.faiss')

    print("✅ RAG索引构建完成！")
    print("📁 保存的文件:")
    print("  - data/rag_chunks.pkl (文档分段)")
    print("  - data/rag_index.faiss (向量索引)")

    # Preview the first few chunks as a quick sanity check
    print(f"\n📝 分段预览 (前3个):")
    for i, chunk in enumerate(chunks[:3]):
        print(f"  片段 {i+1}: {chunk[:100]}...")

    return True

def segment_document(content):
    """Split the spec document into retrievable text chunks.

    Primary strategy: split on "2.x <field-name>" section headers and
    pair each header with the section body that follows it. If that
    produces nothing, fall back to paragraph splitting, keeping only
    paragraphs longer than 50 characters.
    """
    # re.split with one capture group yields:
    # [preamble, header1, body1, header2, body2, ...]
    parts = re.split(r'(2\.\d+\s+\w+)', content)

    chunks = []
    for header, section in zip(parts[1::2], parts[2::2]):
        header = header.strip()
        # collapse runs of blank lines inside the section body
        section = re.sub(r'\n\s*\n', '\n', section.strip()).strip()
        if header and section:
            chunks.append(f"{header}\n{section}")

    # Fallback path when no "2.x" headers were found
    if not chunks:
        print("⚠️ 按字段分段失败，尝试按段落分段...")
        chunks = [
            para.strip()
            for para in re.split(r'\n\s*\n', content)
            if para.strip() and len(para.strip()) > 50  # drop short paragraphs
        ]

    return chunks

def build_keyword_index(chunks):
    """Build a keyword lookup from field type to matching chunks.

    Returns a dict mapping field_type -> list of records
    {'chunk_index', 'keyword', 'content'}, one record per
    (chunk, matching keyword) pair, via case-insensitive substring match.
    """
    # Keyword vocabulary covering all 12 field types defined in the spec
    field_keywords = {
        'text': ['text', '文本', '字符串', '字符'],
        'bigint': ['bigint', '长整型', '整数', '数字'],
        'number': ['number', '数字', '数值', '浮点数'],
        'date': ['date', '日期', '时间'],
        'datetime': ['datetime', '日期时间', '时间戳'],
        'option': ['option', '选项', '枚举', '选择'],
        'boolean': ['boolean', '布尔', '真假', '逻辑'],
        'richText': ['richText', '富文本', '文本'],
        'multilingual': ['multilingual', '多语言', '国际化'],
        'mobileNumber': ['mobileNumber', '手机号', '电话', 'mobile', 'phone'],
        'email': ['email', '邮箱', '邮件', '电子邮箱'],
        'decimal': ['decimal', '小数', '浮点数', '精度', '定点数']
    }

    keyword_index = {}
    for pos, text in enumerate(chunks):
        haystack = text.lower()
        for field_type, words in field_keywords.items():
            # record every keyword of this field type found in the chunk
            matches = [w for w in words if w.lower() in haystack]
            for word in matches:
                keyword_index.setdefault(field_type, []).append({
                    'chunk_index': pos,
                    'keyword': word,
                    'content': text
                })

    return keyword_index

def test_retrieval():
    """Smoke-test retrieval against the persisted index.

    Loads the saved chunks and FAISS index, embeds a handful of sample
    queries, and prints the top-2 nearest chunks for each one.
    """
    chunks_path = 'data/rag_chunks.pkl'
    index_path = 'data/rag_index.faiss'
    if not (os.path.exists(chunks_path) and os.path.exists(index_path)):
        print("❌ 索引文件不存在，请先运行 build_rag_index()")
        return

    print("\n=== 检索测试 ===")

    # Load the persisted chunk list and vector index
    with open(chunks_path, 'rb') as f:
        chunks = pickle.load(f)
    index = faiss.read_index(index_path)

    # Prefer a local model directory; otherwise pull from the hub
    model_path = "./models/sentence-transformers"
    if os.path.exists(model_path):
        print(f"🔍 使用本地模型: {model_path}")
        model = SentenceTransformer(model_path)
    else:
        print("🔍 使用在线模型: paraphrase-multilingual-MiniLM-L12-v2")
        model = SentenceTransformer('paraphrase-multilingual-MiniLM-L12-v2')

    sample_queries = [
        "text字段的格式限制",
        "bigint字段的数值范围",
        "email字段的格式要求",
        "mobile_number字段的结构"
    ]

    for query in sample_queries:
        print(f"\n🔍 查询: {query}")

        # Embed the query and fetch the two nearest chunks
        query_vec = model.encode([query])
        distances, neighbors = index.search(np.array(query_vec).astype('float32'), 2)

        print("📄 检索结果:")
        # NOTE(review): 1-dist treats an L2 distance as a similarity score
        # and can go negative for distant matches — kept as-is for parity.
        for rank, (dist, hit) in enumerate(zip(distances[0], neighbors[0])):
            chunk = chunks[hit]
            print(f"  {rank+1}. 相似度: {1-dist:.3f}")
            print(f"     内容: {chunk[:100]}...")

def main():
    """Entry point: build the RAG index, then run retrieval tests on success."""
    banner = "=" * 60
    print(banner)
    print("RAG索引构建器")
    print(banner)

    # Only exercise retrieval when the index was actually written
    if build_rag_index():
        test_retrieval()

    print("\n🎉 完成！")

# Run the full build + retrieval smoke test when executed as a script.
if __name__ == "__main__":
    main() 