from openai import OpenAI
from dotenv import load_dotenv
import os
import numpy as np
import glob
import re
import time

# Load environment variables from a local .env file (API_KEY / API_URL).
load_dotenv()

# Initialize the OpenAI-compatible client from environment configuration.
client = OpenAI(
    api_key=os.getenv("API_KEY"), 
    base_url=os.getenv("API_URL")
)
# Predefined keyword lexicon (contact/hotline terms; extend as needed).
KEYWORDS = ["咨询", "热线", "电话", "联系", "联系方式", "联系电话", "招生热线"]

def load_documents(data_dir, max_length=500, keywords=None):
    """Load ``*.txt`` documents from *data_dir*, splitting long files.

    Args:
        data_dir: Directory scanned (non-recursively) for ``*.txt`` files.
        max_length: Maximum character length of a single segment; longer
            files are split with ``smart_segment()``.
        keywords: Keyword list used to flag segments; ``None`` falls back
            to the module-level ``KEYWORDS`` lexicon.

    Returns:
        List of dicts with keys ``source``, ``content`` and ``has_keywords``.
        Unreadable files are skipped with a warning instead of aborting.
    """
    kws = KEYWORDS if keywords is None else keywords
    documents = []
    files = glob.glob(os.path.join(data_dir, '*.txt'))

    if not files:
        print(f"⚠️ 在 '{data_dir}' 目录中未找到任何txt文件")
        return documents

    print(f"📂 发现 {len(files)} 个文本文件")

    for file_path in files:
        try:
            with open(file_path, "r", encoding='utf-8', errors='ignore') as f:
                content = f.read().strip()
            if not content:
                continue

            filename = os.path.basename(file_path)

            if len(content) <= max_length:
                # Short document: keep it as a single segment.
                documents.append({
                    "source": filename,
                    "content": content,
                    "has_keywords": any(kw in content for kw in kws),
                })
            else:
                # Long document: split into keyword-aware segments.
                for i, seg in enumerate(smart_segment(content, max_length)):
                    documents.append({
                        # BUGFIX: was the hard-coded placeholder "(unknown)";
                        # tag each segment with its actual source file name.
                        "source": f"{filename}@{i+1}",
                        "content": seg,
                        "has_keywords": any(kw in seg for kw in kws),
                    })
        except Exception as e:
            # Best-effort loading: report and move on to the next file.
            print(f"⚠️ 加载文档出错: {os.path.basename(file_path)} - {str(e)}")

    key_docs = sum(1 for d in documents if d['has_keywords'])
    print(f"✅ 加载完成: {len(documents)}个文本片段 ({key_docs}个包含关键词)")
    return documents

def smart_segment(text, max_length=500, keywords=None):
    """Split long text into segments, prioritising keyword contexts.

    A ~200-char window (50 before, 150 after) around the *first* occurrence
    of each keyword is extracted first, then the remaining text is split by
    paragraphs, sentences, or fixed length as a last resort.

    Args:
        text: Full document text to segment.
        max_length: Soft upper bound on segment length.
        keywords: Keyword list; ``None`` falls back to module ``KEYWORDS``.

    Returns:
        List of non-blank text segments.
    """
    kws = KEYWORDS if keywords is None else keywords
    segments = []

    # Collect context windows around the first occurrence of each keyword.
    for kw in kws:
        idx = text.find(kw)
        if idx != -1:
            window = text[max(0, idx - 50):min(len(text), idx + 150)]
            if window not in segments:
                segments.append(window)

    # Remove the extracted windows, then split whatever is left.
    remaining = text
    for seg in segments:
        remaining = remaining.replace(seg, "")

    if len(remaining) > max_length:
        if '\n\n' in remaining:
            # Split on natural paragraph breaks first.
            for para in remaining.split('\n\n'):
                if not para:
                    continue
                if len(para) <= max_length:
                    segments.append(para)
                else:
                    # Oversized paragraph: pack sentences up to max_length.
                    sentences = re.split(r'(?<=[。！？.!?])', para)
                    current = ""
                    for sent in sentences:
                        if len(current) + len(sent) > max_length:
                            segments.append(current)
                            current = sent
                        else:
                            current += sent
                    if current:
                        segments.append(current)
        else:
            # No paragraph breaks: fall back to fixed-length chunks.
            while remaining:
                segments.append(remaining[:max_length])
                remaining = remaining[max_length:]
    elif remaining.strip():
        # BUGFIX: a short (<= max_length) remainder was previously dropped
        # entirely, silently losing document content.
        segments.append(remaining)

    # Drop blank/whitespace-only segments.
    return [seg for seg in segments if seg.strip()]

def get_embedding(text, model="bge-m3", max_retries=3):
    """Fetch the embedding vector for *text* with simple retry logic.

    Retries up to *max_retries* times (1s pause between attempts) and
    returns the embedding list, or None if every attempt fails.
    """
    attempt = 0
    while attempt < max_retries:
        try:
            resp = client.embeddings.create(model=model, input=[text])
        except Exception as e:
            if attempt == max_retries - 1:
                # Final attempt exhausted: report and give up.
                print(f"❌ 无法获取文本嵌入: {str(e)}")
                return None
            print(f"⚠️ 获取嵌入失败 (重试 {attempt+1}/{max_retries}): {str(e)}")
            time.sleep(1)
            attempt += 1
        else:
            return resp.data[0].embedding
    return None

def enhance_cosine_similarity(query_embed, doc_embed, doc_content, keywords=None):
    """Cosine similarity between two embeddings, boosted by keyword hits.

    Args:
        query_embed: Query embedding vector (or None if unavailable).
        doc_embed: Document embedding vector (or None if unavailable).
        doc_content: Raw document text, scanned for keyword occurrences.
        keywords: Keyword list; ``None`` falls back to module ``KEYWORDS``.

    Returns:
        float in [0.0, 1.0]: base cosine similarity plus 0.1 per keyword
        occurrence (capped at 0.25 per keyword), clamped to 1.0. Returns
        0.0 when either embedding is missing or zero-length.
    """
    if query_embed is None or doc_embed is None:
        return 0.0

    kws = KEYWORDS if keywords is None else keywords

    a = np.asarray(query_embed, dtype=float)
    b = np.asarray(doc_embed, dtype=float)
    norm_a = np.linalg.norm(a)
    norm_b = np.linalg.norm(b)

    # Guard against zero vectors (undefined cosine).
    if norm_a == 0 or norm_b == 0:
        return 0.0

    base_sim = float(np.dot(a, b) / (norm_a * norm_b))

    # Keyword weighting: each keyword contributes at most 0.25.
    keyword_boost = 0.0
    for kw in kws:
        count = doc_content.count(kw)
        if count:
            keyword_boost += min(0.25, count * 0.1)

    return min(base_sim + keyword_boost, 1.0)

def find_top_contexts(query_embedding, documents, top_k=3):
    """Return the *top_k* most relevant snippets; keyword hits rank first."""
    candidates = []

    for doc in documents:
        embedding = doc.get("embedding")
        if embedding is None:
            # Skip documents whose embedding is missing or failed.
            continue

        score = enhance_cosine_similarity(query_embedding, embedding, doc["content"])

        # Keep anything with positive similarity, or any keyword-bearing doc.
        if score > 0 or doc["has_keywords"]:
            candidates.append({
                "source": doc["source"],
                "content": doc["content"],
                "similarity": score,
                "has_keywords": doc["has_keywords"],
            })

    # Keyword-bearing docs first, then by descending similarity.
    candidates.sort(key=lambda c: (-c["has_keywords"], -c["similarity"]))
    return candidates[:top_k]

def build_quality_prompt(user_query, context_results, keywords=None):
    """Assemble the final LLM prompt from ranked context snippets.

    High-relevance snippets (similarity >= 0.5 or keyword-bearing) are quoted
    in full with keywords highlighted; up to two low-relevance snippets are
    appended as short previews.

    Args:
        user_query: The raw user query text.
        context_results: Ranked dicts from ``find_top_contexts``.
        keywords: Keyword list to highlight; ``None`` falls back to the
            module-level ``KEYWORDS`` lexicon.

    Returns:
        The complete prompt string.
    """
    kws = KEYWORDS if keywords is None else keywords
    prompt = f"用户查询: {user_query}\n\n"

    if not context_results:
        return prompt + "⚠️ 未找到与查询相关的上下文信息\n请根据您的知识回答用户查询:"

    prompt += "相关上下文信息(基于语义匹配):\n"
    high_relevance = [c for c in context_results
                      if c["similarity"] >= 0.5 or c["has_keywords"]]
    low_relevance = [c for c in context_results
                     if not (c["similarity"] >= 0.5 or c["has_keywords"])]

    # BUGFIX: highlighting keywords with sequential str.replace broke
    # overlapping terms (e.g. "热线" vs "招生热线" produced nested markers and
    # prevented the longer keyword from matching). A single compiled pattern
    # with the longest alternative first highlights each span exactly once.
    kw_pattern = None
    if kws:
        alternation = "|".join(re.escape(k) for k in sorted(kws, key=len, reverse=True))
        kw_pattern = re.compile(alternation)

    # High-relevance snippets are quoted in full.
    for ctx in high_relevance:
        content = ctx["content"]
        if kw_pattern:
            content = kw_pattern.sub(lambda m: f"【{m.group(0)}】", content)

        prompt += f"[来源: {ctx['source']}, 相关度: {ctx['similarity']:.1%}]"
        if ctx["has_keywords"]:
            prompt += " 🔑"
        prompt += "\n"

        # Quote the snippet body with a "> " prefix on every line.
        prompt += "> " + "\n> ".join(content.split('\n'))
        prompt += "\n\n"

    # Low-relevance snippets get a short single-line preview.
    if low_relevance:
        prompt += "---\n其他参考片段:\n"
        for ctx in low_relevance[:2]:
            preview = ctx["content"][:100].replace('\n', ' ') + ("..." if len(ctx["content"]) > 100 else "")
            prompt += f"- {ctx['source']}: {preview}\n"

    prompt += "\n请基于以上上下文专业地回答用户查询:"
    return prompt

def main():
    """Entry point: load and embed documents, then run an interactive query loop."""
    # 1. Load documents from the data directory.
    data_dir = "data"
    print(f"\n{'='*50}")
    print(f"📚 文档加载与嵌入计算系统".center(50))
    print(f"{'='*50}\n")

    print(f"正在扫描 '{data_dir}' 目录...")
    documents = load_documents(data_dir)

    if not documents:
        print(f"⚠️ 终止: 没有可用的文档内容")
        return

    # 2. Compute an embedding for every text segment (network-bound).
    print("\n🔄 正在计算文本嵌入(可能需要几分钟)...")
    start_time = time.time()

    for i, doc in enumerate(documents):
        doc["embedding"] = get_embedding(doc["content"])
        # Progress report every 10 segments and on the final one.
        if (i+1) % 10 == 0 or i == len(documents)-1:
            elapsed = time.time() - start_time
            print(f"  ⏱️ 已完成 {i+1}/{len(documents)} 个片段 (用时: {elapsed:.1f}s)")

    # 3. Interactive query loop.
    print(f"\n{'='*50}")
    print(f"🔍 交互查询模式".center(50))
    print(f"{'='*50}")
    print("输入查询内容并按回车键开始检索")
    print("输入 exit 或 quit 退出程序\n")

    while True:
        user_query = input("请输入查询内容: ").strip()

        if user_query.lower() in ['exit', 'quit']:
            print("\n🛑 程序已终止")
            break

        if not user_query:
            print("⚠️ 查询内容不能为空，请重新输入\n")
            continue

        print(f"\n⏳ 处理查询: '{user_query}'...")

        # Embed the query; bail out on API failure.
        query_embedding = get_embedding(user_query)

        if query_embedding is None:
            print("❌ 无法计算查询的嵌入向量，请检查API连接\n")
            continue

        # Retrieve the most relevant context segments.
        context_results = find_top_contexts(query_embedding, documents)

        # Build and display the final prompt.
        print("\n" + "=" * 80)
        print(f"✨ 查询结果: {user_query}")
        print("=" * 80)
        final_prompt = build_quality_prompt(user_query, context_results)
        print(final_prompt)

        # Diagnostic report on the retrieval.
        print("\n📊 上下文匹配报告:")
        if context_results:
            print(f"- 找到 {len(context_results)} 个相关片段")
            # BUGFIX: removed a redundant nested `if context_results:` check
            # inside this branch (the condition was already established).
            print(f"- 最佳匹配相似度: {context_results[0]['similarity']:.1%}")
            print(f"- 包含关键词的片段: {sum(1 for c in context_results if c['has_keywords'])}")

            # Literal substring check: does any snippet contain the raw query?
            has_query_keyword = any(user_query in ctx['content'] for ctx in context_results)
            print(f"- 上下文是否包含查询词: {'是' if has_query_keyword else '否'}")
        else:
            print("⚠️ 未找到任何相关上下文")
            print("可能原因: ")
            print("1. 文档中缺少相关信息")
            print("2. 嵌入计算或相似度匹配失败")

        print("\n" + "=" * 80)
        print("输入下一个查询或输入 exit 退出\n")

# Standard script entry-point guard.
if __name__ == "__main__":
    main()