# app.py
#
# Thread-count environment variables MUST be exported before torch /
# transformers (and their native BLAS / OpenMP backends) are imported:
# those libraries read OMP_NUM_THREADS etc. once at load time, so setting
# them after the imports (as the original did) is silently ignored.
import os

# === Thread settings ===
os.environ["OMP_NUM_THREADS"] = "6"
os.environ["OPENBLAS_NUM_THREADS"] = "6"
os.environ["MKL_NUM_THREADS"] = "6"
os.environ["VECLIB_MAXIMUM_THREADS"] = "6"
os.environ["NUMEXPR_NUM_THREADS"] = "6"
os.environ["TOKENIZERS_PARALLELISM"] = "false"

import time

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_chroma import Chroma
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
import gradio as gr

# Cap intra-op parallelism to match the env settings above.
torch.set_num_threads(6)

# === Custom LLM wrapper (with logging) ===
class Phi3LLM:
    """Thin callable wrapper around a locally stored Phi-3 causal LM.

    Loads the tokenizer and model from ``model_path`` at construction time
    and exposes ``llm(user_prompt) -> str`` for single-turn, greedy-decoded
    chat completions.  All progress/debug output goes to stdout.
    """

    def __init__(self, model_path, max_new_tokens=32):
        """Load tokenizer and model from a local directory.

        Args:
            model_path: local directory with the Hugging Face model files.
            max_new_tokens: per-call generation budget.  Default 32 matches
                the previously hard-coded value, so existing callers are
                unaffected.
        """
        self.max_new_tokens = max_new_tokens
        print("⏳ 正在加载 tokenizer...")
        self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
        print("⏳ 正在加载模型...")
        self.model = AutoModelForCausalLM.from_pretrained(
            model_path,
            device_map="auto",
            torch_dtype=torch.float32,
            trust_remote_code=True,
            low_cpu_mem_usage=True
        )
        print(f"✅ 模型加载到设备: {next(self.model.parameters()).device}")
        self.model.eval()

    def __call__(self, user_prompt: str) -> str:
        """Run one chat completion and return only the newly generated text.

        Raises whatever tokenization/generation raises; decoding errors are
        swallowed and reported as an error string instead.
        """
        # Fixed system instruction prepended to every request.
        system_message = (
            "你是一个小助理，请根据提供的本地向量库上下文回答问题。"
            "如果搜到相关的内容你就根据内容回答，如果搜不到就自己回答。"
        )

        # Standard chat-format message list.
        messages = [
            {"role": "system", "content": system_message},
            {"role": "user", "content": user_prompt}
        ]

        try:
            # Apply the model's official chat template.
            full_prompt = self.tokenizer.apply_chat_template(
                messages,
                tokenize=False,
                add_generation_prompt=True  # appends the <|assistant|> generation cue
            )
        except Exception as e:
            print(f"❌ apply_chat_template 出错: {e}")
            # Fallback: hand-built Phi-3-style prompt (best effort only).
            full_prompt = f"<|system|>\n{system_message}\n<|user|>\n{user_prompt}\n<|assistant|>\n"

        print("\n🧠 [LLM 最终输入 Prompt]:")
        print("=" * 60)
        print(full_prompt)
        print("=" * 60)
        print(f"📊 Prompt 总长度: {len(full_prompt)} 字符")

        # Tokenize
        start_tok = time.time()
        try:
            inputs = self.tokenizer(full_prompt, return_tensors="pt").to(self.model.device)
            tok_time = time.time() - start_tok
            print(f"⏱️ Tokenize 耗时: {tok_time:.2f} 秒 | Token 数: {inputs['input_ids'].shape[1]}")
        except Exception as e:
            print(f"❌ Tokenize 失败: {e}")
            raise

        # Generate (greedy decoding).  `early_stopping=True` was removed:
        # it only applies to beam search and merely triggers a transformers
        # warning when num_beams == 1, as it is here.
        start_gen = time.time()
        try:
            with torch.no_grad():
                outputs = self.model.generate(
                    **inputs,
                    max_new_tokens=self.max_new_tokens,
                    do_sample=False,
                    pad_token_id=self.tokenizer.eos_token_id,
                    eos_token_id=self.tokenizer.eos_token_id
                )
            gen_time = time.time() - start_gen
        except Exception as e:
            print(f"❌ 模型生成失败: {e}")
            raise

        # Decode only the tokens generated after the prompt.
        try:
            generated_tokens = outputs[0][inputs['input_ids'].shape[1]:]
            raw_response = self.tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()
            print(f"⏱️ 模型生成耗时: {gen_time:.2f} 秒 | 生成 Token 数: {len(generated_tokens)}")
            print("🤖 [LLM 原始输出]:")
            print(repr(raw_response))
            return raw_response
        except Exception as e:
            print(f"❌ 解码失败: {e}")
            return "❌ 解码错误"

# === Load components ===
print("🔄 正在加载模型和知识库...")

# Local model directories (values identical to the original hard-coded paths).
_LLM_MODEL_DIR = "./models/LLM-Research/Phi-3-mini-4k-instruct"
_EMBED_MODEL_DIR = "./models/AI-ModelScope/bge-small-zh-v1___5"

llm = Phi3LLM(_LLM_MODEL_DIR)

# CPU embedding model; vectors are L2-normalized on encode.
embedding = HuggingFaceEmbeddings(
    model_name=_EMBED_MODEL_DIR,
    model_kwargs={"device": "cpu"},
    encode_kwargs={"normalize_embeddings": True},
)

# Persistent Chroma store plus a thresholded top-k retriever.
vectorstore = Chroma(
    persist_directory="./chroma_db",
    embedding_function=embedding,
)
retriever = vectorstore.as_retriever(
    search_type="similarity_score_threshold",
    search_kwargs={"k": 3, "score_threshold": 0.3},
)

# === Build RAG chain ===
def format_docs(docs):
    """Collapse retrieved documents into one context string.

    Logs every retrieved fragment, then joins the first two documents'
    contents with a blank line and truncates the result to 600 characters.
    """
    print(f"\n📄 检索返回 {len(docs)} 个文档片段")
    for idx, doc in enumerate(docs, start=1):
        raw_score = getattr(doc, 'score', None)
        shown = "N/A" if raw_score is None else f"{float(raw_score):.3f}"
        print(f"  [{idx}] 相似度: {shown} | 长度: {len(doc.page_content)} 字")
        print(f"      预览: {doc.page_content[:100]}...")

    # Keep only the top two fragments and cap total context length at 600.
    context = "\n\n".join(d.page_content for d in docs[:2])[:600]
    print(f"\n✂️  经 format_docs 处理后上下文长度: {len(context)} 字")
    return context

# ✅ Simplified prompt: only context and question (the system instruction
# lives inside the Phi3LLM wrapper).
prompt_template = """
上下文：
{context}

问题：{question}
"""
prompt = PromptTemplate.from_template(prompt_template)

# LCEL pipeline: retrieve -> format -> fill prompt -> generate -> parse.
# NOTE(review): `ask_question` below performs these same steps manually and
# never invokes this chain — it appears to be unused; confirm before removing.
# NOTE(review): `x.text` presumably reads StringPromptValue.text from the
# PromptTemplate output — verify against the installed langchain-core version.
rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | (lambda x: llm(x.text))
    | StrOutputParser()
)

# === Q&A handler (with enhanced logging) ===
def ask_question(history, question):
    """Gradio handler: answer `question` and append the exchange to `history`.

    Returns the updated history plus an empty string to clear the textbox.
    The whole pipeline (retrieval -> formatting -> prompt -> generation) now
    runs inside the try-block, so a retriever/embedding failure is reported
    in the chat like an LLM failure, instead of propagating to Gradio
    (previously only the LLM call was guarded).
    """
    # Ignore empty/whitespace-only submissions.
    if not question.strip():
        return history, ""
    print("\n" + "="*60)
    print(f"❓ 用户问题: {question}")
    print("="*60)

    # Placeholder entry, overwritten with the answer or the error below.
    history.append([question, "思考中..."])

    # Per-stage wall-clock timing.
    t0 = time.time()
    try:
        docs = retriever.invoke(question)
        t1 = time.time()
        context = format_docs(docs)
        t2 = time.time()

        full_prompt = prompt.format(context=context, question=question)
        t3 = time.time()

        answer = llm(full_prompt)
        t4 = time.time()

        total = t4 - t0
        print("\n📈 阶段耗时统计:")
        print(f"  检索:     {t1 - t0:.2f} 秒")
        print(f"  格式化:   {t2 - t1:.2f} 秒")
        print(f"  Prompt组装:{t3 - t2:.2f} 秒")
        print(f"  LLM生成:   {t4 - t3:.2f} 秒")
        print(f"  总计:     {total:.2f} 秒")

        answer_with_time = f"{answer}\n\n⏱️ 耗时：{total:.1f} 秒"
        history[-1][1] = answer_with_time
    except Exception as e:
        print(f"❌ 异常: {str(e)}")
        history[-1][1] = f"❌ 出错: {str(e)}"

    return history, ""

# === Gradio UI ===
print("🚀 启动 Web 界面...")

with gr.Blocks(title="私有小说问答助手") as demo:
    gr.Markdown("## 📱 私有小说问答助手")
    gr.Markdown("基于你的 170 万字小说构建，本地问答，数据不出内网")
    
    # Chat transcript; ask_question appends [question, answer] pairs.
    # NOTE(review): `bubble_full_width` is deprecated/removed in recent
    # Gradio releases — confirm the pinned Gradio version still accepts it.
    chatbot = gr.Chatbot(height=500, bubble_full_width=False, show_label=False)
    
    with gr.Row():
        msg = gr.Textbox(lines=2, placeholder="例如：林雷的初恋是谁？", show_label=False, container=False)
        submit_btn = gr.Button("发送", scale=0)
    
    # Clickable sample questions that fill the textbox.
    gr.Examples(
        examples=[
            ["林雷的初恋是谁？"],
            ["迪莉娅是什么种族？"],
            ["巴鲁克家族有哪些成员？"]
        ],
        inputs=msg
    )
    
    clear_btn = gr.ClearButton([msg, chatbot], value="清空对话")

    # Pressing Enter and clicking the button run the same handler.
    # NOTE(review): queue=False bypasses Gradio's request queue; with a
    # multi-second CPU LLM this may block or time out under concurrent
    # requests — confirm this is intentional.
    msg.submit(fn=ask_question, inputs=[chatbot, msg], outputs=[chatbot, msg], queue=False)
    submit_btn.click(fn=ask_question, inputs=[chatbot, msg], outputs=[chatbot, msg], queue=False)

# Local-only server on port 7860; opens a browser tab on start.
demo.launch(server_name="127.0.0.1", server_port=7860, inbrowser=True)