"""
optimize_rag_demo_qwen.py

功能：
- 构建一个使用 SiliconFlow 平台（Qwen + BGE Embedding）的 RAG 系统
- 使用 LLM 作为 judge，为生成回答打分（0-5）
- 使用 Optuna (TPESampler) 进行贝叶斯式超参数优化
"""

import csv
import json
import os
import random
import re
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, List, Optional, Tuple

import numpy as np
import optuna
from tqdm import tqdm

from langchain.prompts import PromptTemplate
from langchain.schema import Document
from langchain.vectorstores import FAISS
from langchain_openai import ChatOpenAI, OpenAIEmbeddings


# ========== Global configuration ==========
API_BASE = "https://api.siliconflow.cn/v1"
# SECURITY: never commit API keys to source control. Prefer the environment
# variable; the inline fallback only preserves backward compatibility and
# should be rotated/removed.
API_KEY = os.environ.get(
    "SILICONFLOW_API_KEY",
    "sk-pdfifkpjdlxvyvgkerbluaotktpznsmpbcvskjauotenxgvz",
)
MODEL_NAME = "Qwen/Qwen3-8B"

# Seed both RNGs so corpus sampling and Optuna's TPE search are reproducible.
RANDOM_SEED = 42
random.seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)

# CSV next to this script collecting one row per trial / final evaluation.
RUN_LOG_PATH = Path(__file__).with_name("rag_run_history.csv")
RUN_LOG_FIELDS = ["timestamp", "stage", "trial_number", "temperature", "top_k", "avg_score"]


def append_run_log(stage: str, params: Dict[str, float], result: float, trial_number: Optional[int] = None) -> None:
    """Append one evaluation record to the CSV run history.

    Args:
        stage: Pipeline phase label, e.g. "optuna_trial" or "final_evaluation".
        params: Hyper-parameters used; only "temperature" and "top_k" are logged.
        result: Average judge score for this run.
        trial_number: Optuna trial index, or ``None`` for non-trial runs
            (written as an empty cell).
    """
    RUN_LOG_PATH.parent.mkdir(parents=True, exist_ok=True)
    row = {
        # Timezone-aware UTC timestamp; datetime.utcnow() is deprecated
        # (3.12+) and produced a naive datetime.
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "stage": stage,
        "trial_number": trial_number if trial_number is not None else "",
        "temperature": params.get("temperature"),
        "top_k": params.get("top_k"),
        "avg_score": result,
    }
    # Write the header when the file is missing OR exists but is empty
    # (e.g. created by a previous crashed run before any row was written).
    needs_header = not RUN_LOG_PATH.exists() or RUN_LOG_PATH.stat().st_size == 0
    with RUN_LOG_PATH.open("a", newline="", encoding="utf-8") as f:
        writer = csv.DictWriter(f, fieldnames=RUN_LOG_FIELDS)
        if needs_header:
            writer.writeheader()
        writer.writerow(row)


# ========== 构造语料 ==========
def build_sample_corpus() -> List[Document]:
    """Return a tiny in-memory corpus of five Chinese reference snippets.

    Each snippet is wrapped in a LangChain ``Document`` whose metadata
    carries a ``source`` id used later when formatting retrieved context.
    """
    snippets = [
        ("doc1", "LangChain 是一个构建语言模型应用的框架，支持链式调用、agents、memory 等功能。"),
        ("doc2", "RAG 指的是检索增强生成，通过检索知识库来辅助生成更加准确和可控的答案。"),
        ("doc3", "Embeddings 将文本映射为向量，以便在向量数据库中通过相似度进行检索。"),
        ("doc4", "FAISS 是一个高效的相似检索库，常用于向量检索和相似度搜索。"),
        ("doc5", "Optuna 是一个用于超参数优化的库，支持 TPE、CMA-ES、网格搜索等算法。"),
    ]
    return [
        Document(page_content=text, metadata={"source": source})
        for source, text in snippets
    ]


# ========== 向量数据库 ==========
def create_vectorstore(docs: List[Document]):
    """Embed *docs* with the BGE model and index them in an in-memory FAISS store."""
    embedder = OpenAIEmbeddings(
        model="BAAI/bge-large-zh-v1.5",
        openai_api_key=API_KEY,
        openai_api_base=API_BASE
    )
    return FAISS.from_texts(
        [doc.page_content for doc in docs],
        embedder,
        metadatas=[doc.metadata for doc in docs],
    )


# ========== Manual RAG components ==========
# Answer-generation prompt: the formatted retrieved documents are injected
# as {context} and the user's query as {question}.
ANSWER_PROMPT_TEMPLATE = PromptTemplate(
    input_variables=["context", "question"],
    template=(
        "你是一位严谨的知识问答助手。以下是检索到的参考资料：\n"
        "{context}\n\n"
        "请根据参考资料回答问题：{question}\n"
        "请直接给出回答："
    )
)


class ManualRAG:
    """Minimal retrieve-then-generate pipeline over a FAISS index.

    Holds a vector store, a chat model and a fixed retrieval depth; ``run``
    executes one question end to end.
    """

    def __init__(self, vectorstore: FAISS, llm: ChatOpenAI, top_k: int):
        self.vectorstore = vectorstore
        self.llm = llm
        self.top_k = top_k

    def retrieve(self, question: str) -> List[Document]:
        """Return the ``top_k`` documents most similar to *question*."""
        return self.vectorstore.similarity_search(question, k=self.top_k)

    @staticmethod
    def _format_docs(docs: List[Document]) -> str:
        """Render retrieved docs as a numbered, source-tagged context string."""
        if not docs:
            return "（未检索到相关文档）"
        return "\n\n".join(
            f"[{pos}] 来源: {doc.metadata.get('source', 'unknown')}\n{doc.page_content}"
            for pos, doc in enumerate(docs, start=1)
        )

    def generate(self, question: str, docs: List[Document]) -> Tuple[str, object]:
        """Fill the answer prompt with *docs* as context and invoke the LLM once.

        Returns the stripped answer text together with the raw LLM response.
        """
        filled = ANSWER_PROMPT_TEMPLATE.format(
            context=self._format_docs(docs), question=question
        )
        reply = self.llm.invoke(filled)
        if hasattr(reply, "content"):
            answer = reply.content.strip()
        else:
            answer = str(reply).strip()
        return answer, reply

    def run(self, question: str) -> Tuple[str, List[Document], object]:
        """Retrieve context for *question*, then generate a grounded answer."""
        hits = self.retrieve(question)
        answer, raw = self.generate(question, hits)
        return answer, hits, raw


def build_rag_chain(vectorstore: FAISS, llm_temperature=0.0, top_k=3) -> ManualRAG:
    """Wire a Qwen chat model at the given temperature into a ManualRAG pipeline."""
    chat_model = ChatOpenAI(
        model_name=MODEL_NAME,
        temperature=llm_temperature,
        openai_api_key=API_KEY,
        openai_api_base=API_BASE,
    )
    return ManualRAG(vectorstore=vectorstore, llm=chat_model, top_k=top_k)


# ========== Judge 模块 ==========
def _parse_judge_score(text: str) -> float:
    """Extract a 0-5 judge score from *text*, normalized to [0, 1].

    Handles raw JSON, JSON wrapped in markdown code fences (``` / ```json),
    and free-form text with an explicit ``score: N`` field; as a last resort
    falls back to the first standalone 0-5 digit. Returns 0.0 when no score
    can be recovered.
    """
    # Judge models frequently wrap their JSON in markdown fences; unwrap first.
    fenced = re.search(r"```(?:json)?\s*(.*?)\s*```", text, re.DOTALL)
    candidate = fenced.group(1) if fenced else text
    try:
        payload = json.loads(candidate)
        score = float(payload.get("score", 0))
        return max(0.0, min(1.0, score / 5.0))
    except (ValueError, TypeError, AttributeError):
        # Not valid JSON (or not a dict) — fall through to regex recovery.
        pass
    # Prefer an explicit score field over an arbitrary digit, which could
    # otherwise match a number inside the judge's "reason" text.
    m = re.search(r'"?score"?\s*[:：]\s*([0-5])', text)
    if m is None:
        m = re.search(r"([0-5])", text)
    if m:
        return int(m.group(1)) / 5.0
    return 0.0


def llm_judge_score(question: str, reference: str, generated: str) -> float:
    """Score *generated* against *reference* using an LLM judge.

    A fresh deterministic (temperature=0) judge model grades the answer 0-5;
    the result is normalized to [0, 1]. Returns 0.0 if the judge's reply
    cannot be parsed.
    """
    judge_llm = ChatOpenAI(
        openai_api_base=API_BASE,
        openai_api_key=API_KEY,
        model_name=MODEL_NAME,
        temperature=0.0,
    )

    system_prompt = (
        "你是一个专业的回答质量评估员。请根据题目、参考答案和模型生成答案，"
        "从 0 到 5 为生成答案打分，标准包括正确性、相关性和简洁性。"
        "仅返回 JSON 格式，如 {\"score\": 4, \"reason\": \"回答较为准确，但略显冗长。\"}"
    )

    user_prompt = f"""
问题：
{question}

参考答案：
{reference}

模型回答：
{generated}

请评分。
"""

    # Concatenate into a single prompt (no separate system-message channel here).
    full_prompt = system_prompt + "\n\n" + user_prompt

    resp = judge_llm.invoke(full_prompt)
    try:
        judge_response_struct = resp.dict()
    except AttributeError:
        judge_response_struct = {"raw_response": repr(resp)}
    print("Judge LLM 返回结构:", judge_response_struct)
    text = resp.content.strip() if hasattr(resp, "content") else str(resp).strip()

    return _parse_judge_score(text)


# ========== 评估函数 ==========
def evaluate_rag_on_dataset(rag_chain: ManualRAG, dataset: List[Tuple[str, str]]) -> float:
    """Run the RAG chain over (question, reference) pairs; return the mean judge score.

    Prints the retrieved documents, the raw LLM response structure and the
    generated answer for each question. Returns 0.0 for an empty dataset.
    """
    collected = []
    for question, reference in dataset:
        answer, hits, raw = rag_chain.run(question)
        print("RAG 召回文档:", [
            {"source": hit.metadata.get("source"), "content": hit.page_content}
            for hit in hits
        ])
        try:
            response_struct = raw.dict()
        except AttributeError:
            response_struct = {"raw_response": repr(raw)}
        print("RAG LLM 返回结构:", response_struct)
        print("RAG 生成返回结果:", {"question": question, "generated": answer})
        collected.append(llm_judge_score(question, reference, answer))
    if not collected:
        return 0.0
    return float(np.mean(collected))


# ========== 验证集 ==========
def build_validation_dataset():
    """Return the (question, reference answer) pairs used to judge RAG output."""
    questions = [
        "什么是 RAG？",
        "LangChain 的主要用途是什么？",
        "FAISS 是用来做什么的？",
    ]
    references = [
        "RAG 是检索增强生成，通过检索外部知识并用生成模型结合检索到的信息生成答案。",
        "LangChain 用于构建基于 LLM 的应用，提供链式调用、memory、agents 等工具。",
        "FAISS 是 Facebook 提供的一个高效向量相似检索库。",
    ]
    return list(zip(questions, references))


# ========== Optuna 优化目标 ==========
def objective(trial: optuna.trial.Trial, vectorstore: FAISS, validate_dataset: List[Tuple[str, str]]):
    """Optuna objective: sample (top_k, temperature), evaluate the RAG chain, log, return the score.

    NOTE: the suggest calls are kept in this order (top_k first) so seeded
    TPE runs stay reproducible.
    """
    sampled = {
        "top_k": trial.suggest_int("top_k", 1, 5),
        "temperature": trial.suggest_float("temperature", 0.0, 1.0),
    }

    chain = build_rag_chain(
        vectorstore,
        llm_temperature=sampled["temperature"],
        top_k=sampled["top_k"],
    )
    mean_score = evaluate_rag_on_dataset(chain, validate_dataset)
    append_run_log(
        stage="optuna_trial",
        params={"temperature": sampled["temperature"], "top_k": sampled["top_k"]},
        result=mean_score,
        trial_number=trial.number,
    )
    return mean_score


# ========== 主流程 ==========
def main():
    """End-to-end driver: build the index, tune hyper-parameters, evaluate the best run."""
    print("开始构建语料与向量库...")
    corpus = build_sample_corpus()
    store = create_vectorstore(corpus)
    val_set = build_validation_dataset()

    print("开始 Optuna 超参优化...")
    sampler = optuna.samplers.TPESampler(seed=RANDOM_SEED)
    study = optuna.create_study(direction="maximize", sampler=sampler)
    # One trial per tqdm tick; raise the range for a longer search.
    for _ in tqdm(range(5)):
        study.optimize(
            lambda trial: objective(trial, store, val_set),
            n_trials=1,
            show_progress_bar=False,
        )

    print("优化完成。最佳参数：", study.best_trial.params)
    print("最佳平均得分：", study.best_value)

    best = study.best_trial.params
    tuned_temperature = best.get("temperature", 0.0)
    tuned_top_k = best.get("top_k", 3)
    tuned_chain = build_rag_chain(
        store,
        llm_temperature=tuned_temperature,
        top_k=tuned_top_k,
    )
    tuned_score = evaluate_rag_on_dataset(tuned_chain, val_set)
    print("最终模型得分：", tuned_score)
    append_run_log(
        stage="final_evaluation",
        params={"temperature": tuned_temperature, "top_k": tuned_top_k},
        result=tuned_score,
        trial_number=None,
    )


# Run the full demo only when executed as a script, not on import.
if __name__ == "__main__":
    main()
