# text2sql_query.py
import logging
import os
import re
from typing import Optional

import openai
import yaml
from dotenv import load_dotenv
from langchain_community.chat_models import ChatTongyi
from langchain_community.embeddings import DashScopeEmbeddings
from openai import OpenAI
from pymilvus import MilvusClient
from pymilvus import model
from sqlalchemy import create_engine, text


# 1. Environment and logging configuration
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
load_dotenv()  # load environment variables from a .env file

# 2. Initialize the OpenAI-compatible client (DashScope endpoint)
# openai.api_key = os.getenv("OPENAI_API_KEY")

openai_client = OpenAI(
    # If the env var is not configured, replace the line below with a literal
    # key, e.g.: api_key="sk-xxx",
    # NOTE(review): the env var is spelled "TONYI_API_KEY" (probably meant
    # "TONGYI") — kept as-is because the same name is used consistently
    # throughout this file; renaming would break existing deployments.
    api_key=os.getenv("TONYI_API_KEY"),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
)

# Prefer the new client-style API:
# e.g. openai_client.chat.completions.create(...) instead of the legacy
# openai.ChatCompletion.create(...)

# Chat model used for SQL generation and for ragas evaluation below.
MODEL_NAME = "qwen-plus"


# 3. Embedding-function initialization
def init_embedding():
    """Build the dense embedding function backed by the DashScope
    OpenAI-compatible endpoint (model: text-embedding-v1)."""
    embed_kwargs = {
        "api_key": os.getenv("TONYI_API_KEY"),
        "base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
        "model_name": "text-embedding-v1",
    }
    return model.dense.OpenAIEmbeddingFunction(**embed_kwargs)


# 4. Milvus client connection
# NOTE(review): hard-coded host/port — consider moving the URI into
# configuration (.env) alongside the API key.
client = MilvusClient(uri="http://172.30.132.142:19530", db_name="default")

# List existing collections (also serves as a reachability check at startup).
collections = client.list_collections()
print("Collections:", collections)

# client.load_collection("ddl_knowledge_li_0922")
# client.load_collection("dbdesc_knowledge_li_0922")

# 5. Instantiate the embedding function used to embed user questions
embedding_fn = init_embedding()

# # 6. 数据库连接（SAKILA）
# DB_URL = os.getenv(
#     "SAKILA_DB_URL", "mysql+pymysql://root:password@172.30.132.155:3306/sakila"
# )
# engine = create_engine(DB_URL)


# 7. Retrieval helper
def retrieve(
    collection: str,
    query_emb: list,
    top_k: int = 3,
    fields: Optional[list] = None,
):
    """Search a Milvus collection with a single query embedding.

    Args:
        collection: Name of the Milvus collection to search.
        query_emb: Dense embedding vector of the query.
        top_k: Maximum number of hits to return.
        fields: Output fields to include per hit; None keeps Milvus defaults.

    Returns:
        The hit list for the (single) query.
    """
    results = client.search(
        collection_name=collection, data=[query_emb], limit=top_k, output_fields=fields
    )
    # Lazy %-formatting: the message is only built when INFO is enabled.
    logging.info("[检索] %s 检索结果: %s", collection, results)
    # client.search returns one hit list per query vector; we sent exactly one.
    return results[0]


# 8. SQL extraction helper
def extract_sql(text: str) -> str:
    """Extract a SQL statement from an LLM response.

    Tries, in order:
      1. A fenced ```sql ... ``` code block — now case-insensitive and
         tolerant of CRLF line endings / extra whitespace around the fence
         (the original required an exact lowercase tag and bare "\\n").
      2. A bare SELECT ... ; statement anywhere in the text
         (case-insensitive, so lowercase "select" is also found).
      3. Falls back to the stripped original text so the caller can decide.
    """
    # DOTALL lets '.' span newlines inside the fenced block.
    sql_blocks = re.findall(r"```sql\s*(.*?)\s*```", text, re.DOTALL | re.IGNORECASE)
    if sql_blocks:
        return sql_blocks[0].strip()

    # No fenced block: look for a semicolon-terminated SELECT statement.
    select_match = re.search(r"\bSELECT\b.*?;", text, re.DOTALL | re.IGNORECASE)
    if select_match:
        return select_match.group(0).strip()

    # Nothing recognizable — return the raw text unchanged (stripped).
    return text.strip()


# 9. 执行 SQL 并返回结果
# def execute_sql(sql: str):
#     try:
#         with engine.connect() as conn:
#             result = conn.execute(text(sql))
#             cols = result.keys()
#             rows = result.fetchall()
#             return True, cols, rows
#     except Exception as e:
#         return False, None, str(e)


# 10. SQL generation via the chat model
def generate_sql(prompt: str, error_msg: Optional[str] = None) -> str:
    """Ask the chat model for a SQL statement.

    Args:
        prompt: The fully assembled text2sql prompt.
        error_msg: Error message from a previously failed execution; when
            given, it is appended to the prompt so the model can repair its
            last attempt.

    Returns:
        The SQL statement extracted from the model's raw response.
    """
    if error_msg:
        prompt += f"\n之前的SQL执行失败，错误信息：{error_msg}\n请修正SQL语句："

    response = openai_client.chat.completions.create(
        model=MODEL_NAME,
        messages=[{"role": "user", "content": prompt}],
    )
    raw_sql = response.choices[0].message.content.strip()
    sql = extract_sql(raw_sql)
    # Lazy %-formatting avoids building the strings when INFO is disabled.
    logging.info("[生成] 原始输出: %s", raw_sql)
    logging.info("[生成] 提取的SQL: %s", sql)
    return sql


# 11. Core flow: natural language -> SQL
def text2sql(question: str, max_retries: int = 3):
    """Turn a natural-language question into SQL via RAG + LLM generation.

    Retrieves DDL statements, example pairs, and column descriptions from
    Milvus, assembles a prompt, and asks the model for SQL, retrying up to
    ``max_retries`` times when generation raises.

    Args:
        question: Natural-language question.
        max_retries: Maximum number of generation attempts.

    Returns:
        Tuple ``(sql, contexts)`` where ``sql`` is the generated statement
        ("" when every attempt failed) and ``contexts`` is the list of the
        three retrieval context sections embedded in the prompt.
    """
    # 11.1 Embed the user question.
    q_emb = embedding_fn([question])[0]
    logging.info("[检索] 问题嵌入完成")

    # 11.2 RAG retrieval: DDL statements.
    ddl_hits = retrieve(
        "ddl_knowledge_li_0922", q_emb.tolist(), top_k=3, fields=["ddl_text"]
    )
    logging.info("[检索] DDL检索结果: %s", ddl_hits)
    try:
        ddl_context = "\n".join(
            hit.get("entity").get("ddl_text", "") for hit in ddl_hits
        )
        print(ddl_context)
    except Exception as e:
        logging.error("[检索] DDL处理错误: %s", e)
        ddl_context = ""

    # 11.3 RAG retrieval: example NL/SQL pairs.
    # NOTE(review): currently disabled — the hit list is hard-coded empty,
    # so example_context is always "". Re-enable by retrieving from the
    # q2sql collection here.
    q2sql_hits = []
    logging.info("[检索] Q2SQL检索结果: %s", q2sql_hits)
    try:
        example_context = "\n".join(
            f"NL: \"{hit.get('question', '')}\"\nSQL: \"{hit.get('sql_text', '')}\""
            for hit in q2sql_hits
        )
    except Exception as e:
        logging.error("[检索] Q2SQL处理错误: %s", e)
        example_context = ""

    # 11.4 RAG retrieval: column descriptions.
    desc_hits = retrieve(
        "dbdesc_knowledge_li_0922",
        q_emb.tolist(),
        top_k=5,
        fields=["table_name", "column_name", "description"],
    )
    logging.info("[检索] 字段描述检索结果: %s", desc_hits)
    try:
        desc_context = "\n".join(
            f"{hit.get('entity').get('table_name', '')}.{hit.get('entity').get('column_name', '')}: {hit.get('entity').get('description', '')}"
            for hit in desc_hits
        )
    except Exception as e:
        logging.error("[检索] 字段描述处理错误: %s", e)
        desc_context = ""

    # Context sections returned to the caller (built once instead of being
    # duplicated in every return statement, as the original did).
    contexts = [
        f"### Schema Definitions:\n{ddl_context}\n",
        f"### Field Descriptions:\n{desc_context}\n",
        f"### Examples:\n{example_context}\n",
    ]

    # 11.5 Assemble the base prompt.
    base_prompt = (
        f"### Schema Definitions:\n{ddl_context}\n"
        f"### Field Descriptions:\n{desc_context}\n"
        f"### Examples:\n{example_context}\n"
        f'### Query:\n"{question}"\n'
        "请只返回SQL语句，不要包含任何解释或说明。"
    )

    # 11.6 Generate SQL with up to max_retries attempts. On failure the error
    # message is fed back into the next prompt so the model can self-correct.
    # (Fixes original bug: the except branch returned immediately, so the
    # loop could never retry and error_msg was never populated.)
    error_msg = None
    for attempt in range(max_retries):
        logging.info("[执行] 第 %d 次尝试", attempt + 1)
        try:
            sql = generate_sql(base_prompt, error_msg)
            logging.info("gen sql: %s", sql)
            return sql, contexts
        except Exception as e:  # narrowed from bare except: keep Ctrl-C working
            error_msg = str(e)
            logging.error("[执行] 第 %d 次执行失败: %s", attempt + 1, error_msg)

    # All attempts exhausted.
    print(f"执行失败，已达到最大重试次数 {max_retries}。")
    print("最后错误信息：", error_msg)
    return "", contexts


# 12. Script entry point: sample evaluation questions, generate SQL for each,
# score the results with ragas, and print Markdown report tables.
if __name__ == "__main__":
    import json
    import random
    from ragas import evaluate
    from ragas.metrics import (
        context_precision,
        context_recall,
        answer_relevancy,
        faithfulness,
        answer_correctness,
    )
    from datasets import Dataset

    # Load the question/SQL evaluation pairs (UTF-8 path and content).
    with open("90-文档-Data/sakila/q2sql_pairs.json", "r", encoding="utf-8") as f:
        q2sql_pairs = json.load(f)

    evaluation_results = []
    eval_dataset = []

    # Randomly sample 8 questions as the evaluation set.
    for pair in random.sample(q2sql_pairs, 8):
        print(f"问题: {pair['question']}")
        # Generate SQL (plus the retrieval contexts used in the prompt).
        sql, contexts = text2sql(pair["question"])
        if not sql:
            # Generation failed for this question; nothing to evaluate.
            continue
        eval_pair = {
            "question": pair["question"],
            "contexts": contexts,
            "answer": sql,
            "ground_truth": pair["sql"],
        }
        eval_dataset.append(eval_pair)
        # Score the generated SQL with ragas (one-row dataset per question).
        result = evaluate(
            Dataset.from_list([eval_pair]),
            llm=ChatTongyi(
                model=MODEL_NAME,
                api_key=os.getenv("TONYI_API_KEY"),
            ),
            embeddings=DashScopeEmbeddings(
                dashscope_api_key=os.getenv("TONYI_API_KEY"),
                model="text-embedding-v1",
            ),
            metrics=[
                context_recall,
                answer_relevancy,
                faithfulness,
                answer_correctness,
            ],
        )

        print(result)
        # Record per-question scores, formatted to two decimals.
        evaluation_results.append(
            {
                "question": pair["question"],
                "context_recall": f'{result["context_recall"][0]:.2f}',
                "answer_relevancy": f'{result["answer_relevancy"][0]:.2f}',
                "faithfulness": f'{result["faithfulness"][0]:.2f}',
                "answer_correctness": f'{result["answer_correctness"][0]:.2f}',
            }
        )

        print(evaluation_results)

    # Per-question Markdown table.
    # (Fix: separator row previously had 6 columns vs the 5-column header.)
    print("\n## 评估结果\n")
    print("| 问题 |  上下文召回率 | 答案相关性 | 答案忠实度 | 答案正确性 |")
    print("|------|--------------|------------|------------|------------|")
    for r in evaluation_results:
        print(
            f"| {r['question']} |  {r['context_recall']} | {r['answer_relevancy']} | {r['faithfulness']} | {r['answer_correctness']} |"
        )

    # Overall averages.
    # (Fix: guard against ZeroDivisionError when every generation failed.)
    print("\n## 整体统计\n")
    print("| 指标 | 平均值 |")
    print("|------|------|")
    if evaluation_results:
        n = len(evaluation_results)
        for key, label in (
            ("context_recall", "上下文召回率"),
            ("answer_relevancy", "答案相关性"),
            ("faithfulness", "答案忠实度"),
            ("answer_correctness", "答案正确性"),
        ):
            avg = sum(float(r[key]) for r in evaluation_results) / n
            print(f"| {label} | {avg:.2f} |")
