# 人工智能NLP-Agent数字人项目-04-基金数据问答任务工单V1.1-2.13
import csv
import re
import copy
import pandas as pd
from utils.instances import TOKENIZER, LLM
from utils import prompts
from langchain_core.prompts import ChatPromptTemplate
import utils.configFinRAG as configFinRAG


def extract_dates(question):
    """
    Extract dates written as 8 consecutive digits (e.g. 20240101) from a question.

    :param question: input question text
    :return: list of matched 8-digit strings, in order of appearance
             (non-overlapping matches; longer digit runs yield their first 8 digits)
    """
    return re.findall(r'\d{8}', question)


def replace_dates(question, date_list):
    """
    Blank out every date occurrence in the question.

    Each string in *date_list* is replaced by a single space so that
    similarity comparison is not dominated by date tokens.

    :param question: input question text
    :param date_list: date strings to remove
    :return: the question with each date replaced by ' '
    """
    result = question
    for token in date_list:
        result = result.replace(token, ' ')
    return result


def calculate_jaccard_similarity(token1, token2):
    """
    Compute the Jaccard similarity of two token lists.

    Jaccard similarity is |A ∩ B| / |A ∪ B| over the sets of tokens.
    The previous implementation used |A| + |B| as the denominator, which
    double-counts the intersection and is not the Jaccard union.

    :param token1: first token list
    :param token2: second token list
    :return: similarity in [0, 1]; 0 when both sets are empty
    """
    set1, set2 = set(token1), set(token2)
    union = len(set1 | set2)
    # Both sets empty -> define similarity as 0 to avoid division by zero.
    return len(set1 & set2) / union if union != 0 else 0


def find_top_similar_indices(similarity_list, example_num):
    """
    Return the indices of the top *example_num* similarities, in descending
    order of similarity.

    Fixes two defects in the previous implementation: it marked a chosen
    slot by writing 0 into it, which broke when similarities were
    legitimately 0 (an all-zero list returned the same index repeatedly),
    and its emptiness check never fired (the working list never shrank),
    so example_num > len(similarity_list) also produced duplicate indices.

    :param similarity_list: list of similarity scores
    :param example_num: maximum number of indices to return
    :return: up to example_num distinct indices, highest similarity first;
             ties keep the earlier index first (stable sort), matching the
             old "first occurrence of the max" tie-break
    """
    order = sorted(range(len(similarity_list)),
                   key=similarity_list.__getitem__, reverse=True)
    return order[:example_num]


def generate_examples(example_question_list, example_sql_list, short_index_list):
    """
    Build the few-shot examples text for the prompt.

    :param example_question_list: example questions
    :param example_sql_list: example SQL statements (parallel to questions)
    :param short_index_list: indices of the selected examples
    :return: concatenated "问题：.../SQL：..." lines, one pair per index
    """
    pairs = [
        f"问题：{example_question_list[i]}\nSQL：{example_sql_list[i]}\n"
        for i in short_index_list
    ]
    return ''.join(pairs)


def extract_sql(response):
    """
    Extract the SQL statement from an LLM response fenced as ```sql ... ```.

    Fixes a defect in the previous implementation: the not-found result of
    str.find (-1) was added to the marker length before being checked, so
    "start_index >= 0" never detected a missing ```sql marker and a response
    containing an unrelated ``` fence could return arbitrary text.

    :param response: raw response text
    :return: the text between '```sql' and the next '```' (including any
             leading newline), or None if the fences are missing or empty
    """
    marker = '```sql'
    start = response.find(marker)
    if start == -1:
        return None
    start += len(marker)
    end = response.find('```', start)
    # end <= start covers both "no closing fence beyond start" edge cases
    # and an empty ```sql``` block, which the original also rejected.
    if end <= start:
        return None
    return response[start:end]


def generate_sql(question, llm, example_question_list, example_sql_list, example_token_list, example_num=5):
    """
    Generate a SQL statement for *question* via few-shot prompting.

    Dates are stripped from the question before tokenization so that
    similarity against the example questions is not dominated by date
    tokens; the most similar examples are then packed into the prompt.

    :param question: input question
    :param llm: large language model (langchain runnable)
    :param example_question_list: example questions
    :param example_sql_list: example SQL statements (parallel list)
    :param example_token_list: tokenized example questions
    :param example_num: maximum number of similar examples to consider
    :return: (rendered prompt, sql) on success, ("error", "error") otherwise
    """
    dates = extract_dates(question)
    stripped_question = replace_dates(question, dates)
    question_tokens = TOKENIZER(stripped_question)['input_ids']

    similarities = [
        calculate_jaccard_similarity(question_tokens, tokens)
        for tokens in example_token_list
    ]
    candidate_indices = find_top_similar_indices(similarities, example_num)

    # Accumulate examples until the combined text would exceed 2000 chars;
    # the example that crosses the limit is dropped along with the rest.
    selected_indices = []
    running_text = ""
    for idx in candidate_indices:
        running_text += example_question_list[idx] + example_sql_list[idx]
        if len(running_text) > 2000:
            break
        selected_indices.append(idx)

    examples = generate_examples(example_question_list, example_sql_list, selected_indices)
    prompt = ChatPromptTemplate.from_template(prompts.GENERATE_SQL_TEMPLATE)
    payload = {"examples": examples, "table_info": prompts.TABLE_INFO, "question": question}

    response = (prompt | llm).invoke(payload)
    sql = extract_sql(response.content)
    if sql is None:
        print(f"generate sql error: {question}")
        return "error", "error"
    return prompt.invoke(payload), sql


def main():
    """
    Batch-generate SQL for every question classified as a database query.

    Reads the question/SQL example templates and the classified questions
    from the paths in configFinRAG, then writes one CSV row per generated
    SQL (question id, question, SQL, rendered prompt).
    """
    try:
        # Step 1: load the example questions/SQL and tokenize each question
        # once up front so similarity scoring can reuse the token lists.
        examples_df = pd.read_csv(configFinRAG.sql_examples_path, delimiter=",", header=0)
        example_question_list = [row['问题'] for _, row in examples_df.iterrows()]
        example_sql_list = [row['SQL'] for _, row in examples_df.iterrows()]
        example_token_list = [TOKENIZER(q)['input_ids'] for q in example_question_list]

        # Step 2: load the classified test questions and open the output file.
        questions_df = pd.read_csv(configFinRAG.question_classify_path, delimiter=",", header=0)

        with open(configFinRAG.question_sql_path, 'w', newline='', encoding='utf-8-sig') as out_file:
            writer = csv.writer(out_file)
            writer.writerow(['问题id', '问题', 'SQL', 'prompt'])

            # Step 3: for each database-query question, pick the most
            # similar examples via Jaccard similarity and generate SQL.
            for _, record in questions_df.iterrows():
                if record['分类'] != '查询数据库':
                    print(f"pass question: {record['问题']}")
                    continue
                result_prompt, result = generate_sql(
                    record['问题'], LLM, example_question_list, example_sql_list, example_token_list
                )
                writer.writerow([record['问题id'], record['问题'], result, result_prompt])
    except FileNotFoundError as e:
        print(f"文件未找到: {e.filename}")
    except Exception as e:
        print(f"发生未知错误: {e}")


if __name__ == '__main__':
    main()