import csv
import re
import copy
import utils.configFinRAG as configFinRAG
import pandas as pd
from utils.instances import LLM, TOKENIZER
from utils import prompts
from langchain_core.prompts import ChatPromptTemplate

# Load the few-shot example data (question / context / final answer) and
# pre-tokenize every question for later similarity matching.
def read_example_data():
    """Read the SQL example CSV and return four parallel lists.

    Returns:
        tuple: (questions, contexts, final answers, token-id lists); the
        i-th element of each list belongs to the same example row.
    """
    examples_df = pd.read_csv(configFinRAG.sql_examples_path, delimiter=",", header=0)
    example_question_list = examples_df['问题'].tolist()
    example_info_list = examples_df['资料'].tolist()
    example_fa_list = examples_df['FA'].tolist()
    # Token ids are cached once here so the similarity loop in
    # generate_answer() does not re-tokenize examples per query.
    example_token_list = [TOKENIZER(q)['input_ids'] for q in example_question_list]
    return example_question_list, example_info_list, example_fa_list, example_token_list

# Generate an answer via the LLM, using the few-shot examples whose
# questions are most similar to the incoming question.
def generate_answer(question, fa, llm, example_question_list, example_info_list, example_fa_list,
                    tmp_example_token_list, example_num=5):
    """Build a few-shot prompt from the most similar examples and invoke the LLM.

    Args:
        question: the user question to answer.
        fa: the executed query result (资料) to ground the answer on.
        llm: a langchain-compatible chat model (piped after the prompt).
        example_question_list / example_info_list / example_fa_list:
            parallel lists of few-shot examples.
        tmp_example_token_list: pre-computed token-id lists for the example
            questions (same order as the lists above).
        example_num: maximum number of examples to select (default 5).

    Returns:
        The LLM response content, or None if invocation raised.
    """
    # Strip 8-digit dates (e.g. 20240131) before matching, so questions that
    # differ only by date still map onto the same example template.
    pattern = r'\d{8}'
    question_for_search = re.sub(pattern, ' ', question)
    query_tokens = set(TOKENIZER(question_for_search)['input_ids'])

    # True Jaccard similarity |A ∩ B| / |A ∪ B|.
    # Fix: the original divided by |A| + |B|, which is neither Jaccard nor
    # Dice and could rank examples differently from the documented intent.
    # Guard the denominator: 0 similarity when both token sets are empty.
    similarity_list = []
    for tokens in tmp_example_token_list:
        token_set = set(tokens)
        union_size = len(query_tokens | token_set)
        similarity_list.append(
            len(query_tokens & token_set) / union_size if union_size else 0.0
        )

    # Indices of the example_num most similar example questions.
    max_index = sorted(range(len(similarity_list)), key=lambda i: similarity_list[i], reverse=True)[:example_num]

    # Cap the accumulated example text at 2000 characters so the prompt
    # does not grow unboundedly; stop before the example that overflows.
    temp_length_test = ""
    short_index_list = []  # indices of the examples actually used
    for index in max_index:
        temp_length_test += example_question_list[index] + example_fa_list[index]
        if len(temp_length_test) > 2000:
            break
        short_index_list.append(index)

    # Assemble the few-shot prompt and invoke the chain.
    prompt = ChatPromptTemplate.from_template(prompts.ANSWER_TEMPLATE)
    examples = '\n'.join([
        f"问题：{example_question_list[index]}\n资料：{example_info_list[index]}\n答案：{example_fa_list[index]}"
        for index in short_index_list
    ])

    chain = prompt | llm
    try:
        response = chain.invoke({"examples": examples, "FA": fa, "question": question})
        return response.content
    except Exception as e:
        # Best-effort: log and return None so the caller can skip this row.
        print(f"生成答案时出错: {e}")
        return None

if __name__ == '__main__':
    # Step 1: read example questions / FA templates and tokenize the questions.
    g_example_question_list, g_example_info_list, g_example_fa_list, g_example_token_list = read_example_data()

    # Step 2: read the checked question/SQL-result rows to be answered.
    result_csv_file = pd.read_csv(configFinRAG.question_sql_check_path, delimiter=",", header=0)

    try:
        # Fix: use a `with` block instead of manual open()/close() plus the
        # fragile `'answer_file' in locals()` guard — the file is now closed
        # deterministically even when an exception is raised mid-loop.
        with open(configFinRAG.answer_path, 'w', newline='', encoding='utf-8-sig') as answer_file:
            csvwriter = csv.writer(answer_file)
            csvwriter.writerow(['问题id', '问题', '资料', 'FA'])

            # Step 3: for each validated row (flag == 1), pick the most
            # similar few-shot examples, generate an answer, and write it out.
            for _, row in result_csv_file.iterrows():
                if row['flag'] == 1:
                    result = generate_answer(row['问题'], row['执行结果'], LLM,
                                             g_example_question_list, g_example_info_list, g_example_fa_list,
                                             g_example_token_list)
                    # generate_answer returns None on LLM failure; skip the row.
                    if result is not None:
                        csvwriter.writerow([
                            str(row['问题id']),
                            str(row['问题']),
                            str(row['执行结果']),
                            result
                        ])
    except Exception as e:
        print(f"写入答案文件时出错: {e}")