# 人工智能NLP-Agent数字人项目-04-基金数据问答任务工单V1.1-2.14
import csv
import re
import copy
import utils.configFinRAG as configFinRAG
import pandas as pd

from utils.instances import LLM, TOKENIZER
from utils import prompts
from langchain_core.prompts import ChatPromptTemplate

# Module-level constants
PATTERN_DATE = r'\d{8}'  # matches 8-digit date strings (e.g. 20240214) in questions
MAX_PROMPT_LENGTH = 2000  # character budget for the few-shot examples section of the prompt
EXAMPLE_NUM = 5  # default number of most-similar examples to retrieve

def preprocess_question(question):
    """
    Strip 8-digit dates from the question, then tokenize the remainder.

    Dates are replaced with a single space so they don't influence the
    token-set similarity against the stored example questions.

    Returns:
        The tokenizer's input_ids for the date-free question text.
    """
    cleaned = question
    for date_str in re.findall(PATTERN_DATE, question):
        cleaned = cleaned.replace(date_str, ' ')
    return TOKENIZER(cleaned)['input_ids']

def calculate_similarity(temp_tokens, example_token_list):
    """
    Compute the Jaccard similarity between the query's tokens and each
    example question's tokens.

    Args:
        temp_tokens: token ids of the (preprocessed) input question.
        example_token_list: list of token-id lists, one per stored example.

    Returns:
        list[float]: Jaccard similarity |A ∩ B| / |A ∪ B| for each example,
        in the same order as example_token_list; 0 when both sets are empty.
    """
    # Hoist the query set: it is invariant across all examples.
    query_set = set(temp_tokens)
    similarity_list = []
    for example_tokens in example_token_list:
        example_set = set(example_tokens)
        intersection = len(query_set & example_set)
        # Bug fix: true Jaccard union is |A ∪ B|, not |A| + |B| — the old
        # denominator double-counted shared tokens, deflating every score.
        union = len(query_set | example_set)
        similarity = intersection / union if union != 0 else 0
        similarity_list.append(similarity)
    return similarity_list

def find_max_similar_indices(similarity_list, example_num):
    """
    Return the indices of the example_num largest similarity values, in
    descending order of similarity (ties broken by lower index first).

    Bug fix: the original zeroed out each selected entry and re-ran max(),
    so whenever the remaining maximum was 0 (all-zero similarities, or
    example_num > len(similarity_list)) it kept re-selecting index 0 and
    returned duplicate indices. A stable sort of the index positions keeps
    the same selection order for positive scores while never duplicating.

    Args:
        similarity_list: similarity score per example (non-negative floats).
        example_num: maximum number of indices to return.

    Returns:
        list[int]: up to example_num distinct indices, best match first.
    """
    # sorted() is stable, so equal scores keep ascending-index order,
    # matching the original first-occurrence tie-break of max()/index().
    ranked = sorted(range(len(similarity_list)),
                    key=similarity_list.__getitem__,
                    reverse=True)
    return ranked[:example_num]

def filter_indices_by_length(example_question_list, example_fa_list, max_index):
    """
    Keep only a leading prefix of max_index whose accumulated question + FA
    text stays within MAX_PROMPT_LENGTH characters, bounding the prompt size.

    Returns:
        list[int]: the retained indices, in the same order as max_index.
    """
    accumulated = ""
    kept_indices = []
    for idx in max_index:
        accumulated = accumulated + example_question_list[idx] + example_fa_list[idx]
        # Stop at the first example that would push us over the budget;
        # later (less similar) examples are dropped as well.
        if len(accumulated) > MAX_PROMPT_LENGTH:
            break
        kept_indices.append(idx)
    return kept_indices

def assemble_prompt(example_question_list, example_info_list, example_fa_list, short_index_list):
    """
    Build the few-shot examples section of the prompt: one
    question / info / answer triple per selected index, in order.

    Returns:
        str: the concatenated example text ('' when no indices are given).
    """
    sections = [
        f"问题：{example_question_list[i]}\n资料：{example_info_list[i]}\n答案：{example_fa_list[i]}\n"
        for i in short_index_list
    ]
    return ''.join(sections)

def generate_answer(question, fa, llm, example_question_list, example_info_list, example_fa_list,
                    example_token_list, example_num=EXAMPLE_NUM):
    """
    Answer a question with few-shot prompting.

    Pipeline: tokenize the question (dates removed), rank the stored
    examples by Jaccard token similarity, keep the top example_num matches
    that fit the prompt-length budget, assemble them into the few-shot
    section, and invoke the LLM through the answer prompt template.

    Args:
        question: the user question text.
        fa: the SQL execution result fed to the template as "FA".
        llm: the langchain-compatible chat model to invoke.
        example_*_list: parallel lists of example questions, infos, FAs,
            and pre-tokenized question ids.
        example_num: how many similar examples to consider (default EXAMPLE_NUM).

    Returns:
        str: the model response content.
    """
    query_tokens = preprocess_question(question)
    similarities = calculate_similarity(query_tokens, example_token_list)
    candidate_indices = find_max_similar_indices(similarities, example_num)
    # Trim the candidates so the assembled examples stay within budget.
    selected_indices = filter_indices_by_length(example_question_list, example_fa_list,
                                                candidate_indices)
    examples = assemble_prompt(example_question_list, example_info_list, example_fa_list,
                               selected_indices)

    template = ChatPromptTemplate.from_template(prompts.ANSWER_TEMPLATE)
    response = (template | llm).invoke({"examples": examples, "FA": fa, "question": question})
    return response.content

def read_examples():
    """
    Load the few-shot example table and pre-tokenize each example question.

    Reads the CSV at configFinRAG.sql_examples_path (columns: 问题 / 资料 / FA)
    and tokenizes every question up front for later similarity matching.

    Returns:
        Four parallel lists: questions, infos, FAs, and question token ids.
        On any read/parse error, prints a message and returns four empty lists.
    """
    questions, infos, fas, token_lists = [], [], [], []
    try:
        frame = pd.read_csv(configFinRAG.sql_examples_path, delimiter=",", header=0)
        for _, row in frame.iterrows():
            questions.append(row['问题'])
            infos.append(row['资料'])
            fas.append(row['FA'])
            token_lists.append(TOKENIZER(row['问题'])['input_ids'])
        return questions, infos, fas, token_lists
    except FileNotFoundError:
        print(f"未找到文件: {configFinRAG.sql_examples_path}")
        return [], [], [], []
    except Exception as e:
        print(f"读取文件时发生错误: {e}")
        return [], [], [], []

def process_questions(example_question_list, example_info_list, example_fa_list, example_token_list):
    """
    Answer every checked question and write the results to the answer CSV.

    Reads configFinRAG.question_sql_check_path, and for each row whose
    'flag' column equals 1 (SQL check passed), generates an answer via the
    few-shot pipeline and appends a row to configFinRAG.answer_path.

    On a read/processing error, prints a message and returns.
    """
    try:
        checked_frame = pd.read_csv(configFinRAG.question_sql_check_path, delimiter=",", header=0)
        with open(configFinRAG.answer_path, 'w', newline='', encoding='utf-8-sig') as answer_file:
            writer = csv.writer(answer_file)
            writer.writerow(['问题id', '问题', '资料', 'FA'])
            for _, row in checked_frame.iterrows():
                # Only answer rows whose SQL check succeeded.
                if row['flag'] != 1:
                    continue
                answer = generate_answer(row['问题'], row['执行结果'], LLM,
                                         example_question_list, example_info_list,
                                         example_fa_list, example_token_list)
                writer.writerow([str(row['问题id']),
                                 str(row['问题']),
                                 str(row['执行结果']),
                                 answer])
    except FileNotFoundError:
        print(f"未找到文件: {configFinRAG.question_sql_check_path}")
    except Exception as e:
        print(f"处理问题时发生错误: {e}")

if __name__ == '__main__':
    # Step 1: load the example questions / FA templates and tokenize them.
    questions, infos, fas, token_lists = read_examples()

    # Steps 2-3: answer the checked questions and write the answer file,
    # but only if at least one example was loaded successfully.
    if questions:
        process_questions(questions, infos, fas, token_lists)