# 人工智能NLP-Agent数字人项目-04-基金数据问答任务工单V1.1-2025.2.14
import csv
import re
import copy
import pandas as pd
from utils.instances import LLM, TOKENIZER
from utils import prompts
from langchain_core.prompts import ChatPromptTemplate
from configFinRAG import sql_examples_path, question_sql_check_path, answer_path


def tokenize_and_filter(question):
    """Tokenize *question* after masking out 8-digit date strings.

    Returns a tuple ``(token_ids, date_list)`` where ``date_list`` holds the
    8-digit substrings (presumably YYYYMMDD dates) removed before tokenizing,
    so date digits do not pollute the similarity search.
    """
    date_pattern = re.compile(r'\d{8}')
    extracted_dates = date_pattern.findall(question)
    masked_question = date_pattern.sub(' ', question)
    token_ids = TOKENIZER(masked_question)['input_ids']
    return token_ids, extracted_dates


def calculate_similarity(tokens, example_tokens_list):
    """Compute the Jaccard similarity between *tokens* and each example token list.

    Args:
        tokens: token ids of the incoming question.
        example_tokens_list: list of token-id lists for the stored example questions.

    Returns:
        A list of floats in [0, 1], one per example, in input order.

    Note: the original denominator was ``len(A) + len(B)`` which is NOT the
    Jaccard index the docstring promises (and is not Dice either); fixed to
    use the union. Also guards the empty/empty case, which previously raised
    ZeroDivisionError.
    """
    token_set = set(tokens)
    similarity_list = []
    for example_tokens in example_tokens_list:
        example_set = set(example_tokens)
        union = token_set | example_set
        # Jaccard index: |A ∩ B| / |A ∪ B|; define 0.0 when both sets are empty.
        score = len(token_set & example_set) / len(union) if union else 0.0
        similarity_list.append(score)
    return similarity_list


def get_top_similar_indices(similarity_list, top_n=5):
    """Return the indices of the ``top_n`` highest scores in *similarity_list*.

    Ties keep their original (ascending index) order because Python's sort
    is stable even with ``reverse=True``.
    """
    ranked = sorted(enumerate(similarity_list), key=lambda pair: pair[1], reverse=True)
    return [index for index, _score in ranked[:top_n]]


def generate_prompt(examples, fa, question):
    """Render the answer prompt template and invoke the LLM.

    Args:
        examples: formatted few-shot examples text.
        fa: the supporting material ("FA") for the current question.
        question: the user's question text.

    Returns:
        The LLM response content (string).
    """
    template = ChatPromptTemplate.from_template(prompts.ANSWER_TEMPLATE)
    llm_response = (template | LLM).invoke(
        {"examples": examples, "FA": fa, "question": question}
    )
    return llm_response.content


def generate_answer(question, fa, example_question_list, example_info_list, example_fa_list, example_token_list, example_num=5):
    """Answer *question* using the ``example_num`` most similar stored examples.

    Tokenizes the question, ranks the stored examples by token-set similarity,
    assembles a few-shot context from the best matches, and asks the LLM.
    The extracted date strings from tokenization are currently unused here.
    """
    question_tokens, _dates = tokenize_and_filter(question)
    scores = calculate_similarity(question_tokens, example_token_list)
    chosen = get_top_similar_indices(scores, example_num)

    parts = []
    for idx in chosen:
        parts.append(f"问题：{example_question_list[idx]}\n")
        parts.append(f"资料：{example_info_list[idx]}\n")
        parts.append(f"答案：{example_fa_list[idx]}\n")

    return generate_prompt("".join(parts), fa, question)


def load_examples(file_path):
    """Load the example questions, supporting info, answers and token ids.

    Expects a comma-delimited CSV with columns '问题' (question),
    '资料' (material) and 'FA' (final answer); the questions are tokenized
    up-front so similarity search does not re-tokenize per query.
    """
    table = pd.read_csv(file_path, delimiter=",", header=0)
    questions = table['问题'].tolist()
    materials = table['资料'].tolist()
    answers = table['FA'].tolist()
    token_lists = [TOKENIZER(text)['input_ids'] for text in questions]
    return questions, materials, answers, token_lists


def process_questions(result_csv_file, example_question_list, example_info_list, example_fa_list, example_token_list):
    """Generate answers for flagged rows and write them to ``answer_path``.

    Args:
        result_csv_file: DataFrame with columns '问题id', '问题', '执行结果'
            and 'flag' (1 means the row passed the SQL check and needs an answer).
        example_*: parallel lists of example questions/info/answers/token ids
            as returned by ``load_examples``.

    Side effects:
        Overwrites the CSV at ``answer_path`` (utf-8-sig for Excel compatibility).
    """
    # `with` guarantees the output file is closed even if answer generation
    # raises mid-loop (the original left the handle open on exceptions).
    with open(answer_path, 'w', newline='', encoding='utf-8-sig') as answer_file:
        csvwriter = csv.writer(answer_file)
        csvwriter.writerow(['问题id', '问题', '资料', 'FA'])

        for _, row in result_csv_file.iterrows():
            # Only rows whose SQL check succeeded (flag == 1) get an answer.
            if row['flag'] == 1:
                result = generate_answer(row['问题'], row['执行结果'], example_question_list, example_info_list, example_fa_list, example_token_list)
                csvwriter.writerow([row['问题id'], row['问题'], row['执行结果'], result])


if __name__ == '__main__':
    # Load the example bank (questions, materials, answers, token ids).
    example_data = load_examples(sql_examples_path)

    # Load the SQL-checked questions awaiting answer generation.
    pending_questions = pd.read_csv(question_sql_check_path, delimiter=",", header=0)

    # Answer every flagged question and write the results CSV.
    process_questions(pending_questions, *example_data)
