# 人工智能NLP-Agent数字人项目-04-基金数据问答任务工单V1.1--2.14
import csv
import re
import pandas as pd
from utils.instances import LLM, TOKENIZER
from utils import prompts, configFinRAG
from langchain_core.prompts import ChatPromptTemplate


# Function to clean and process the question
def clean_question(question):
    pattern1 = r'\d{8}'  # Matches 8-digit numbers
    date_list = re.findall(pattern1, question)
    for t_date in date_list:
        question = question.replace(t_date, ' ')
    return question


# Function to calculate similarity scores for example questions
def calculate_similarity(question_tokens, example_tokens_list):
    similarity_scores = [
        len(question_tokens & example_tokens) / (len(question_tokens) + len(example_tokens))
        for example_tokens in example_tokens_list
    ]
    return similarity_scores


# Function to generate the answer
def generate_answer(question, fa, llm, example_question_list, example_info_list, example_fa_list, example_token_list,
                    example_num=5):
    cleaned_question = clean_question(question)
    question_tokens_set = set(TOKENIZER(cleaned_question)['input_ids'])

    # Calculate similarity scores
    similarity_scores = calculate_similarity(question_tokens_set, example_token_list)
    max_index = sorted(range(len(similarity_scores)), key=lambda i: similarity_scores[i], reverse=True)[:example_num]

    # Create the prompt by limiting to 2000 characters
    examples = ""
    for i in max_index:
        example = f"问题：{example_question_list[i]}\n资料：{example_info_list[i]}\n答案：{example_fa_list[i]}\n"
        if len(examples) + len(example) < 2000:
            examples += example
        else:
            break

    prompt = ChatPromptTemplate.from_template(prompts.ANSWER_TEMPLATE)
    chain = prompt | llm
    response = chain.invoke({"examples": examples, "FA": fa, "question": question})
    return response.content


# Function to load example data
def load_example_data(file_path):
    sql_examples_file = pd.read_csv(file_path)
    example_question_list = sql_examples_file['问题'].tolist()
    example_info_list = sql_examples_file['资料'].tolist()
    example_fa_list = sql_examples_file['FA'].tolist()
    example_token_list = [set(TOKENIZER(q)['input_ids']) for q in example_question_list]
    return example_question_list, example_info_list, example_fa_list, example_token_list


# Function to process the question and generate answers
def process_questions(result_file, example_data, llm, output_file):
    example_question_list, example_info_list, example_fa_list, example_token_list = example_data

    with open(output_file, 'w', newline='', encoding='utf-8-sig') as answer_file:
        csvwriter = csv.writer(answer_file)
        csvwriter.writerow(['问题id', '问题', '资料', 'FA'])

        for _, row in result_file.iterrows():
            if row['flag'] == 1:
                result = generate_answer(
                    row['问题'],
                    row['执行结果'],
                    llm,
                    example_question_list,
                    example_info_list,
                    example_fa_list,
                    example_token_list
                )
                csvwriter.writerow([row['问题id'], row['问题'], row['执行结果'], result])


# Main execution
if __name__ == '__main__':
    # Load few-shot examples, read the SQL-checked questions, and write
    # generated answers to the configured output path.
    examples = load_example_data(configFinRAG.sql_examples_path)
    checked_questions = pd.read_csv(configFinRAG.question_sql_check_path)
    process_questions(checked_questions, examples, LLM, configFinRAG.answer_path)

