import copy
import csv
import logging
import re
from typing import List, Tuple

import pandas as pd
from langchain_core.prompts import ChatPromptTemplate

from utils.configFinRAG import sql_examples_path, question_sql_check_path, answer_path
from utils.instances import LLM, TOKENIZER
from utils import prompts

# Configure the root logger once at import time (this module is run as a script).
logging.basicConfig(level=logging.INFO)

def tokenize(text: str) -> List[int]:
    """Encode *text* with the shared project TOKENIZER and return its token ids."""
    encoding = TOKENIZER(text)
    return encoding['input_ids']

def calculate_similarity(tokens_a: List[int], tokens_b: List[int]) -> float:
    """Jaccard similarity of the unique token ids in two token lists.

    Returns a value in [0, 1]; 0 when both lists are empty.
    """
    unique_a, unique_b = set(tokens_a), set(tokens_b)
    shared = unique_a & unique_b
    combined = unique_a | unique_b
    if not combined:
        return 0
    return len(shared) / len(combined)

def extract_dates(text: str) -> List[str]:
    """Return every 8-digit substring in *text* (date-like tokens such as '20231231')."""
    eight_digits = re.compile(r'\d{8}')
    return eight_digits.findall(text)

def generate_answer(question: str, fa: str, llm, example_question_list: List[str], example_info_list: List[str],
                    example_fa_list: List[str], tmp_example_token_list: List[List[int]], example_num: int = 5) -> str:
    """Answer *question* with the LLM, few-shot-prompted with the most similar examples.

    Args:
        question: The user question to answer.
        fa: Supporting material (the SQL execution result) passed to the prompt as ``FA``.
        llm: A langchain-compatible chat model (piped after the prompt template).
        example_question_list / example_info_list / example_fa_list: Parallel lists of
            few-shot example questions, materials, and answers.
        tmp_example_token_list: Pre-computed token-id lists for each example question.
        example_num: Maximum number of examples to consider (top-k by similarity).

    Returns:
        The LLM response content (a string).
    """
    # Strip 8-digit dates so similarity reflects the question's structure rather
    # than its specific date values.
    temp_question = question
    for t_date in extract_dates(temp_question):
        temp_question = temp_question.replace(t_date, ' ')

    temp_tokens = tokenize(temp_question)

    # Jaccard similarity against every example question.
    similarity_list = [calculate_similarity(temp_tokens, tokens) for tokens in tmp_example_token_list]

    # Indices of the top `example_num` most similar examples.
    max_indices = sorted(range(len(similarity_list)), key=lambda i: similarity_list[i], reverse=True)[:example_num]

    # Build the few-shot section once, stopping before the cumulative text exceeds
    # 2000 characters. (The original built this string twice: once only to measure
    # length, then again from the surviving indices — same inclusion rule, one pass.)
    examples = ""
    for index in max_indices:
        chunk = f"问题：{example_question_list[index]}\n资料：{example_info_list[index]}\n答案：{example_fa_list[index]}\n"
        if len(examples) + len(chunk) > 2000:
            break
        examples += chunk

    # Assemble the prompt and query the model.
    prompt = ChatPromptTemplate.from_template(prompts.ANSWER_TEMPLATE)
    chain = prompt | llm
    # NOTE(review): the date-stripped question (temp_question), not the original,
    # is sent to the LLM, so the model never sees the removed dates — confirm
    # this is intentional (the dates may be expected to appear in `fa`).
    response = chain.invoke({"examples": examples, "FA": fa, "question": temp_question})
    return response.content

def main():
    """Generate answers for all validated questions and write them to ``answer_path``.

    Steps:
      1. Load the few-shot example table and pre-tokenize every example question.
      2. For each row of the check file with ``flag == 1``, pick the most similar
         examples and ask the LLM for an answer.
      3. Write each answer as a CSV row; per-row failures are logged and skipped.
    """
    # Step 1: read the example templates and pre-tokenize their questions.
    sql_examples_file = pd.read_csv(sql_examples_path, delimiter=",", header=0)
    example_lists = {
        'questions': [],
        'infos': [],
        'fa': [],
        'tokens': [],
    }
    for _, row in sql_examples_file.iterrows():
        example_lists['questions'].append(row['问题'])
        example_lists['infos'].append(row['资料'])
        example_lists['fa'].append(row['FA'])
        example_lists['tokens'].append(tokenize(row['问题']))

    # Step 2: load the questions whose SQL execution results passed checking.
    result_csv_file = pd.read_csv(question_sql_check_path, delimiter=",", header=0)

    # utf-8-sig keeps the Chinese headers readable in Excel; newline='' is
    # required by the csv module when writing text files.
    with open(answer_path, 'w', newline='', encoding='utf-8-sig') as answer_file:
        # `csv` is provided by the module-level imports (it was referenced here
        # without ever being imported, which raised NameError at runtime).
        csvwriter = csv.writer(answer_file)
        csvwriter.writerow(['问题id', '问题', '资料', 'FA'])

        # Step 3: answer only the rows that passed the check (flag == 1),
        # matching each against the example templates via Jaccard similarity.
        for _, row in result_csv_file.iterrows():
            if row['flag'] != 1:
                continue
            try:
                result = generate_answer(row['问题'], row['执行结果'], LLM,
                                         example_lists['questions'], example_lists['infos'],
                                         example_lists['fa'], example_lists['tokens'])
                csvwriter.writerow([row['问题id'], row['问题'], row['执行结果'], result])
            except Exception as e:
                # logging.exception keeps the original message but also records
                # the full traceback; processing continues with the next row.
                logging.exception(f"Error processing question {row['问题id']}: {e}")

if __name__ == '__main__':
    main()