# AI NLP Agent Digital-Human Project - 04 - Fund Data Q&A Task Ticket V1.1 - 2025.2.12
import csv
import re
import pandas as pd
from utils.instances import TOKENIZER, LLM
from utils import prompts
from langchain_core.prompts import ChatPromptTemplate
import utils.configFinRAG as configFinRAG


def tokenize_question(question):
    """Tokenize *question* with the shared TOKENIZER and return its input-id list."""
    # The tokenizer returns an encoding mapping; only the input ids are needed
    # for the set-based similarity comparison downstream.
    return TOKENIZER(question)['input_ids']


def compute_similarity(question_tokens, example_token_list):
    """Return the Jaccard similarity between the question and each example.

    Jaccard similarity is |A ∩ B| / |A ∪ B| computed over the *sets* of token
    ids, so repeated tokens count once.  The previous implementation divided
    the intersection by the sum of the two set sizes, which is neither Jaccard
    nor Dice and raised ZeroDivisionError when both token lists were empty.

    Args:
        question_tokens: token ids of the (date-stripped) user question.
        example_token_list: list of token-id lists, one per stored example.

    Returns:
        A list of floats in [0, 1], one per example, in the same order as
        ``example_token_list``.  An empty-vs-empty comparison scores 0.0.
    """
    # Hoist the question set out of the loop; it is invariant.
    question_set = set(question_tokens)
    similarity_list = []
    for example_tokens in example_token_list:
        example_set = set(example_tokens)
        union = question_set | example_set
        # Guard: two empty token sets have an empty union — define that as 0.0.
        similarity_list.append(len(question_set & example_set) / len(union) if union else 0.0)
    return similarity_list


def get_most_similar_examples(similarity_list, num_examples=5):
    """Return the indices of the ``num_examples`` highest-similarity entries.

    Indices are ordered from most to least similar; ties keep their original
    order because Python's sort is stable.
    """
    ranked = sorted(enumerate(similarity_list), key=lambda pair: pair[1], reverse=True)
    return [index for index, _score in ranked[:num_examples]]


def generate_sql(question, llm, example_question_list, example_sql_list, example_token_list, example_num=5):
    """Generate a SQL query for *question* via few-shot prompting of *llm*.

    Retrieves the ``example_num`` most similar stored examples (by Jaccard
    similarity over token ids, with 8-digit dates stripped so they do not skew
    the match), builds a prompt from them, invokes the LLM, and extracts the
    SQL from a ```` ```sql ```` fenced block in the response.

    Args:
        question: natural-language question, possibly containing YYYYMMDD dates.
        llm: a LangChain chat model supporting ``prompt | llm`` composition.
        example_question_list / example_sql_list: parallel example stores.
        example_token_list: pre-tokenized examples, parallel to the above.
        example_num: maximum number of few-shot examples to consider.

    Returns:
        ``(response, sql)`` on success, or ``("error", "error")`` when no
        fenced SQL block is found in the model output.
    """
    # Replace 8-digit dates with a space before similarity search; the raw
    # question (dates included) is still what gets sent to the LLM.
    question_for_search = re.sub(r'\d{8}', ' ', question)
    question_tokens = tokenize_question(question_for_search)

    similarity_list = compute_similarity(question_tokens, example_token_list)
    max_index = get_most_similar_examples(similarity_list, example_num)

    # Keep only as many examples as fit within a ~2000-character budget so
    # the prompt does not grow unbounded.  Sum lengths instead of repeatedly
    # concatenating a throwaway string.
    short_index_list = []
    total_length = 0
    for index in max_index:
        total_length += len(example_question_list[index]) + len(example_sql_list[index])
        if total_length > 2000:
            break
        short_index_list.append(index)

    examples = ''.join(
        f"问题：{example_question_list[index]}\nSQL：{example_sql_list[index]}\n" for index in short_index_list)
    prompt = ChatPromptTemplate.from_template(prompts.GENERATE_SQL_TEMPLATE)
    # Compose the LCEL chain first, then invoke it with the template variables.
    # The previous `prompt | llm.invoke({...})` piped the prompt with the
    # *result* of invoking the LLM on a raw dict, so the template was never
    # actually formatted into the request.
    chain = prompt | llm
    response = chain.invoke({"examples": examples, "table_info": prompts.TABLE_INFO, "question": question})

    # Extract the SQL from the first ```sql ... ``` fenced block.  Check the
    # raw find() result: the old code added len('```sql') before testing,
    # turning a -1 ("not found") into 5 and defeating the guard entirely.
    sql = response.content
    marker_index = sql.find('```sql')
    if marker_index >= 0:
        start_index = marker_index + len('```sql')
        end_index = sql.find('```', start_index)
        if end_index > start_index:
            return response, sql[start_index:end_index].strip()
    print(f"SQL generation error for question: {question}")
    return "error", "error"


def process_questions():
    """Batch-generate SQL for every question classified as a database query.

    Loads the example question/SQL store, pre-tokenizes it once, then streams
    the classified questions and writes (id, question, SQL, prompt) rows to
    the output CSV configured in ``configFinRAG``.
    """
    # Example store: questions, their SQL, and tokenized forms (computed once).
    examples_df = pd.read_csv(configFinRAG.sql_examples_path)
    example_questions = examples_df['问题'].tolist()
    example_sqls = examples_df['SQL'].tolist()
    example_tokens = [tokenize_question(q) for q in example_questions]

    classified_df = pd.read_csv(configFinRAG.question_classify_path)

    # utf-8-sig BOM keeps the CSV readable in Excel with Chinese headers.
    with open(configFinRAG.question_sql_path, 'w', newline='', encoding='utf-8-sig') as out_file:
        writer = csv.writer(out_file)
        writer.writerow(['问题id', '问题', 'SQL', 'prompt'])

        for _, record in classified_df.iterrows():
            # Guard clause: only database-query questions get SQL generated.
            if record['分类'] != '查询数据库':
                print(f"Skipping question: {record['问题']}")
                continue
            prompt_used, sql_text = generate_sql(
                record['问题'], LLM, example_questions, example_sqls, example_tokens
            )
            writer.writerow([str(record['问题id']), record['问题'], sql_text, prompt_used])


if __name__ == '__main__':
    # Script entry point: batch-generate SQL for the classified questions CSV.
    process_questions()

