import csv
import re
import copy
import pandas as pd

from utils.instances import TOKENIZER, LLM
from utils import prompts
from langchain_core.prompts import ChatPromptTemplate
import utils.configFinRAG as configFinRAG

def generate_sql(question, llm, example_question_list, example_sql_list, tmp_example_token_list, example_num=5):
    """Generate a SQL statement for ``question`` via few-shot prompting.

    The most similar example questions are selected as in-context examples
    using Jaccard similarity over tokenizer input ids, after blanking out
    8-digit (date-like) numbers so they don't dominate the match.

    Args:
        question: natural-language question to translate into SQL.
        llm: LangChain-compatible chat model (piped after the prompt).
        example_question_list: example questions, aligned index-for-index
            with ``example_sql_list`` and ``tmp_example_token_list``.
        example_sql_list: reference SQL for each example question.
        tmp_example_token_list: pre-tokenized ``input_ids`` per example question.
        example_num: maximum number of similar examples to consider.

    Returns:
        ``(prompt_value, sql)`` on success, or ``("error", "error")`` when the
        model response contains no usable fenced ```sql block.
    """
    sql_fence_start = '```sql'
    sql_fence_end = '```'

    # Blank out 8-digit numbers (dates like 20230101) before similarity search.
    search_text = question
    for date_token in re.findall(r'\d{8}', question):
        search_text = search_text.replace(date_token, ' ')

    question_token_set = set(TOKENIZER(search_text)['input_ids'])

    # Jaccard similarity |A ∩ B| / |A ∪ B| against every example question.
    # (The original divided by len(A)+len(B), which is not Jaccard and skews
    # the ranking toward short token sets.)
    similarity_list = []
    for tokens in tmp_example_token_list:
        token_set = set(tokens)
        union = len(question_token_set | token_set)
        intersection = len(question_token_set & token_set)
        similarity_list.append(intersection / union if union > 0 else 0)

    # Indices of the example_num most similar examples, best first. A stable
    # descending sort breaks ties by position, matching the original's
    # first-occurrence-of-max behavior, and — unlike the original's
    # "zero out the max and repeat" loop — can never select the same index
    # twice when zero similarities are present.
    max_index = sorted(range(len(similarity_list)),
                       key=lambda i: similarity_list[i],
                       reverse=True)[:example_num]

    # Cap the accumulated example text so the prompt doesn't grow too long.
    accumulated = ""
    short_index_list = []  # indices of the examples actually used
    for index in max_index:
        accumulated += example_question_list[index] + example_sql_list[index]
        if len(accumulated) > 2000:
            break
        short_index_list.append(index)

    # Assemble the few-shot prompt and invoke the model.
    prompt = ChatPromptTemplate.from_template(prompts.GENERATE_SQL_TEMPLATE)
    examples = '\n'.join(
        f"问题：{example_question_list[index]}\nSQL：{example_sql_list[index]}"
        for index in short_index_list
    )

    chain = prompt | llm
    response = chain.invoke({"examples": examples, "table_info": prompts.TABLE_INFO, "question": question})
    answer_text = response.content

    # Extract the first fenced ```sql ... ``` block. str.find returns -1 when
    # the fence is missing; guard BEFORE adding the fence length (the original
    # added len('```sql') to -1, yielding start_index=5 and silently
    # mis-slicing responses that contained no SQL fence).
    fence_pos = answer_text.find(sql_fence_start)
    if fence_pos >= 0:
        start_index = fence_pos + len(sql_fence_start)
        end_offset = answer_text[start_index:].find(sql_fence_end)
        if end_offset > 0:  # closing fence found and SQL body is non-empty
            sql = answer_text[start_index:start_index + end_offset]
            return prompt.invoke({"examples": examples, "table_info": prompts.TABLE_INFO, "question": question}), sql

    print("generate sql error:", question)
    return "error", "error"

if __name__ == '__main__':
    # Step 1: load the question/SQL example templates and pre-tokenize
    # every example question for similarity matching.
    examples_df = pd.read_csv(configFinRAG.sql_examples_path, delimiter=",", header=0)
    g_example_question_list = examples_df['问题'].tolist()
    g_example_sql_list = examples_df['SQL'].tolist()
    g_example_token_list = [TOKENIZER(q)['input_ids'] for q in g_example_question_list]

    # Step 2: load the classified test questions.
    questions_df = pd.read_csv(configFinRAG.question_classify_path, delimiter=",", header=0)

    with open(configFinRAG.question_sql_path, 'w', newline='', encoding='utf-8') as out_file:
        writer = csv.writer(out_file)
        writer.writerow(['问题id', '问题', 'SQL', 'prompt'])

        # Step 3: for each database-query question, pick the most similar
        # example questions (Jaccard over token ids) and generate its SQL.
        is_query = questions_df['分类'] == '查询数据库'
        for _, row in questions_df[is_query].iterrows():
            result_prompt, result = generate_sql(
                row['问题'], LLM,
                g_example_question_list, g_example_sql_list, g_example_token_list)
            writer.writerow([str(row['问题id']), str(row['问题']), result, result_prompt])

        # Non-query questions are skipped (logged only).
        for _, row in questions_df[~is_query].iterrows():
            print("pass question:", row['问题'])