import csv
import re
import pandas as pd
import logging
from utils.instances import TOKENIZER, LLM
from utils import prompts
from langchain_core.prompts import ChatPromptTemplate
import utils.configFinRAG as configFinRAG

# Configure root logging: timestamped INFO-level messages for the whole script.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


def jaccard_similarity(list1, list2):
    """Return the Jaccard similarity |A ∩ B| / |A ∪ B| of two token sequences.

    Args:
        list1: First sequence of hashable tokens (e.g. tokenizer input_ids).
        list2: Second sequence of hashable tokens.

    Returns:
        float: Similarity in [0, 1]; 0 when both sets are empty or on error.
    """
    try:
        set1, set2 = set(list1), set(list2)
        intersection = len(set1 & set2)
        # Bug fix: the union is |A ∪ B|, not |A| + |B|. The old denominator
        # double-counted shared tokens (the Dice denominator), which deflated
        # every score and is not the Jaccard index the name promises.
        union = len(set1 | set2)
        return intersection / union if union != 0 else 0
    except Exception as e:
        logging.error(f"Jaccard 相似度计算出错: {e}")
        return 0


def generate_sql(user_question, language_model, example_questions, example_sqls, example_token_lists, num_examples=5):
    """Generate a SQL statement for a fund-related question via few-shot prompting.

    The question is tokenized (with 6-digit fund codes stripped first), matched
    against pre-tokenized example questions by Jaccard similarity, and the most
    similar examples are packed into a prompt that is sent to the language model.
    The SQL is extracted from the model's ```sql fenced code block.

    Args:
        user_question: Natural-language question, possibly containing fund codes.
        language_model: LangChain-compatible chat model (used via ``prompt | model``).
        example_questions: Example questions, parallel to ``example_sqls``.
        example_sqls: SQL statements for the example questions.
        example_token_lists: Pre-tokenized ``input_ids`` for each example question.
        num_examples: Maximum number of few-shot examples to consider.

    Returns:
        tuple: (rendered prompt, extracted SQL string) on success,
        ("error", "error") when no SQL fence is found or any step raises.
    """
    try:
        # Fund codes are 6-digit numbers; strip them before similarity scoring
        # so the lexical match focuses on wording rather than specific codes.
        fund_code_pattern = r'\d{6}'
        sql_start_pattern = '```sql'
        sql_end_pattern = '```'
        processed_question = user_question
        for fund_code in re.findall(fund_code_pattern, user_question):
            processed_question = processed_question.replace(fund_code, ' ')
        question_tokens = TOKENIZER(processed_question)['input_ids']
        # Rank all examples by similarity to the (code-stripped) question.
        similarities = [jaccard_similarity(question_tokens, token_list) for token_list in example_token_lists]
        top_indices = sorted(range(len(similarities)), key=lambda i: similarities[i], reverse=True)[:num_examples]
        # Greedily keep the most similar examples while the prompt stays under
        # a 2000-character budget; stop at the first example that overflows.
        prompt_length = 0
        selected_indices = []
        for index in top_indices:
            prompt_length += len(example_questions[index]) + len(example_sqls[index])
            if prompt_length > 2000:
                break
            selected_indices.append(index)
        prompt_template = ChatPromptTemplate.from_template(prompts.GENERATE_SQL_TEMPLATE)
        example_prompts = '\n'.join([
            f"问题：{example_questions[index]}\nSQL：{example_sqls[index]}"
            for index in selected_indices
        ])
        # Build the chain and ask the model for SQL.
        chain = prompt_template | language_model
        response = chain.invoke(
            {"examples": example_prompts, "table_info": prompts.TABLE_INFO, "question": user_question})
        sql = response.content
        # Bug fix: the old code computed start_index = find(...) + len(marker)
        # BEFORE testing "start_index >= 0", so the test was always true even
        # when the ```sql fence was absent (find() == -1 gives start_index == 5).
        # Check the raw find() result first, then locate the closing fence.
        marker_index = sql.find(sql_start_pattern)
        if marker_index != -1:
            start_index = marker_index + len(sql_start_pattern)
            end_index = sql.find(sql_end_pattern, start_index)
            # Require non-empty content between the fences (matches the old
            # "start_index < end_index" success condition).
            if end_index > start_index:
                extracted_sql = sql[start_index:end_index]
                return prompt_template.invoke({"examples": example_prompts, "table_info": prompts.TABLE_INFO,
                                               "question": user_question}), extracted_sql
        logging.error(f"生成 SQL 出错: {user_question}")
        return "error", "error"
    except Exception as e:
        logging.error(f"generate_sql 函数出错: {e}")
        return "error", "error"


def read_example_data():
    """Load the example question/SQL pairs and pre-tokenize the questions.

    Reads the CSV at ``configFinRAG.sql_examples_path`` and tokenizes every
    example question once up front so similarity scoring can reuse the results.

    Returns:
        tuple: (questions, SQL statements, token-id lists); three empty lists
        when the file cannot be read or parsed.
    """
    try:
        examples = pd.read_csv(configFinRAG.sql_examples_path, delimiter=",", header=0)
        questions = examples['问题'].tolist()
        sqls = examples['SQL'].tolist()
        token_lists = [TOKENIZER(question)['input_ids'] for question in questions]
    except Exception as e:
        logging.error(f"读取示例数据出错: {e}")
        return [], [], []
    return questions, sqls, token_lists


def process_questions(example_questions, example_sqls, example_token_lists):
    """Generate SQL for every fund-related database question and write a CSV.

    Reads classified test questions from ``configFinRAG.question_classify_path``,
    calls :func:`generate_sql` for each question classified as a database query
    that mentions funds, and writes (id, question, SQL, prompt) rows to
    ``configFinRAG.question_sql_path``. Non-matching questions are logged and
    skipped; any failure is logged and the function returns normally.
    """
    try:
        question_df = pd.read_csv(configFinRAG.question_classify_path, delimiter=",", header=0)

        with open(configFinRAG.question_sql_path, 'w', newline='', encoding='utf-8-sig') as out_file:
            writer = csv.writer(out_file)
            writer.writerow(['问题id', '问题', 'SQL', 'prompt'])

            for _, row in question_df.iterrows():
                # Guard clauses: only database-query questions about funds proceed.
                if row['分类'] != '查询数据库':
                    logging.info(f"非查询数据库类问题，跳过: {row['问题']}")
                    continue
                if '基金' not in row['问题']:
                    logging.info(f"非基金相关问题，跳过: {row['问题']}")
                    continue
                prompt_used, generated_sql = generate_sql(
                    row['问题'], LLM, example_questions, example_sqls, example_token_lists
                )
                writer.writerow([
                    str(row['问题id']),
                    str(row['问题']),
                    generated_sql,
                    prompt_used
                ])
    except Exception as e:
        logging.error(f"处理问题出错: {e}")


if __name__ == '__main__':
    try:
        # Load the few-shot examples once, then process every test question.
        questions, sqls, token_lists = read_example_data()
        if not (questions and sqls and token_lists):
            logging.error("示例数据读取失败，程序终止。")
        else:
            process_questions(questions, sqls, token_lists)
    except Exception as e:
        logging.error(f"程序运行出错: {e}")
