import csv
import re
import pandas as pd
import time

from utils.instances import TOKENIZER, LLM
from utils import prompts
from langchain_core.prompts import ChatPromptTemplate
import utils.configFinRAG as configFinRAG

# Module-level constants; named to keep the script's tuning knobs explicit.
DATE_PATTERN = r'\d{8}'  # 8-digit date tokens (e.g. 20240101) blanked out before similarity search
SQL_BLOCK_START = '```sql'  # opening fence of the SQL code block in the LLM response
SQL_BLOCK_END = '```'  # closing fence of that code block
MAX_PROMPT_LENGTH = 2000  # character budget for the few-shot example section of the prompt
REQUEST_INTERVAL = 1  # seconds to wait between LLM requests
MAX_RETRIES = 3  # maximum retry attempts on rate-limit (429) errors
RETRY_DELAY = 5  # seconds to wait before retrying after a 429

def preprocess_question(question):
    """Normalize a question for similarity search by blanking out 8-digit date tokens."""
    normalized = re.sub(DATE_PATTERN, ' ', question)
    return normalized

def calculate_jaccard_similarities(search_tokens, example_tokens):
    """Compute the Jaccard similarity between the query tokens and each example.

    Returns one float per entry of example_tokens.  An empty union is guarded
    by `or 1` so the score degrades to 0.0 instead of dividing by zero.
    """
    query_set = set(search_tokens)
    scores = []
    for tokens in example_tokens:
        candidate = set(tokens)
        denominator = len(query_set | candidate) or 1
        scores.append(len(query_set & candidate) / denominator)
    return scores

def select_top_similar_indices(similarities, example_num):
    """Return the indices of the example_num highest similarity scores.

    Ties keep their original relative order because Python's sort is stable.
    """
    ranked = sorted(range(len(similarities)),
                    key=similarities.__getitem__,
                    reverse=True)
    return ranked[:example_num]

def filter_short_indices(example_questions, example_sqls, top_indices):
    """Keep a prefix of top_indices whose combined question+SQL text fits
    within the MAX_PROMPT_LENGTH character budget.

    Stops at the first candidate that would overflow, even if a later shorter
    one might still fit — the indices are ranked by similarity, so preserving
    rank order matters more than packing density.
    """
    kept = []
    remaining = MAX_PROMPT_LENGTH
    for idx in top_indices:
        cost = len(example_questions[idx]) + len(example_sqls[idx])
        if cost > remaining:
            break
        kept.append(idx)
        remaining -= cost
    return kept

def assemble_examples(example_questions, example_sqls, short_indices):
    """Render the selected examples as newline-separated question/SQL pairs."""
    rendered_pairs = []
    for idx in short_indices:
        rendered_pairs.append(f'问题：{example_questions[idx]}\nSQL：{example_sqls[idx]}')
    return '\n'.join(rendered_pairs)

def extract_sql_from_response(response_text):
    """Pull the SQL statement out of a fenced ```sql ... ``` block.

    Returns the text between the opening and closing fences (including any
    leading newline), or None when no complete fenced block is present.
    """
    open_pos = response_text.find(SQL_BLOCK_START)
    if open_pos == -1:
        return None
    body_start = open_pos + len(SQL_BLOCK_START)
    close_pos = response_text.find(SQL_BLOCK_END, body_start)
    if close_pos == -1:
        return None
    return response_text[body_start:close_pos]

def generate_sql(question, llm, example_questions, example_sqls, example_tokens, example_num=5):
    """Generate a SQL statement for `question` via few-shot prompting.

    Picks the `example_num` most similar example questions (Jaccard over
    tokenizer input ids), trims them to the prompt-length budget, then asks
    the LLM, retrying up to MAX_RETRIES times on HTTP 429 rate limits.

    Returns:
        (prompt, sql) on success, where prompt is the rendered prompt value
        and sql is the extracted statement; ('error', 'error') when the
        retries are exhausted or no SQL block is found in the response.

    Raises:
        Any non-429 exception from the LLM call is propagated to the caller.
    """
    search_question = preprocess_question(question)
    search_tokens = TOKENIZER(search_question)['input_ids']

    similarities = calculate_jaccard_similarities(search_tokens, example_tokens)
    top_indices = select_top_similar_indices(similarities, example_num)
    short_indices = filter_short_indices(example_questions, example_sqls, top_indices)

    examples = assemble_examples(example_questions, example_sqls, short_indices)

    prompt = ChatPromptTemplate.from_template(prompts.GENERATE_SQL_TEMPLATE)
    chain = prompt | llm

    # Single source of truth for the template variables; previously this dict
    # was duplicated verbatim for chain.invoke and prompt.invoke, risking drift.
    payload = {
        'examples': examples,
        'table_info': prompts.TABLE_INFO,
        'question': question,
    }

    for retry in range(MAX_RETRIES):
        time.sleep(REQUEST_INTERVAL)  # basic client-side rate limiting
        try:
            response = chain.invoke(payload)
            break
        except Exception as e:
            # Only retry HTTP 429 (rate limit); any other failure is a real error.
            if hasattr(e, 'status_code') and e.status_code == 429:
                print(f'请求过多（429错误），{RETRY_DELAY} 秒后进行第 {retry + 1} 次重试...')
                time.sleep(RETRY_DELAY)
            else:
                raise
    else:
        # for/else: reached only when every attempt failed with a 429.
        print(f"问题 '{question}' 重试 {MAX_RETRIES} 次后仍失败。")
        return 'error', 'error'

    sql = extract_sql_from_response(response.content)
    if sql:
        # Return the rendered prompt alongside the SQL for logging/audit.
        return prompt.invoke(payload), sql
    print(f"为问题 '{question}' 生成 SQL 失败。")
    return 'error', 'error'

def read_data(file_path, columns):
    """Read a CSV file and verify it contains the required columns.

    Args:
        file_path: path to a comma-delimited CSV file whose first row is the header.
        columns: iterable of column names that must all be present.

    Returns:
        The DataFrame on success, or None (after printing an error message)
        when the file is missing, cannot be parsed, or lacks a required column.
    """
    try:
        data = pd.read_csv(file_path, delimiter=',', header=0)
    except FileNotFoundError:
        print(f"错误：文件 {file_path} 未找到。")
        return None
    except (pd.errors.EmptyDataError, pd.errors.ParserError) as e:
        # An empty or malformed file previously crashed the script; treat it
        # like a missing file per this function's print-and-return-None contract.
        print(f"错误：文件 {file_path} 无法解析：{e}")
        return None
    if not all(col in data.columns for col in columns):
        print(f"错误：文件 {file_path} 中缺少必要列。")
        return None
    return data

def process_questions(example_questions, example_sqls, example_tokens, question_csv):
    """Generate SQL for every database-query question and write the results to CSV.

    Rows classified as '查询数据库' are fed through generate_sql; every other
    row is skipped with a console notice.  The output file gets a header row
    of id / question / SQL / prompt and is written with a UTF-8 BOM so that
    spreadsheet tools open it correctly.
    """
    try:
        with open(configFinRAG.question_sql_path, 'w', newline='', encoding='utf-8-sig') as result_file:
            writer = csv.writer(result_file)
            writer.writerow(['问题id', '问题', 'SQL', 'prompt'])
            for _, row in question_csv.iterrows():
                # Guard clause: only database-query questions get a SQL statement.
                if row['分类'] != '查询数据库':
                    print(f"跳过问题：{row['问题']}")
                    continue
                prompt, sql = generate_sql(
                    row['问题'], LLM, example_questions, example_sqls, example_tokens
                )
                writer.writerow([row['问题id'], row['问题'], sql, prompt])
    except PermissionError:
        print(f"错误：没有权限写入文件 {configFinRAG.question_sql_path}。")
    except Exception as e:
        print(f"处理问题时出现意外错误：{e}")

def main():
    """Entry point: load few-shot examples and classified questions, then emit SQL."""
    examples = read_data(configFinRAG.sql_examples_path, ['问题', 'SQL'])
    if examples is None:
        return

    questions = examples['问题'].tolist()
    sqls = examples['SQL'].tolist()
    # Pre-tokenize every example question once so similarity search is cheap.
    tokens = [TOKENIZER(q)['input_ids'] for q in questions]

    classified = read_data(configFinRAG.question_classify_path, ['问题id', '问题', '分类'])
    if classified is None:
        return

    process_questions(questions, sqls, tokens, classified)

# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()