"""
Spider数据集评估工具
用于评估模型在Spider金融数据上的表现
"""
import json
import os
import pandas as pd
from tqdm import tqdm
import sqlite3
import re
import time
import matplotlib.pyplot as plt
from ai_service import generate_sql, extract_sql_from_ai_response
# 使用新的模型文件
from spider_models import DatabaseSchema, Table, Column, EvaluationResult, EvaluationSummary

def load_finance_data():
    """Load the pre-filtered finance-related Spider questions.

    Returns:
        The parsed JSON content of knowledge/spider/finance_questions.json
        (a list of question entries, each expected to carry a "content" key).

    Raises:
        FileNotFoundError: if the filtered-data file does not exist. The
            original fell through and implicitly returned None here (with the
            re-filtering code commented out), which made callers crash later
            with an unrelated TypeError; failing loudly is clearer.
    """
    data_path = "knowledge/spider/finance_questions.json"
    if not os.path.exists(data_path):
        raise FileNotFoundError(
            f"筛选后的数据文件不存在: {data_path}"
        )
    with open(data_path, "r", encoding="utf-8") as f:
        return json.load(f)

def load_database_schema(db_id):
    """Build a DatabaseSchema object for one Spider SQLite database.

    Args:
        db_id: Spider database identifier; the database file is expected at
            spider_data/spider_data/database/{db_id}/{db_id}.sqlite.

    Returns:
        A DatabaseSchema containing one Table (with its Columns) per user
        table, or None when the database file does not exist.
    """
    db_path = f"spider_data/spider_data/database/{db_id}/{db_id}.sqlite"

    if not os.path.exists(db_path):
        print(f"数据库文件不存在: {db_path}")
        return None

    conn = sqlite3.connect(db_path)
    try:
        cursor = conn.cursor()

        # List all tables in the database
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
        tables = cursor.fetchall()

        tables_list = []
        for (table_name,) in tables:
            # Skip SQLite's internal bookkeeping tables
            if table_name.startswith('sqlite_'):
                continue

            # Fetch column metadata. Quote the identifier so table names
            # containing spaces or quotes cannot break the PRAGMA statement
            # (the original interpolated the raw name into the f-string).
            quoted_name = table_name.replace('"', '""')
            cursor.execute(f'PRAGMA table_info("{quoted_name}");')
            columns_info = cursor.fetchall()

            # PRAGMA row format: (cid, name, type, notnull, dflt_value, pk)
            columns_list = [
                Column(
                    name=col[1],
                    type=col[2],
                    description=f"Column {col[1]} in table {table_name}"
                )
                for col in columns_info
            ]

            tables_list.append(Table(
                name=table_name,
                description=f"Table {table_name} in {db_id} database",
                columns=columns_list
            ))
    finally:
        # Always release the connection, even if a query raises
        # (the original leaked it on any exception).
        conn.close()

    return DatabaseSchema(name=db_id, tables=tables_list)

def normalize_sql(sql):
    """Normalize a SQL string so two queries can be compared textually.

    Strips comments, unifies quoting, drops AS keywords and (heuristically)
    table aliases, collapses whitespace, lower-cases the query and removes a
    trailing semicolon.  Returns "" for falsy input.
    """
    if not sql:
        return ""

    # Rules applied before whitespace is collapsed. Order matters.
    pre_collapse_rules = (
        (r'--.*?(\n|$)', ' ', 0),               # line comments
        (r'/\*.*?\*/', ' ', re.DOTALL),         # block comments
        (r"'(.*?)'", r'"\1"', 0),               # single -> double quotes
        (r'\bAS\b', ' ', re.IGNORECASE),        # drop AS, keep the alias token
    )
    for pattern, replacement, flags in pre_collapse_rules:
        sql = re.sub(pattern, replacement, sql, flags=flags)

    # Collapse whitespace runs and trim the ends before the
    # position-sensitive rules below run.
    sql = re.sub(r'\s+', ' ', sql).strip()

    # Rules that assume single-space-separated tokens.
    post_collapse_rules = (
        # exactly one space after major keywords
        (r'(SELECT|FROM|WHERE|GROUP BY|ORDER BY|HAVING|LIMIT|JOIN|ON|AND|OR|NOT|UNION|INTERSECT|EXCEPT)\s+',
         r'\1 ', re.IGNORECASE),
        (r',\s+', ',', 0),                      # no space after commas
        (r'\(\s+', '(', 0),                     # no space just inside parens
        (r'\s+\)', ')', 0),
        # heuristic table-alias removal ("table1 t1" -> "table1")
        (r'([a-zA-Z0-9_]+)(\s+[a-zA-Z0-9_]+)(\s*)(,|\s+|$|WHERE|ON|GROUP|ORDER|HAVING|LIMIT)',
         r'\1\3\4', re.IGNORECASE),
        # drop qualifier prefixes ("t1.column" -> "column")
        (r'([a-zA-Z0-9_]+)\.([a-zA-Z0-9_]+)', r'\2', 0),
    )
    for pattern, replacement, flags in post_collapse_rules:
        sql = re.sub(pattern, replacement, sql, flags=flags)

    sql = sql.lower()
    # Strip at most one trailing semicolon, mirroring the original behavior.
    if sql.endswith(';'):
        sql = sql[:-1]
    return sql.strip()

def are_sqls_equivalent(sql1, sql2):
    """Heuristically decide whether two SQL queries are semantically equivalent.

    Comparison cascade:
      1. exact equality after normalize_sql()
      2. equality with all whitespace removed
      3. for simple SELECT-FROM queries (no WHERE/GROUP BY/ORDER BY on either
         side), equality of the unordered column set and the unordered table set

    Args:
        sql1, sql2: raw SQL strings (either may be empty/None).

    Returns:
        True when any check succeeds, otherwise False.
    """
    # Normalize both queries first
    norm_sql1 = normalize_sql(sql1)
    norm_sql2 = normalize_sql(sql2)

    # 1. Direct comparison of the normalized queries
    if norm_sql1 == norm_sql2:
        return True

    # 2. Looser comparison: ignore all remaining whitespace
    if re.sub(r'\s+', '', norm_sql1) == re.sub(r'\s+', '', norm_sql2):
        return True

    # 3. Loosest comparison: unordered SELECT columns and FROM tables
    try:
        # Extract the SELECT list
        select1 = re.search(r'select\s+(.*?)\s+from', norm_sql1, re.IGNORECASE)
        select2 = re.search(r'select\s+(.*?)\s+from', norm_sql2, re.IGNORECASE)

        # Extract the FROM clause (trailing space guarantees the $ branch can match)
        from1 = re.search(r'from\s+(.*?)($|\s+where|\s+group|\s+order|\s+having|\s+limit)', norm_sql1 + ' ', re.IGNORECASE)
        from2 = re.search(r'from\s+(.*?)($|\s+where|\s+group|\s+order|\s+having|\s+limit)', norm_sql2 + ' ', re.IGNORECASE)

        if select1 and select2 and from1 and from2:
            # Compare the selected columns, ignoring order
            cols1 = set(c.strip() for c in select1.group(1).split(','))
            cols2 = set(c.strip() for c in select2.group(1).split(','))

            # Compare the source tables, ignoring order
            tables1 = set(t.strip() for t in from1.group(1).split(','))
            tables2 = set(t.strip() for t in from2.group(1).split(','))

            # Only treat as equivalent when both queries are plain
            # SELECT-FROM (no filtering/grouping/ordering clauses)
            if cols1 == cols2 and tables1 == tables2:
                is_simple1 = 'where' not in norm_sql1 and 'group by' not in norm_sql1 and 'order by' not in norm_sql1
                is_simple2 = 'where' not in norm_sql2 and 'group by' not in norm_sql2 and 'order by' not in norm_sql2

                if is_simple1 and is_simple2:
                    return True
    except Exception:
        # Parsing failed; fall through to "not equivalent". The original used
        # a bare except, which would also swallow KeyboardInterrupt/SystemExit.
        pass

    return False

def execute_sql_query(db_path, sql):
    """Execute *sql* against the SQLite database at *db_path*.

    Args:
        db_path: path to the SQLite database file.
        sql: SQL statement to run.

    Returns:
        (rows, column_names, None) on success, where rows is a list of tuples
        and column_names is a list of strings (empty when the statement
        produces no result columns); (None, None, error_message) on failure.
    """
    conn = None
    try:
        conn = sqlite3.connect(db_path)
        cursor = conn.cursor()
        cursor.execute(sql)
        results = cursor.fetchall()
        column_names = [description[0] for description in cursor.description] if cursor.description else []
        return results, column_names, None
    except Exception as e:
        return None, None, str(e)
    finally:
        # The original only closed the connection on the success path,
        # leaking it whenever execution raised.
        if conn is not None:
            conn.close()

def are_results_equivalent(results1, cols1, results2, cols2):
    """Decide whether two query result sets contain the same rows.

    Row order is ignored (sets are compared).  When both sides expose the
    same column names in a different order, the first result set's values
    are permuted into the second one's column order before comparing.
    A None result set on either side is never equivalent to anything.
    """
    # A failed query (None) never matches
    if results1 is None or results2 is None:
        return False

    same_names_reordered = (
        cols1 and cols2
        and set(cols1) == set(cols2)
        and cols1 != cols2
    )
    if same_names_reordered:
        # Permute each row of results1 into cols2's column order
        position_of = {name: idx for idx, name in enumerate(cols1)}
        results1 = [
            tuple(row[position_of[name]] for name in cols2)
            for row in results1
        ]

    # Order-insensitive comparison of the rows
    return set(results1) == set(results2)

def determine_query_complexity(sql):
    """Classify a SQL query into one of five complexity levels.

    Args:
        sql: the SQL query text (case-insensitive; lower-cased internally).

    Returns:
        1: basic    - single table, no conditions
        2: simple   - single table, WHERE conditions, simple ordering
        3: medium   - simple JOIN or subquery, basic aggregation
        4: complex  - multi-table JOIN, GROUP BY, complex conditions
        5: advanced - deep nesting, multi-level aggregation, HAVING,
                      window functions, set operations
    """
    sql = sql.lower()

    # Aggregate functions (substring checks, as in the original heuristic)
    agg_functions = ['count(', 'sum(', 'avg(', 'min(', 'max(']
    has_aggregation = any(agg in sql for agg in agg_functions)
    has_group_by = 'group by' in sql
    has_having = 'having' in sql

    # Explicit JOINs ('join' alone also covers inner/left/right/full variants)
    join_keywords = ['join', 'inner join', 'left join', 'right join', 'full join']
    has_join = any(join in sql for join in join_keywords)

    # Count comma-separated tables in the FROM clause
    # (trailing space guarantees the $ alternative can match)
    from_tables = 1
    from_match = re.search(r'from\s+(.*?)($|\s+where|\s+group|\s+order|\s+having|\s+limit)', sql + ' ')
    if from_match:
        from_clause = from_match.group(1)
        from_tables = len([t for t in from_clause.split(',') if t.strip()])

    # Subquery: a SELECT immediately after an opening parenthesis.
    # The old check only inspected the text between the first and second '('
    # and substring-matched 'select', which both missed real subqueries and
    # false-positived on identifiers containing "select".
    has_subquery = re.search(r'\(\s*select\b', sql) is not None

    # WHERE-clause complexity
    has_where = 'where' in sql
    has_complex_where = False
    if has_where:
        # Extract the WHERE clause
        where_match = re.search(r'where\s+(.*?)($|\s+group|\s+order|\s+having|\s+limit)', sql + ' ')
        if where_match:
            where_clause = where_match.group(1)
            # Word boundaries matter here: the old substring test flagged
            # identifiers such as "priority" (contains 'or') as complex.
            has_complex_where = (
                re.search(r'\b(and|or|exists|between)\b', where_clause) is not None
                or 'in (' in where_clause
            )

    # ORDER BY complexity (computed but, as in the original, not used below)
    has_order_by = 'order by' in sql
    has_complex_order = False
    if has_order_by:
        order_match = re.search(r'order by\s+(.*?)($|\s+limit)', sql + ' ')
        if order_match:
            order_clause = order_match.group(1)
            has_complex_order = (',' in order_clause or '(' in order_clause)

    # Window functions and set operations (also unused by the decision below,
    # kept for parity with the original implementation)
    has_window_function = 'over (' in sql or 'partition by' in sql
    has_set_operations = any(op in sql for op in ['union', 'intersect', 'except'])

    # Classification. Levels 1 and 2 now additionally require the absence of
    # a JOIN: the original rules let a bare two-table JOIN with no WHERE fall
    # through to level 1/2, contradicting the level descriptions above.
    if not has_where and not has_order_by and from_tables == 1 and not has_aggregation and not has_join:
        return 1  # basic - single table, no conditions
    elif from_tables == 1 and not has_join and not has_complex_where and not has_aggregation and not has_subquery:
        return 2  # simple - single table, WHERE conditions, simple ordering
    elif (from_tables == 2 or (has_join and from_tables <= 2)) or (has_aggregation and not has_group_by) or (has_subquery and not has_complex_where):
        return 3  # medium - simple JOIN or subquery, basic aggregation
    elif (from_tables > 2 or (has_join and from_tables > 2)) or (has_group_by and not has_having) or has_complex_where:
        return 4  # complex - multi-table JOIN, GROUP BY, complex conditions
    else:
        return 5  # advanced - nesting, HAVING, window functions, set operations

def evaluate_model_by_execution(test_data, num_samples=None):
    """Evaluate the model by executing generated SQL or comparing SQL syntax.

    For each question: generate SQL via the AI service, run both the
    generated and the gold SQL against the matching Spider SQLite database,
    and count the sample as correct when either the execution results match
    or the normalized SQL strings are equivalent.  When the first attempt
    fails, one refinement pass (refine_sql_with_feedback) is attempted.

    Args:
        test_data: list of dicts; each item's "content" field is the question
            text (must match a "question" in train_spider.json to be scored).
        num_samples: optional cap on the number of samples evaluated.

    Returns:
        (accuracy, results, complexity_accuracy) where results is a list of
        EvaluationResult dicts and complexity_accuracy maps complexity level
        1-5 to {"accuracy", "total", "correct", "avg_response_time"}.

    Side effects:
        Writes evaluation/spider_results_execution.json and prints a summary.
    """
    results = []
    correct = 0
    total = 0
    
    # Counters for the second (refinement) execution pass
    second_execution_stats = {
        "triggered": 0,  # how many times a second pass was triggered
        "improved": 0,   # how many times the second pass fixed the result
        "total": 0       # total number of evaluated samples
    }
    
    # Per-complexity counters - five levels (see determine_query_complexity)
    complexity_stats = {
        1: {"correct": 0, "total": 0, "response_times": []},
        2: {"correct": 0, "total": 0, "response_times": []},
        3: {"correct": 0, "total": 0, "response_times": []},
        4: {"correct": 0, "total": 0, "response_times": []},
        5: {"correct": 0, "total": 0, "response_times": []}
    }
    
    # Load the raw Spider data to obtain the gold SQL for each question
    with open("spider_data/spider_data/train_spider.json", "r", encoding="utf-8") as f:
        spider_data = json.load(f)
    
    # Map question text -> gold SQL
    question_to_sql = {item["question"]: item["query"] for item in spider_data}
    
    # Optionally limit the number of test samples
    if num_samples and num_samples < len(test_data):
        test_data = test_data[:num_samples]
    
    for item in tqdm(test_data, desc="评估进度"):
        question = item["content"]
        
        # Skip questions that have no gold SQL
        if question not in question_to_sql:
            continue
            
        # Look up the database id for this question in the raw data
        db_id = None
        for spider_item in spider_data:
            if spider_item["question"] == question:
                db_id = spider_item["db_id"]
                break
        
        if not db_id:
            continue
            
        # Path of the SQLite database file
        db_path = f"spider_data/spider_data/database/{db_id}/{db_id}.sqlite"
        if not os.path.exists(db_path):
            print(f"数据库文件不存在: {db_path}")
            continue
            
        # Load the database schema
        db_schema = load_database_schema(db_id)
        if not db_schema:
            continue
            
        # Generate SQL and record the response time
        options = {"allow_joins": True, "complex_aggregations": True, "max_results": 10}
        start_time = time.time()
        generated_sql_response = generate_sql(question, db_schema, options)
        response_time = time.time() - start_time
        generated_sql = extract_sql_from_ai_response(generated_sql_response)
        
        # Gold SQL for this question
        correct_sql = question_to_sql[question]
        
        # Complexity is judged on the gold SQL
        complexity = determine_query_complexity(correct_sql)
        
        # Execute the generated SQL
        gen_results, gen_cols, gen_error = execute_sql_query(db_path, generated_sql)
        
        # Execute the gold SQL
        correct_results, correct_cols, correct_error = execute_sql_query(db_path, correct_sql)
        
        # Correct if either the execution results match or the syntax matches
        syntax_match = are_sqls_equivalent(generated_sql, correct_sql)
        execution_match = False
        
        # Any execution error on either side counts as a result mismatch
        if gen_error or correct_error:
            execution_match = False
        else:
            # Compare the actual result sets
            execution_match = are_results_equivalent(gen_results, gen_cols, correct_results, correct_cols)
        
        # Outcome of the first attempt
        is_correct_first = syntax_match or execution_match
        
        # If the first attempt failed, try a second (refinement) pass
        refined_sql = None
        refined_syntax_match = False
        refined_execution_match = False
        
        if not is_correct_first:
            second_execution_stats["triggered"] += 1
            
            # Ask the AI service to refine the SQL using the failure feedback
            from ai_service import refine_sql_with_feedback
            refined_sql = refine_sql_with_feedback(
                question, 
                generated_sql, 
                gen_error, 
                gen_results, 
                db_schema
            )
            
            # Extract the SQL from the refinement response
            refined_sql = extract_sql_from_ai_response(refined_sql)
            
            # Only re-execute when the refined SQL actually differs
            if refined_sql and refined_sql.strip() != generated_sql.strip():
                refined_results, refined_cols, refined_error = execute_sql_query(db_path, refined_sql)
                
                # Check whether the refined SQL is correct
                refined_syntax_match = are_sqls_equivalent(refined_sql, correct_sql)
                
                if not refined_error and not correct_error:
                    refined_execution_match = are_results_equivalent(
                        refined_results, refined_cols, correct_results, correct_cols
                    )
                
                # If the refined SQL is correct, adopt it and its match flags
                if refined_syntax_match or refined_execution_match:
                    second_execution_stats["improved"] += 1
                    generated_sql = refined_sql
                    syntax_match = refined_syntax_match
                    execution_match = refined_execution_match
        
        # Final verdict for this sample
        is_correct = syntax_match or execution_match
        
        if is_correct:
            correct += 1
            complexity_stats[complexity]["correct"] += 1
        
        total += 1
        second_execution_stats["total"] += 1
        complexity_stats[complexity]["total"] += 1
        complexity_stats[complexity]["response_times"].append(response_time)
        
        # Record the per-sample result
        result = EvaluationResult(
            question=question,
            generated_sql=generated_sql,
            correct_sql=correct_sql,
            syntax_match=syntax_match,
            execution_match=execution_match,
            db_id=db_id,
            normalized_generated=normalize_sql(generated_sql),
            normalized_correct=normalize_sql(correct_sql),
            complexity=complexity,
            response_time=response_time,
            second_execution=refined_sql is not None,  # whether a second pass ran
            improved_by_second=not is_correct_first and is_correct  # whether the second pass fixed it
        )
        results.append(result.dict())
    
    # Overall accuracy
    accuracy = correct / total if total > 0 else 0
    
    # Accuracy and mean response time per complexity level
    complexity_accuracy = {}
    for level in complexity_stats:
        stats = complexity_stats[level]
        if stats["total"] > 0:
            accuracy_rate = stats["correct"] / stats["total"]
            avg_response_time = sum(stats["response_times"]) / len(stats["response_times"]) if stats["response_times"] else 0
            complexity_accuracy[level] = {
                "accuracy": accuracy_rate,
                "total": stats["total"],
                "correct": stats["correct"],
                "avg_response_time": avg_response_time
            }
    
    # Effectiveness of the second (refinement) pass
    second_execution_rate = second_execution_stats["triggered"] / second_execution_stats["total"] if second_execution_stats["total"] > 0 else 0
    second_execution_improvement = second_execution_stats["improved"] / second_execution_stats["triggered"] if second_execution_stats["triggered"] > 0 else 0
    
    # Build the evaluation summary
    summary = EvaluationSummary(
        accuracy=accuracy,
        correct=correct,
        total=total,
        results=results,
        complexity_accuracy=complexity_accuracy,
        second_execution_stats={
            "triggered_count": second_execution_stats["triggered"],
            "improved_count": second_execution_stats["improved"],
            "triggered_rate": second_execution_rate,
            "improvement_rate": second_execution_improvement
        }
    )
    
    # Persist the evaluation results
    os.makedirs("evaluation", exist_ok=True)
    with open("evaluation/spider_results_execution.json", "w", encoding="utf-8") as f:
        json.dump(summary.dict(), f, ensure_ascii=False, indent=2)
    
    print(f"评估完成! 总体准确率: {accuracy:.2%} ({correct}/{total})")
    
    # Print second-pass statistics
    print("\n二次执行统计:")
    print(f"触发二次执行次数: {second_execution_stats['triggered']} ({second_execution_rate:.2%})")
    print(f"二次执行改善结果次数: {second_execution_stats['improved']} ({second_execution_improvement:.2%})")
    
    # Print per-complexity accuracy
    print("\n按查询复杂度的准确率:")
    print(f"基础查询 (单表、无条件): {complexity_accuracy.get(1, {}).get('accuracy', 0):.2%} "
          f"({complexity_accuracy.get(1, {}).get('correct', 0)}/{complexity_accuracy.get(1, {}).get('total', 0)})")
    print(f"简单查询 (单表、简单条件): {complexity_accuracy.get(2, {}).get('accuracy', 0):.2%} "
          f"({complexity_accuracy.get(2, {}).get('correct', 0)}/{complexity_accuracy.get(2, {}).get('total', 0)})")
    print(f"中等查询 (简单JOIN或聚合): {complexity_accuracy.get(3, {}).get('accuracy', 0):.2%} "
          f"({complexity_accuracy.get(3, {}).get('correct', 0)}/{complexity_accuracy.get(3, {}).get('total', 0)})")
    print(f"复杂查询 (多表JOIN、GROUP BY): {complexity_accuracy.get(4, {}).get('accuracy', 0):.2%} "
          f"({complexity_accuracy.get(4, {}).get('correct', 0)}/{complexity_accuracy.get(4, {}).get('total', 0)})")
    print(f"高级查询 (复杂嵌套、多级聚合): {complexity_accuracy.get(5, {}).get('accuracy', 0):.2%} "
          f"({complexity_accuracy.get(5, {}).get('correct', 0)}/{complexity_accuracy.get(5, {}).get('total', 0)})")
    
    # Print per-complexity average response time
    print("\n按查询复杂度的平均响应时间(秒):")
    print(f"基础查询: {complexity_accuracy.get(1, {}).get('avg_response_time', 0):.2f}秒")
    print(f"简单查询: {complexity_accuracy.get(2, {}).get('avg_response_time', 0):.2f}秒")
    print(f"中等查询: {complexity_accuracy.get(3, {}).get('avg_response_time', 0):.2f}秒")
    print(f"复杂查询: {complexity_accuracy.get(4, {}).get('avg_response_time', 0):.2f}秒")
    print(f"高级查询: {complexity_accuracy.get(5, {}).get('avg_response_time', 0):.2f}秒")
    
    return accuracy, results, complexity_accuracy

# Chart generation for the evaluation results
def generate_charts(complexity_accuracy):
    """Generate accuracy and response-time charts from per-complexity stats.

    Args:
        complexity_accuracy: dict mapping complexity level (1-5) to a dict
            with "accuracy", "total", "correct" and "avg_response_time"
            (as produced by evaluate_model_by_execution). Missing levels
            default to zero.

    Side effects:
        Saves accuracy_comparison.png and response_time_analysis.png under
        evaluation/charts/ and prints a confirmation message.
    """
    # Prepare the data - five complexity levels
    categories = ["Basic", "Simple", "Medium", "Complex", "Advanced"]
    accuracy_values = [
        complexity_accuracy.get(1, {}).get('accuracy', 0) * 100,
        complexity_accuracy.get(2, {}).get('accuracy', 0) * 100,
        complexity_accuracy.get(3, {}).get('accuracy', 0) * 100,
        complexity_accuracy.get(4, {}).get('accuracy', 0) * 100,
        complexity_accuracy.get(5, {}).get('accuracy', 0) * 100
    ]
    response_times = [
        complexity_accuracy.get(1, {}).get('avg_response_time', 0),
        complexity_accuracy.get(2, {}).get('avg_response_time', 0),
        complexity_accuracy.get(3, {}).get('avg_response_time', 0),
        complexity_accuracy.get(4, {}).get('avg_response_time', 0),
        complexity_accuracy.get(5, {}).get('avg_response_time', 0)
    ]
    sample_counts = [
        complexity_accuracy.get(1, {}).get('total', 0),
        complexity_accuracy.get(2, {}).get('total', 0),
        complexity_accuracy.get(3, {}).get('total', 0),
        complexity_accuracy.get(4, {}).get('total', 0),
        complexity_accuracy.get(5, {}).get('total', 0)
    ]
    
    # Overall accuracy across all levels
    total_correct = sum(complexity_accuracy.get(i, {}).get('correct', 0) for i in [1, 2, 3, 4, 5])
    total_samples = sum(sample_counts)
    overall_accuracy = (total_correct / total_samples) * 100 if total_samples > 0 else 0
    
    # Chart style
    plt.style.use('ggplot')
    
    # 1. Accuracy comparison chart
    fig, ax = plt.subplots(figsize=(14, 8))
    
    # Gradient-like color scheme (one color per complexity level)
    colors = ['#4575b4', '#74add1', '#abd9e9', '#fdae61', '#d73027']
    
    # Main accuracy bar chart
    bars = ax.bar(
        categories, 
        accuracy_values, 
        width=0.6,
        color=colors,
        edgecolor='black',
        linewidth=1.5,
        alpha=0.8
    )
    
    # Sample-count labels - anchored at the bottom of each bar
    for i, bar in enumerate(bars):
        ax.text(
            bar.get_x() + bar.get_width()/2., 
            1,  # fixed position near the baseline
            f'n={sample_counts[i]}',
            ha='center', 
            va='bottom',
            fontsize=10,
            fontweight='bold',
            color='black'
        )
    
    # Value labels - positioned to avoid overlapping the bars
    for bar in bars:
        height = bar.get_height()
        # For very short bars, place the label above the bar
        if height < 10:
            y_pos = height + 3
        else:
            y_pos = height - 5  # inside the bar, near its top
            
        ax.text(
            bar.get_x() + bar.get_width()/2., 
            y_pos,
            f'{height:.1f}%', 
            ha='center', 
            va='bottom',
            fontsize=12,
            fontweight='bold',
            color='black' if height < 50 else 'white'  # contrast against bar height
        )
    
    # Horizontal line marking the overall accuracy
    ax.axhline(
        y=overall_accuracy, 
        color='red', 
        linestyle='--', 
        linewidth=2,
        alpha=0.7
    )
    
    # Overall-accuracy label on the right side, clear of the bars
    ax.text(
        len(categories) - 0.5,  # to the right of the last bar
        overall_accuracy + 5,  # slightly above the line
        f'Overall: {overall_accuracy:.1f}%',
        color='red',
        fontsize=12,
        fontweight='bold',
        ha='right'  # right-aligned
    )
    
    # Title and axis labels
    ax.set_title('Accuracy by Query Complexity', fontsize=16, fontweight='bold', pad=20)
    ax.set_ylabel('Accuracy (%)', fontsize=14, fontweight='bold')
    ax.set_ylim(0, max(max(accuracy_values) * 1.15, overall_accuracy * 1.2))  # extra headroom at the top
    
    # Grid lines
    ax.grid(axis='y', linestyle='--', alpha=0.7)
    
    # Legend - placed above the plot
    complexity_descriptions = [
        'Basic: Single table, no conditions',
        'Simple: Single table, basic WHERE',
        'Medium: Simple JOIN or aggregation',
        'Complex: Multi-table JOIN, GROUP BY',
        'Advanced: Nested queries, complex aggregations'
    ]
    legend_elements = [
        plt.Rectangle((0,0), 1, 1, color=c, edgecolor='black', linewidth=1.5, alpha=0.8) 
        for c in colors
    ]
    ax.legend(legend_elements, complexity_descriptions, loc='upper center', fontsize=10, 
              bbox_to_anchor=(0.5, 1.0), ncol=2)  # above the axes, two columns
    
    # Annotation in the lower-left corner
    ax.text(
        0.02, 0.02, 
        f'Total samples: {total_samples}\nEvaluation date: {time.strftime("%Y-%m-%d")}',
        transform=ax.transAxes,
        fontsize=10,
        verticalalignment='bottom'
    )
    
    # Save the accuracy chart
    os.makedirs("evaluation/charts", exist_ok=True)
    plt.savefig("evaluation/charts/accuracy_comparison.png", dpi=300, bbox_inches='tight')
    plt.close()
    
    # 2. Response-time chart
    fig, ax = plt.subplots(figsize=(14, 8))
    
    # Combined bar + line chart
    width = 0.6
    bars = ax.bar(
        categories, 
        response_times, 
        width=width,
        color=colors,
        edgecolor='black',
        linewidth=1.5,
        alpha=0.6
    )
    
    # Trend line across the bars
    line_x = range(len(categories))
    ax.plot(
        line_x, 
        response_times, 
        marker='o', 
        markersize=10,
        linewidth=3, 
        color='#D62728',
        markeredgecolor='black',
        markeredgewidth=1.5
    )
    
    # Value labels - positioned to avoid overlap
    max_time = max(response_times) if response_times else 0
    for i, (bar, time_value) in enumerate(zip(bars, response_times)):
        # Time label just above the marker
        ax.text(
            bar.get_x() + bar.get_width()/2., 
            time_value + max_time*0.05,  # small offset above the point
            f'{time_value:.2f}s', 
            ha='center',
            fontsize=12,
            fontweight='bold',
            color='black'
        )
        
        # Sample-count label near the bottom of the bar
        if time_value > max_time * 0.1:  # bar tall enough to hold the label
            y_pos = time_value * 0.1  # at 10% of the bar height
            text_color = 'white'
        else:
            y_pos = max_time * 0.05  # fixed fallback position
            text_color = 'black'
            
        ax.text(
            bar.get_x() + bar.get_width()/2., 
            y_pos,
            f'n={sample_counts[i]}',
            ha='center', 
            color=text_color,
            fontsize=10,
            fontweight='bold'
        )
    
    # Title and axis labels
    ax.set_title('Response Time by Query Complexity', fontsize=16, fontweight='bold', pad=20)
    ax.set_ylabel('Response Time (seconds)', fontsize=14, fontweight='bold')
    ax.set_ylim(0, max_time * 1.25)  # extra headroom at the top
    
    # Grid lines
    ax.grid(axis='y', linestyle='--', alpha=0.7)
    
    # Legend - placed above the plot
    legend_elements = [
        plt.Rectangle((0,0), 1, 1, color=c, edgecolor='black', linewidth=1.5, alpha=0.6) 
        for c in colors
    ]
    legend_elements.append(plt.Line2D([0], [0], color='#D62728', marker='o', markersize=8, linewidth=2, markeredgecolor='black'))
    legend_labels = complexity_descriptions + ['Response Time Trend']
    ax.legend(legend_elements, legend_labels, loc='upper center', fontsize=10, 
              bbox_to_anchor=(0.5, 1.0), ncol=3)  # above the axes, three columns
    
    # Annotation in the lower-left corner
    ax.text(
        0.02, 0.02, 
        f'Total samples: {total_samples}\nEvaluation date: {time.strftime("%Y-%m-%d")}',
        transform=ax.transAxes,
        fontsize=10,
        verticalalignment='bottom'
    )
    
    # Save the response-time chart
    plt.savefig("evaluation/charts/response_time_analysis.png", dpi=300, bbox_inches='tight')
    plt.close()
    
    print("Charts generated and saved to evaluation/charts/ directory")

def main():
    """Run the full evaluation pipeline: load data, evaluate, write reports.

    Writes the detailed CSV report, the per-complexity CSV report, the
    charts, and the per-database accuracy JSON under evaluation/.
    """
    print("加载金融相关数据...")
    finance_data = load_finance_data()
    print(f"加载完成，共 {len(finance_data)} 条金融相关问题")

    print("开始评估模型...")
    print("使用执行匹配或语法匹配方法评估...")
    accuracy, results, complexity_accuracy = evaluate_model_by_execution(finance_data)

    print("生成评估报告...")
    # Per-sample detailed report
    pd.DataFrame(results).to_csv("evaluation/spider_detailed_report_execution.csv", index=False)

    # Per-complexity report - five levels, driven by a (level, label, description) table
    level_metadata = [
        (1, "基础查询", "单表、无条件"),
        (2, "简单查询", "单表、简单条件"),
        (3, "中等查询", "简单JOIN或聚合"),
        (4, "复杂查询", "多表JOIN、GROUP BY"),
        (5, "高级查询", "复杂嵌套、多级聚合"),
    ]
    complexity_rows = []
    for level, label, description in level_metadata:
        stats = complexity_accuracy.get(level, {})
        complexity_rows.append({
            "complexity": label,
            "description": description,
            "accuracy": stats.get("accuracy", 0),
            "total": stats.get("total", 0),
            "avg_response_time": stats.get("avg_response_time", 0),
        })
    pd.DataFrame(complexity_rows).to_csv("evaluation/spider_complexity_report.csv", index=False)

    # Charts
    generate_charts(complexity_accuracy)

    # Per-database accuracy (skipped when there are no results)
    if not results:
        print("没有评估结果")
        return

    db_accuracy = {}
    for entry in results:
        stats = db_accuracy.setdefault(entry["db_id"], {"correct": 0, "total": 0})
        stats["total"] += 1
        if entry["syntax_match"] or entry["execution_match"]:
            stats["correct"] += 1

    # Derive the accuracy ratio for every database
    for stats in db_accuracy.values():
        stats["accuracy"] = stats["correct"] / stats["total"]

    # Persist the per-database accuracy report
    with open("evaluation/spider_db_accuracy_execution.json", "w", encoding="utf-8") as f:
        json.dump(db_accuracy, f, ensure_ascii=False, indent=2)

    print("评估报告已保存到 evaluation/ 目录")
# Run the full evaluation pipeline when executed as a script.
if __name__ == "__main__":
    main()





