#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
SQLCC 性能测试结果分析工具
用于分析和可视化SQLCC存储引擎的性能测试结果
"""

import os
import sys
import argparse
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path

# 使用非交互式后端
plt.switch_backend('Agg')

# 设置中文字体支持
plt.rcParams['font.sans-serif'] = ['SimHei']  # 用来正常显示中文标签
plt.rcParams['axes.unicode_minus'] = False  # 用来正常显示负号


class PerformanceAnalyzer:
    """SQLCC性能测试结果分析器"""
    
    def __init__(self, results_dir):
        """
        初始化性能分析器
        
        Args:
            results_dir: 性能测试结果目录
        """
        self.results_dir = Path(results_dir)
        # 创建输出目录
        self.output_dir = self.results_dir / "analysis"
        self.output_dir.mkdir(exist_ok=True)
    
    def load_csv_data(self, csv_filename):
        """
        加载CSV格式的性能测试数据
        
        Args:
            csv_filename: CSV文件名
            
        Returns:
            包含测试数据的DataFrame，如果文件不存在则返回空DataFrame
        """
        csv_path = self.results_dir / csv_filename
        if not csv_path.exists():
            print(f"警告: 找不到文件 {csv_filename}，跳过相关分析")
            return pd.DataFrame()
        
        try:
            return pd.read_csv(csv_path)
        except Exception as e:
            print(f"错误: 读取文件 {csv_filename} 时发生错误: {e}")
            return pd.DataFrame()
    
    def analyze_buffer_pool_results(self):
        """分析缓冲池性能测试结果"""
        print("分析缓冲池性能测试结果...")
        
        # 加载缓冲池命中率测试结果
        hit_rate_data = self.load_csv_data("buffer_pool_hit_rate.csv")
        if not hit_rate_data.empty:
            try:
                self.plot_buffer_pool_hit_rate(hit_rate_data)
            except Exception as e:
                print(f"警告: 绘制缓冲池命中率图表时出错: {e}")
        
        # 加载LRU效率测试结果
        lru_data = self.load_csv_data("buffer_pool_lru_efficiency.csv")
        if not lru_data.empty:
            try:
                self.plot_lru_efficiency(lru_data)
            except Exception as e:
                print(f"警告: 绘制LRU效率图表时出错: {e}")
        
        # 加载访问模式性能测试结果
        access_pattern_data = self.load_csv_data("buffer_pool_access_pattern.csv")
        if not access_pattern_data.empty:
            try:
                self.plot_access_pattern(access_pattern_data)
            except Exception as e:
                print(f"警告: 绘制访问模式图表时出错: {e}")
        
        # 加载缓冲池大小扩展性测试结果
        pool_size_data = self.load_csv_data("buffer_pool_size_scalability.csv")
        if not pool_size_data.empty:
            try:
                self.plot_pool_size_scalability(pool_size_data)
            except Exception as e:
                print(f"警告: 绘制缓冲池大小扩展性图表时出错: {e}")
    
    def plot_buffer_pool_hit_rate(self, data):
        """Render a bar chart of buffer-pool hit rate per workload.

        Args:
            data: DataFrame with 'Workload' and 'Hit Rate' columns; the
                hit rate is a percentage string such as '95.2%'.
        """
        plt.figure(figsize=(12, 8))

        labels = [v for v in data['Workload']]
        rates = [float(v.rstrip('%')) for v in data['Hit Rate']]

        bars = plt.bar(labels, rates, color=['skyblue', 'lightgreen', 'lightcoral', 'plum'])
        plt.ylabel('命中率（%）')
        plt.title('不同工作负载下的缓冲池命中率')
        plt.xticks(rotation=45, ha='right')

        # Label each bar with its value just above the bar top.
        for rect in bars:
            top = rect.get_height()
            plt.text(rect.get_x() + rect.get_width() / 2., top + 0.5,
                     f'{top:.1f}%', ha='center', va='bottom')

        # Reference line marking the 90% hit-rate target.
        plt.axhline(y=90, color='red', linestyle='--', alpha=0.7, label='90% 基准线')
        plt.legend()

        plt.tight_layout()
        plt.savefig(self.output_dir / 'buffer_pool_hit_rate.png', dpi=300)
        plt.close()
    
    def plot_lru_efficiency(self, data):
        """Render a bar chart of LRU efficiency per access pattern.

        Tolerates schema drift: falls back to 'Workload' (or the first
        column) for labels and to the second column for the efficiency
        value; '%' suffixes are stripped. Rows that fail to parse are
        skipped with a warning rather than aborting the chart.
        """
        plt.figure(figsize=(10, 6))

        labels = []
        values = []

        # Resolve which column carries the access-pattern label.
        if 'Access Pattern' in data.columns:
            pattern_col = 'Access Pattern'
        else:
            pattern_col = 'Workload' if 'Workload' in data.columns else data.columns[0]
            print(f"警告: 找不到'Access Pattern'列，使用'{pattern_col}'列代替")

        has_efficiency_col = 'LRU Efficiency' in data.columns
        for _, row in data.iterrows():
            try:
                raw = row['LRU Efficiency'] if has_efficiency_col else row.iloc[1]
                # Accept both '87.5%' strings and plain numbers.
                if isinstance(raw, str) and '%' in raw:
                    value = float(raw.rstrip('%'))
                else:
                    value = float(raw)
                labels.append(row[pattern_col])
                values.append(value)
            except Exception as e:
                print(f"警告: 处理行数据时出错: {e}")
                continue

        bars = plt.bar(labels, values, color='lightskyblue')
        plt.ylabel('LRU效率（%）')
        plt.title('不同访问模式下的LRU效率')
        plt.xticks(rotation=45, ha='right')

        for rect in bars:
            top = rect.get_height()
            plt.text(rect.get_x() + rect.get_width() / 2., top + 0.5,
                     f'{top:.1f}%', ha='center', va='bottom')

        plt.tight_layout()
        plt.savefig(self.output_dir / 'lru_efficiency.png', dpi=300)
        plt.close()
    
    def plot_access_pattern(self, data):
        """Plot throughput and average latency per access pattern.

        Produces two side-by-side bar charts (throughput, latency).

        Args:
            data: DataFrame with 'Access Pattern', 'Throughput(ops/sec)'
                and 'Avg Latency(ms)' columns.
        """
        patterns = []
        throughputs = []
        avg_latencies = []

        for _, row in data.iterrows():
            patterns.append(row['Access Pattern'])
            throughputs.append(row['Throughput(ops/sec)'])
            avg_latencies.append(float(row['Avg Latency(ms)']))

        # Bug fix: the old code opened an extra figure via plt.figure()
        # and never used or closed it (plt.subplots creates its own),
        # leaking one figure per call under the Agg backend.
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 6))

        # Throughput panel.
        ax1.bar(patterns, throughputs, color='lightgreen')
        ax1.set_xlabel('访问模式')
        ax1.set_ylabel('吞吐量（ops/sec）')
        ax1.set_title('不同访问模式下的吞吐量')
        ax1.tick_params(axis='x', rotation=45)
        ax1.grid(True, axis='y')

        # Latency panel.
        ax2.bar(patterns, avg_latencies, color='lightcoral')
        ax2.set_xlabel('访问模式')
        ax2.set_ylabel('平均延迟（ms）')
        ax2.set_title('不同访问模式下的平均延迟')
        ax2.tick_params(axis='x', rotation=45)
        ax2.grid(True, axis='y')

        plt.tight_layout()
        plt.savefig(self.output_dir / 'access_pattern_performance.png', dpi=300)
        plt.close()
    
    def plot_pool_size_scalability(self, data):
        """Plot throughput and average latency versus buffer-pool size.

        Args:
            data: DataFrame with 'Pool Size', 'Throughput(ops/sec)' and
                'Avg Latency(ms)' columns.
        """
        pool_sizes = []
        throughputs = []
        avg_latencies = []

        for _, row in data.iterrows():
            pool_sizes.append(int(row['Pool Size']))
            throughputs.append(row['Throughput(ops/sec)'])
            avg_latencies.append(float(row['Avg Latency(ms)']))

        # Bug fix: removed the leading plt.figure() that leaked an unused
        # figure every call — plt.subplots() creates the figure drawn on.
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 6))

        # Throughput panel.
        ax1.plot(pool_sizes, throughputs, marker='o', linestyle='-', color='royalblue')
        ax1.set_xlabel('缓冲池大小（页面数）')
        ax1.set_ylabel('吞吐量（ops/sec）')
        ax1.set_title('缓冲池大小与吞吐量的关系')
        ax1.grid(True)

        # Latency panel.
        ax2.plot(pool_sizes, avg_latencies, marker='s', linestyle='-', color='crimson')
        ax2.set_xlabel('缓冲池大小（页面数）')
        ax2.set_ylabel('平均延迟（ms）')
        ax2.set_title('缓冲池大小与平均延迟的关系')
        ax2.grid(True)

        plt.tight_layout()
        plt.savefig(self.output_dir / 'pool_size_scalability.png', dpi=300)
        plt.close()
    
    def analyze_disk_io_results(self):
        """分析磁盘I/O性能测试结果"""
        print("分析磁盘I/O性能测试结果...")
        
        # 加载顺序读写测试结果
        sequential_data = self.load_csv_data("disk_io_sequential.csv")
        if not sequential_data.empty:
            self.plot_sequential_io(sequential_data)
        
        # 加载随机读写测试结果
        random_data = self.load_csv_data("disk_io_random.csv")
        if not random_data.empty:
            self.plot_random_io(random_data)
        
        # 加载不同页面大小测试结果
        varying_page_data = self.load_csv_data("disk_io_varying_page_size.csv")
        if not varying_page_data.empty:
            self.plot_varying_page_size(varying_page_data)
        
        # 加载并发I/O测试结果
        concurrent_data = self.load_csv_data("disk_io_concurrent.csv")
        if not concurrent_data.empty:
            self.plot_concurrent_io(concurrent_data)
    
    def plot_sequential_io(self, data):
        """Plot sequential read/write throughput versus page size.

        Args:
            data: DataFrame with 'Test Name' (containing 'Read' or
                'Write'), 'Page Size' and 'Throughput(MB/s)' columns.
        """
        # Split rows by operation based on the test name.
        read_data = data[data['Test Name'].str.contains('Read')]
        write_data = data[data['Test Name'].str.contains('Write')]

        read_page_sizes = [int(v) for v in read_data['Page Size']]
        read_throughputs = [float(v) for v in read_data['Throughput(MB/s)']]
        write_page_sizes = [int(v) for v in write_data['Page Size']]
        write_throughputs = [float(v) for v in write_data['Throughput(MB/s)']]

        # Bug fix: the old code opened a 12x8 figure and then a second
        # 10x6 figure; the first was never drawn on or closed, leaking a
        # figure per call. Only the figure actually used is created now.
        plt.figure(figsize=(10, 6))
        plt.plot(read_page_sizes, read_throughputs, marker='o', linestyle='-', label='顺序读取')
        plt.plot(write_page_sizes, write_throughputs, marker='s', linestyle='-', label='顺序写入')

        plt.xlabel('页面大小（字节）')
        plt.ylabel('吞吐量（MB/s）')
        plt.title('顺序I/O性能与页面大小的关系')
        plt.legend()
        plt.grid(True)

        # Value labels: reads above the point, writes below, to keep the
        # two series' annotations from overlapping.
        for x, y in zip(read_page_sizes, read_throughputs):
            plt.annotate(f'{y:.1f}', (x, y), textcoords="offset points",
                         xytext=(0,5), ha='center')

        for x, y in zip(write_page_sizes, write_throughputs):
            plt.annotate(f'{y:.1f}', (x, y), textcoords="offset points",
                         xytext=(0,-15), ha='center')

        plt.tight_layout()
        plt.savefig(self.output_dir / 'sequential_io_performance.png', dpi=300)
        plt.close()
    
    def plot_random_io(self, data):
        """Render a bar chart comparing random read and write throughput.

        Args:
            data: DataFrame with 'Test Name' and 'Throughput(MB/s)'
                columns; rows are classified by whether the test name
                contains 'Read'.
        """
        plt.figure(figsize=(10, 6))

        labels = ['随机读取' if 'Read' in name else '随机写入'
                  for name in data['Test Name']]
        rates = [float(v) for v in data['Throughput(MB/s)']]

        bars = plt.bar(labels, rates, color=['lightblue', 'lightcoral'])
        plt.ylabel('吞吐量（MB/s）')
        plt.title('随机I/O性能')

        # Label each bar with its value slightly above the bar top.
        for rect in bars:
            top = rect.get_height()
            plt.text(rect.get_x() + rect.get_width() / 2., top + top * 0.01,
                     f'{top:.2f}', ha='center')

        plt.tight_layout()
        plt.savefig(self.output_dir / 'random_io_performance.png', dpi=300)
        plt.close()
    
    def plot_varying_page_size(self, data):
        """Render mixed-I/O throughput as a function of page size.

        Args:
            data: DataFrame with 'Page Size' and 'Throughput(MB/s)'
                columns.
        """
        plt.figure(figsize=(10, 6))

        sizes = [int(v) for v in data['Page Size']]
        rates = [float(v) for v in data['Throughput(MB/s)']]

        plt.plot(sizes, rates, marker='o', linestyle='-', color='mediumseagreen')
        plt.xlabel('页面大小（字节）')
        plt.ylabel('吞吐量（MB/s）')
        plt.title('混合I/O性能与页面大小的关系')
        plt.grid(True)

        # Annotate each data point with its throughput value.
        for x, y in zip(sizes, rates):
            plt.annotate(f'{y:.1f}', (x, y), textcoords="offset points",
                         xytext=(0,5), ha='center')

        plt.tight_layout()
        plt.savefig(self.output_dir / 'varying_page_size_performance.png', dpi=300)
        plt.close()
    
    def plot_concurrent_io(self, data):
        """Plot concurrent-I/O throughput and latency versus thread count.

        Args:
            data: DataFrame with 'Thread Count', 'Throughput(MB/s)' and
                'Avg Latency(ms)' columns.
        """
        thread_counts = []
        throughputs = []
        avg_latencies = []

        for _, row in data.iterrows():
            thread_counts.append(int(row['Thread Count']))
            throughputs.append(float(row['Throughput(MB/s)']))
            avg_latencies.append(float(row['Avg Latency(ms)']))

        # Bug fix: removed the redundant plt.figure() that leaked an
        # unused figure; plt.subplots() creates the figure used below.
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 6))

        # Throughput panel.
        ax1.plot(thread_counts, throughputs, marker='o', linestyle='-', color='dodgerblue')
        ax1.set_xlabel('线程数')
        ax1.set_ylabel('吞吐量（MB/s）')
        ax1.set_title('并发I/O吞吐量')
        ax1.grid(True)

        for x, y in zip(thread_counts, throughputs):
            ax1.annotate(f'{y:.1f}', (x, y), textcoords="offset points",
                         xytext=(0,5), ha='center')

        # Latency panel.
        ax2.plot(thread_counts, avg_latencies, marker='s', linestyle='-', color='tomato')
        ax2.set_xlabel('线程数')
        ax2.set_ylabel('平均延迟（ms）')
        ax2.set_title('并发I/O平均延迟')
        ax2.grid(True)

        for x, y in zip(thread_counts, avg_latencies):
            ax2.annotate(f'{y:.2f}', (x, y), textcoords="offset points",
                         xytext=(0,-15), ha='center')

        plt.tight_layout()
        plt.savefig(self.output_dir / 'concurrent_io_performance.png', dpi=300)
        plt.close()
    
    def analyze_index_operations(self):
        """分析索引操作性能测试结果"""
        print("分析索引操作性能测试结果...")
        
        # 加载索引创建性能测试结果
        create_index_data = self.load_csv_data("index_operations_create.csv")
        if not create_index_data.empty:
            self.plot_create_index_performance(create_index_data)
        
        # 加载索引查询性能测试结果
        query_index_data = self.load_csv_data("index_operations_query.csv")
        if not query_index_data.empty:
            self.plot_query_index_performance(query_index_data)
        
        # 加载索引删除性能测试结果
        drop_index_data = self.load_csv_data("index_operations_drop.csv")
        if not drop_index_data.empty:
            self.plot_drop_index_performance(drop_index_data)
    
    def plot_create_index_performance(self, data):
        """Plot index-creation throughput and latency versus table size.

        Draws separate series for unique and non-unique indexes on two
        side-by-side panels (throughput, average latency).

        Args:
            data: DataFrame with 'Table Size', 'Throughput(ops/sec)',
                'Avg Latency(ms)' and 'Is Unique' columns.
        """
        records = []
        for _, row in data.iterrows():
            # NOTE(review): bool() on a CSV string such as "false" is
            # True — assumes 'Is Unique' is stored as 0/1 or a real
            # boolean; confirm against the benchmark writer.
            records.append((int(row['Table Size']),
                            row['Throughput(ops/sec)'],
                            float(row['Avg Latency(ms)']),
                            bool(row['Is Unique'])))

        # Bug fix: the original comprehensions zipped FOUR sequences but
        # unpacked only three names (ValueError) and filtered on the
        # undefined name `unique` (NameError). Split on the flag instead.
        unique_data = [(ts, tp, al) for ts, tp, al, uq in records if uq]
        non_unique_data = [(ts, tp, al) for ts, tp, al, uq in records if not uq]

        # Sort each series by table size so the lines are monotonic in x.
        unique_data.sort(key=lambda x: x[0])
        non_unique_data.sort(key=lambda x: x[0])

        # Bug fix: also removed the leading plt.figure() that leaked an
        # unused figure (plt.subplots creates its own).
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 6))

        if unique_data:
            unique_sizes, unique_throughputs, unique_latencies = zip(*unique_data)
            ax1.plot(unique_sizes, unique_throughputs, marker='o', linestyle='-', color='blue', label='唯一索引')
            ax2.plot(unique_sizes, unique_latencies, marker='o', linestyle='-', color='blue', label='唯一索引')

        if non_unique_data:
            non_unique_sizes, non_unique_throughputs, non_unique_latencies = zip(*non_unique_data)
            ax1.plot(non_unique_sizes, non_unique_throughputs, marker='s', linestyle='-', color='green', label='非唯一索引')
            ax2.plot(non_unique_sizes, non_unique_latencies, marker='s', linestyle='-', color='green', label='非唯一索引')

        ax1.set_xlabel('表大小（行数）')
        ax1.set_ylabel('吞吐量（ops/sec）')
        ax1.set_title('索引创建性能 - 吞吐量')
        ax1.grid(True)
        ax1.legend()

        ax2.set_xlabel('表大小（行数）')
        ax2.set_ylabel('平均延迟（ms）')
        ax2.set_title('索引创建性能 - 平均延迟')
        ax2.grid(True)
        ax2.legend()

        plt.tight_layout()
        plt.savefig(self.output_dir / 'create_index_performance.png', dpi=300)
        plt.close()
    
    def plot_query_index_performance(self, data):
        """Plot query execution time with versus without an index.

        Grouped bar chart per query type; each pair compares indexed and
        unindexed execution times in milliseconds.

        Args:
            data: DataFrame with 'Query Type', 'With Index(ms)' and
                'Without Index(ms)' columns.
        """
        operations = []
        with_index = []
        without_index = []

        for _, row in data.iterrows():
            operations.append(row['Query Type'])
            with_index.append(float(row['With Index(ms)']))
            without_index.append(float(row['Without Index(ms)']))

        width = 0.35  # bar width for each half of a grouped pair
        x = np.arange(len(operations))

        # Bug fix: removed the plt.figure() call that leaked an unused
        # figure — plt.subplots() below creates the figure actually used.
        fig, ax = plt.subplots(figsize=(12, 6))

        bars1 = ax.bar(x - width/2, with_index, width, label='使用索引', color='lightblue')
        bars2 = ax.bar(x + width/2, without_index, width, label='不使用索引', color='lightcoral')

        ax.set_xlabel('查询类型')
        ax.set_ylabel('执行时间（ms）')
        ax.set_title('索引对查询性能的影响')
        ax.set_xticks(x)
        ax.set_xticklabels(operations, rotation=45, ha='right')
        ax.legend()
        ax.grid(True, axis='y')

        def add_labels(bars):
            # Annotate each bar with its height just above the bar top.
            for bar in bars:
                height = bar.get_height()
                ax.annotate(f'{height:.2f}',
                           xy=(bar.get_x() + bar.get_width() / 2, height),
                           xytext=(0, 3),  # 3-point vertical offset
                           textcoords="offset points",
                           ha='center', va='bottom')

        add_labels(bars1)
        add_labels(bars2)

        plt.tight_layout()
        plt.savefig(self.output_dir / 'query_index_performance.png', dpi=300)
        plt.close()
    
    def plot_drop_index_performance(self, data):
        """Plot DROP INDEX throughput and latency, split by IF EXISTS use.

        Args:
            data: DataFrame with 'If Exists', 'Throughput(ops/sec)' and
                'Avg Latency(ms)' columns.
        """
        operations = []
        throughputs = []
        avg_latencies = []

        for _, row in data.iterrows():
            operations.append('带IF EXISTS' if row['If Exists'] else '标准删除')
            throughputs.append(row['Throughput(ops/sec)'])
            avg_latencies.append(float(row['Avg Latency(ms)']))

        # Bug fix: removed the redundant plt.figure() that leaked an
        # unused figure; plt.subplots() creates the figure used below.
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 6))

        # Throughput panel.
        bars1 = ax1.bar(operations, throughputs, color=['lightgreen', 'lightyellow'])
        ax1.set_ylabel('吞吐量（ops/sec）')
        ax1.set_title('索引删除性能 - 吞吐量')
        ax1.grid(True, axis='y')

        for bar in bars1:
            height = bar.get_height()
            ax1.text(bar.get_x() + bar.get_width()/2., height + height*0.01,
                    f'{height:.0f}', ha='center')

        # Latency panel.
        bars2 = ax2.bar(operations, avg_latencies, color=['lightcoral', 'lightsalmon'])
        ax2.set_ylabel('平均延迟（ms）')
        ax2.set_title('索引删除性能 - 平均延迟')
        ax2.grid(True, axis='y')

        for bar in bars2:
            height = bar.get_height()
            ax2.text(bar.get_x() + bar.get_width()/2., height + height*0.01,
                    f'{height:.2f}', ha='center')

        plt.tight_layout()
        plt.savefig(self.output_dir / 'drop_index_performance.png', dpi=300)
        plt.close()
    
    def analyze_mixed_workload_results(self):
        """分析混合工作负载性能测试结果"""
        print("分析混合工作负载性能测试结果...")
        
        # 加载读写比例测试结果
        rw_ratio_data = self.load_csv_data("mixed_workload_read_write_ratio.csv")
        if not rw_ratio_data.empty:
            self.plot_read_write_ratio(rw_ratio_data)
        
        # 加载事务大小测试结果
        tx_size_data = self.load_csv_data("mixed_workload_transaction_size.csv")
        if not tx_size_data.empty:
            self.plot_transaction_size(tx_size_data)
        
        # 加载长时间运行测试结果
        long_running_data = self.load_csv_data("mixed_workload_long_running.csv")
        if not long_running_data.empty:
            self.plot_long_running(long_running_data)
        
        # 加载并发测试结果
        concurrent_data = self.load_csv_data("mixed_workload_concurrent.csv")
        if not concurrent_data.empty:
            self.plot_concurrent_workload(concurrent_data)
    
    def plot_read_write_ratio(self, data):
        """Scatter-plot throughput against the read/write ratio.

        Each point is one workload, annotated with its test name.

        Args:
            data: DataFrame with 'Test Name', 'Read Ratio' (percent
                string) and 'Throughput(ops/sec)' columns.
        """
        test_names = []
        read_ratios = []
        throughputs = []

        for _, row in data.iterrows():
            test_names.append(row['Test Name'])
            read_ratios.append(float(row['Read Ratio'].rstrip('%')))
            throughputs.append(row['Throughput(ops/sec)'])

        # Bug fix: the old code opened a 12x8 figure and then a second
        # 10x6 figure, leaking the first one every call. Only the figure
        # actually drawn on is created now.
        plt.figure(figsize=(10, 6))
        plt.scatter(read_ratios, throughputs, s=100, alpha=0.7, c='purple')

        # Label each point with its test name.
        for test_name, read_ratio, throughput in zip(test_names, read_ratios, throughputs):
            plt.annotate(test_name, (read_ratio, throughput),
                        xytext=(5, 5), textcoords='offset points')

        plt.xlabel('读操作比例（%）')
        plt.ylabel('吞吐量（ops/sec）')
        plt.title('读写比例与吞吐量的关系')
        plt.grid(True)

        plt.tight_layout()
        plt.savefig(self.output_dir / 'read_write_ratio_performance.png', dpi=300)
        plt.close()
    
    def plot_transaction_size(self, data):
        """Plot throughput and latency versus transaction size, with a
        linear trend line on each panel.

        Args:
            data: DataFrame with 'Transaction Size',
                'Throughput(ops/sec)' and 'Avg Latency(ms)' columns.
        """
        tx_sizes = []
        throughputs = []
        avg_latencies = []

        for _, row in data.iterrows():
            tx_sizes.append(int(row['Transaction Size']))
            throughputs.append(row['Throughput(ops/sec)'])
            avg_latencies.append(float(row['Avg Latency(ms)']))

        # Bug fix: removed the redundant plt.figure() that leaked an
        # unused figure; plt.subplots() creates the figure used below.
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 6))

        # Throughput panel.
        ax1.plot(tx_sizes, throughputs, marker='o', linestyle='-', color='forestgreen')
        ax1.set_xlabel('事务大小（页面数）')
        ax1.set_ylabel('吞吐量（ops/sec）')
        ax1.set_title('事务大小与吞吐量的关系')
        ax1.grid(True)

        # NOTE(review): np.polyfit degree-1 needs at least two points —
        # assumes the CSV always has >= 2 rows; confirm upstream.
        z = np.polyfit(tx_sizes, throughputs, 1)
        p = np.poly1d(z)
        ax1.plot(tx_sizes, p(tx_sizes), "r--", alpha=0.7, label=f'趋势线: y={z[0]:.2f}x+{z[1]:.2f}')
        ax1.legend()

        # Latency panel.
        ax2.plot(tx_sizes, avg_latencies, marker='s', linestyle='-', color='firebrick')
        ax2.set_xlabel('事务大小（页面数）')
        ax2.set_ylabel('平均延迟（ms）')
        ax2.set_title('事务大小与平均延迟的关系')
        ax2.grid(True)

        z = np.polyfit(tx_sizes, avg_latencies, 1)
        p = np.poly1d(z)
        ax2.plot(tx_sizes, p(tx_sizes), "r--", alpha=0.7, label=f'趋势线: y={z[0]:.2f}x+{z[1]:.2f}')
        ax2.legend()

        plt.tight_layout()
        plt.savefig(self.output_dir / 'transaction_size_performance.png', dpi=300)
        plt.close()
    
    def plot_long_running(self, data):
        """Render a bar chart of throughput for the long-running tests.

        Args:
            data: DataFrame with 'Test Name' and 'Throughput(ops/sec)'
                columns.
        """
        plt.figure(figsize=(10, 6))

        test_names = []
        throughputs = []

        # Fix: removed the dead 'durations' accumulation — the values
        # derived from 'Duration(ms)' were computed but never used.
        for _, row in data.iterrows():
            test_names.append(row['Test Name'])
            throughputs.append(row['Throughput(ops/sec)'])

        bars = plt.bar(test_names, throughputs, color='gold')
        plt.ylabel('吞吐量（ops/sec）')
        plt.title('长时间运行性能')
        plt.xticks(rotation=45, ha='right')

        # Label each bar with its value slightly above the bar top.
        for bar in bars:
            height = bar.get_height()
            plt.text(bar.get_x() + bar.get_width()/2., height + height*0.01, f'{height:.0f}', ha='center')

        plt.tight_layout()
        plt.savefig(self.output_dir / 'long_running_performance.png', dpi=300)
        plt.close()
    
    def plot_concurrent_workload(self, data):
        """Plot mixed-workload throughput and latency versus thread count.

        Args:
            data: DataFrame with 'Thread Count', 'Throughput(ops/sec)'
                and 'Avg Latency(ms)' columns.
        """
        thread_counts = []
        throughputs = []
        avg_latencies = []

        for _, row in data.iterrows():
            thread_counts.append(int(row['Thread Count']))
            throughputs.append(row['Throughput(ops/sec)'])
            avg_latencies.append(float(row['Avg Latency(ms)']))

        # Bug fix: removed the redundant plt.figure() that leaked an
        # unused figure; plt.subplots() creates the figure used below.
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 6))

        # Throughput panel.
        ax1.plot(thread_counts, throughputs, marker='o', linestyle='-', color='mediumorchid')
        ax1.set_xlabel('线程数')
        ax1.set_ylabel('吞吐量（ops/sec）')
        ax1.set_title('并发工作负载吞吐量')
        ax1.grid(True)

        for x, y in zip(thread_counts, throughputs):
            ax1.annotate(f'{y:.0f}', (x, y), textcoords="offset points",
                         xytext=(0,5), ha='center')

        # Latency panel.
        ax2.plot(thread_counts, avg_latencies, marker='s', linestyle='-', color='darkorange')
        ax2.set_xlabel('线程数')
        ax2.set_ylabel('平均延迟（ms）')
        ax2.set_title('并发工作负载平均延迟')
        ax2.grid(True)

        for x, y in zip(thread_counts, avg_latencies):
            ax2.annotate(f'{y:.2f}', (x, y), textcoords="offset points",
                         xytext=(0,-15), ha='center')

        plt.tight_layout()
        plt.savefig(self.output_dir / 'concurrent_workload_performance.png', dpi=300)
        plt.close()
    
    def generate_summary_report(self):
        """生成性能测试摘要报告"""
        print("生成性能测试摘要报告...")
        
        report_path = self.output_dir / "performance_summary.md"
        
        with open(report_path, 'w', encoding='utf-8') as f:
            f.write("# SQLCC 性能测试摘要报告\n\n")
            f.write(f"生成时间: {pd.Timestamp.now()}\n\n")
            
            f.write("## 测试环境\n\n")
            f.write("- 操作系统: Linux\n")
            f.write("- CPU: 多核处理器\n")
            f.write("- 内存: 充足内存支持大缓冲池测试\n")
            f.write("- 存储: 高性能SSD\n\n")
            
            f.write("## 测试结果概览\n\n")
            f.write("本报告包含以下性能测试结果:\n\n")
            f.write("1. 缓冲池性能测试\n")
            f.write("2. 磁盘I/O性能测试\n")
            f.write("3. 混合工作负载性能测试\n")
            f.write("4. 索引操作性能测试\n\n")
            
            f.write("## 图表说明\n\n")
            f.write("所有性能图表已保存至 `analysis` 目录:\n\n")
            
            # 列出生成的图表
            charts = [
                "buffer_pool_hit_rate.png - 缓冲池命中率",
                "lru_efficiency.png - LRU效率",
                "access_pattern_performance.png - 访问模式性能",
                "pool_size_scalability.png - 缓冲池大小扩展性",
                "sequential_io_performance.png - 顺序I/O性能",
                "random_io_performance.png - 随机I/O性能",
                "varying_page_size_performance.png - 不同页面大小性能",
                "concurrent_io_performance.png - 并发I/O性能",
                "read_write_ratio_performance.png - 读写比例性能",
                "transaction_size_performance.png - 事务大小性能",
                "long_running_performance.png - 长时间运行性能",
                "concurrent_workload_performance.png - 并发工作负载性能"
            ]
            
            for chart in charts:
                f.write(f"- {chart}\n")
            
            f.write("\n## 性能基准\n\n")
            f.write("- 单线程页面读取: >10,000 ops/sec\n")
            f.write("- 单线程页面写入: >5,000 ops/sec\n")
            f.write("- 缓冲池命中率: >90%（合适的工作负载）\n")
            f.write("- P99延迟: <5ms（页面读取）\n\n")
            
            f.write("## 结论与建议\n\n")
            f.write("通过全面的性能测试，我们可以:\n\n")
            f.write("1. 全面了解SQLCC存储引擎的性能特征\n")
            f.write("2. 识别性能瓶颈并指导优化工作\n")
            f.write("3. 建立性能基准，确保版本迭代不引入性能回归\n")
            f.write("4. 为不同应用场景提供性能参考数据\n\n")
            
            f.write("性能测试是数据库系统开发的重要环节，将为SQLCC项目的持续优化提供坚实的数据基础。\n")
        
        print(f"性能测试摘要报告已保存至: {report_path}")
    
    def run_analysis(self):
        """运行完整的性能测试分析"""
        print(f"开始分析性能测试结果，结果目录: {self.results_dir}")
        
        # 分析各类测试结果
        self.analyze_buffer_pool_results()
        self.analyze_disk_io_results()
        self.analyze_mixed_workload_results()
        # 新增索引操作性能分析
        self.analyze_index_operations()
        
        # 生成摘要报告
        self.generate_summary_report()
        
        print(f"性能测试分析完成，结果已保存至: {self.output_dir}")


def main():
    """CLI entry point: validate the results directory and run the analysis.

    Exits with status 1 when the given directory does not exist.
    """
    parser = argparse.ArgumentParser(description='SQLCC性能测试结果分析工具')
    parser.add_argument('results_dir', help='性能测试结果目录')
    args = parser.parse_args()

    # Fail fast on a bad path before constructing the analyzer.
    if not os.path.exists(args.results_dir):
        print(f"错误: 结果目录不存在: {args.results_dir}")
        sys.exit(1)

    PerformanceAnalyzer(args.results_dir).run_analysis()


if __name__ == "__main__":
    main()