import time
import numpy as np
from typing import List, Tuple, Dict, Set, Any, Optional
import matplotlib.pyplot as plt


class Metrics:
    """
    Evaluation-metrics collector for a multi-robot path-planning system.

    Stores one entry per experiment (path lengths per robot, task success
    rate, conflict count, scheduling/execution times, task count) and
    provides aggregation, console printing, plotting, and file export.
    """
    def __init__(self):
        """Initialize empty per-experiment result containers."""
        # One dict per experiment: {robot_id: path length}
        self.path_lengths: List[Dict[int, int]] = []
        # Task completion rate per experiment (percent, 0-100)
        self.success_rates: List[float] = []
        # Number of conflicts per experiment
        self.conflict_counts: List[int] = []
        # Scheduling time per experiment (seconds)
        self.scheduling_times: List[float] = []
        # Execution time per experiment (seconds)
        self.execution_times: List[float] = []
        # Number of tasks per experiment
        self.task_counts: List[int] = []

    def add_result(self, path_lengths: Dict[int, int], success_rate: float, conflict_count: int,
                  scheduling_time: float, execution_time: float, task_count: int) -> None:
        """
        Record the results of one experiment.

        Args:
            path_lengths: Path lengths keyed by robot id, {robot_id: length}.
            success_rate: Task completion rate (percent).
            conflict_count: Number of conflicts observed.
            scheduling_time: Scheduling time in seconds.
            execution_time: Execution time in seconds.
            task_count: Number of tasks in the experiment.
        """
        self.path_lengths.append(path_lengths)
        self.success_rates.append(success_rate)
        self.conflict_counts.append(conflict_count)
        self.scheduling_times.append(scheduling_time)
        self.execution_times.append(execution_time)
        self.task_counts.append(task_count)

    def get_average_metrics(self) -> Dict[str, float]:
        """
        Compute averages across all recorded experiments.

        Returns:
            Dict[str, float]: Mapping of metric name to its mean value;
            every value is 0 when no data has been recorded.
        """
        # Average path length over every robot in every experiment.
        avg_path_length = 0.0
        if self.path_lengths:
            all_lengths = []
            for lengths in self.path_lengths:
                all_lengths.extend(lengths.values())
            avg_path_length = float(np.mean(all_lengths)) if all_lengths else 0.0

        # Per-experiment averages; guard each against an empty list so the
        # method is safe to call before any result has been added.
        avg_success_rate = float(np.mean(self.success_rates)) if self.success_rates else 0.0
        avg_conflict_count = float(np.mean(self.conflict_counts)) if self.conflict_counts else 0.0
        avg_scheduling_time = float(np.mean(self.scheduling_times)) if self.scheduling_times else 0.0
        avg_execution_time = float(np.mean(self.execution_times)) if self.execution_times else 0.0
        avg_task_count = float(np.mean(self.task_counts)) if self.task_counts else 0.0

        return {
            'avg_path_length': avg_path_length,
            'avg_success_rate': avg_success_rate,
            'avg_conflict_count': avg_conflict_count,
            'avg_scheduling_time': avg_scheduling_time,
            'avg_execution_time': avg_execution_time,
            'avg_task_count': avg_task_count
        }

    def print_metrics(self) -> None:
        """Print the averaged metrics to stdout."""
        metrics = self.get_average_metrics()

        print("\n===== 评估指标 =====")
        print(f"平均路径长度: {metrics['avg_path_length']:.2f}")
        print(f"平均任务完成率: {metrics['avg_success_rate']:.2f}%")
        print(f"平均冲突数量: {metrics['avg_conflict_count']:.2f}")
        print(f"平均调度时间: {metrics['avg_scheduling_time']:.4f}秒")
        print(f"平均执行时间: {metrics['avg_execution_time']:.4f}秒")
        print(f"平均任务数量: {metrics['avg_task_count']:.2f}")

    def plot_metrics(self, save_path: Optional[str] = None) -> None:
        """
        Plot the recorded metrics as four scatter charts.

        Args:
            save_path: File path to save the figure to; if None, the
                figure is shown interactively instead.
        """
        if not self.task_counts:
            print("没有可用的评估数据")
            return

        # NOTE(review): the Chinese axis labels require a CJK-capable
        # matplotlib font to render — confirm rcParams are configured
        # elsewhere in the project.
        fig, axs = plt.subplots(2, 2, figsize=(16, 14))  # enlarged figure

        # Path length vs task count. Guard against an empty per-experiment
        # dict (np.mean of an empty list warns and yields nan); mirrors the
        # guard used in save_metrics.
        avg_lengths = [np.mean(list(lengths.values())) if lengths else 0
                       for lengths in self.path_lengths]
        axs[0, 0].scatter(self.task_counts, avg_lengths)
        axs[0, 0].set_xlabel('任务数量', fontsize=14)
        axs[0, 0].set_ylabel('平均路径长度', fontsize=14)
        axs[0, 0].set_title('任务数量 vs 平均路径长度', fontsize=16)
        axs[0, 0].grid(True)
        axs[0, 0].tick_params(axis='both', which='major', labelsize=12)

        # Success rate vs task count
        axs[0, 1].scatter(self.task_counts, self.success_rates)
        axs[0, 1].set_xlabel('任务数量', fontsize=14)
        axs[0, 1].set_ylabel('任务完成率 (%)', fontsize=14)
        axs[0, 1].set_title('任务数量 vs 任务完成率', fontsize=16)
        axs[0, 1].grid(True)
        axs[0, 1].set_ylim(0, 105)
        axs[0, 1].tick_params(axis='both', which='major', labelsize=12)

        # Conflict count vs task count
        axs[1, 0].scatter(self.task_counts, self.conflict_counts)
        axs[1, 0].set_xlabel('任务数量', fontsize=14)
        axs[1, 0].set_ylabel('冲突数量', fontsize=14)
        axs[1, 0].set_title('任务数量 vs 冲突数量', fontsize=16)
        axs[1, 0].grid(True)
        axs[1, 0].tick_params(axis='both', which='major', labelsize=12)

        # Scheduling time vs task count
        axs[1, 1].scatter(self.task_counts, self.scheduling_times)
        axs[1, 1].set_xlabel('任务数量', fontsize=14)
        axs[1, 1].set_ylabel('调度时间 (秒)', fontsize=14)
        axs[1, 1].set_title('任务数量 vs 调度时间', fontsize=16)
        axs[1, 1].grid(True)
        axs[1, 1].tick_params(axis='both', which='major', labelsize=12)

        plt.tight_layout(pad=3.0)  # extra spacing between subplots

        if save_path:
            plt.savefig(save_path, dpi=300)  # higher DPI for image quality
            plt.close()
        else:
            plt.show()

    def save_metrics(self, file_path: str) -> None:
        """
        Write the averaged metrics and per-experiment details to a text file.

        Args:
            file_path: Path of the output file (overwritten if it exists).
        """
        metrics = self.get_average_metrics()

        # encoding='utf-8' is required: the report contains Chinese text and
        # would raise UnicodeEncodeError under a non-UTF-8 locale default.
        with open(file_path, 'w', encoding='utf-8') as f:
            f.write("===== 评估指标 =====\n")
            f.write(f"平均路径长度: {metrics['avg_path_length']:.2f}\n")
            f.write(f"平均任务完成率: {metrics['avg_success_rate']:.2f}%\n")
            f.write(f"平均冲突数量: {metrics['avg_conflict_count']:.2f}\n")
            f.write(f"平均调度时间: {metrics['avg_scheduling_time']:.4f}秒\n")
            f.write(f"平均执行时间: {metrics['avg_execution_time']:.4f}秒\n")
            f.write(f"平均任务数量: {metrics['avg_task_count']:.2f}\n")

            f.write("\n===== 详细数据 =====\n")
            for i in range(len(self.task_counts)):
                f.write(f"实验 {i+1}:\n")
                f.write(f"  任务数量: {self.task_counts[i]}\n")
                f.write(f"  任务完成率: {self.success_rates[i]:.2f}%\n")
                f.write(f"  冲突数量: {self.conflict_counts[i]}\n")
                f.write(f"  调度时间: {self.scheduling_times[i]:.4f}秒\n")
                f.write(f"  执行时间: {self.execution_times[i]:.4f}秒\n")

                # Per-experiment average path length (0 if no robots recorded)
                path_lengths = self.path_lengths[i]
                avg_length = np.mean(list(path_lengths.values())) if path_lengths else 0
                f.write(f"  平均路径长度: {avg_length:.2f}\n")
                f.write("\n")