import numpy as np
import torch
import matplotlib.pyplot as plt
import os
import sys
import time
from collections import defaultdict
import pandas as pd
from datetime import datetime

# Add the project directory to the import path so local packages resolve
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from runner import Runner
from agent import Agent
from environment.encirclement_env import EncirclementEnv
from common.arguments import get_args
from common.utils import make_encirclement_env

# Configure matplotlib to render Chinese labels (SimHei font) and to keep
# the minus sign displayable alongside a CJK font
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

class EncirclementEvaluator:
    """Evaluator for the multi-UAV target-encirclement task.

    Loads the newest saved actor/critic checkpoints for each agent, runs
    greedy (epsilon=0) rollouts in the environment, aggregates
    success/steps/reward statistics, renders trajectory and summary plots,
    saves results to Excel, and optionally compares against a uniform
    random-action baseline.
    """

    def __init__(self, args, env):
        self.args = args
        self.env = env
        self.agents = self._init_agents()
        # Kept for external callers that may want to accumulate results.
        self.evaluation_results = []

    def _init_agents(self):
        """Create one Agent per UAV and load its newest saved model.

        Checkpoint files are expected to be named ``<N>_actor_params.pkl`` /
        ``<N>_critic_params.pkl``; the largest numeric prefix N is treated as
        the newest. Agents whose files are missing or fail to load keep their
        freshly initialized (effectively random) networks.

        Returns:
            list[Agent]: one agent per UAV, index-aligned with env.uavs.
        """
        agents = []
        for i in range(self.args.num_uavs):
            agent = Agent(i, self.args, self.args.num_position_action)
            model_path = f"{self.args.save_dir}/{self.args.scenario_name}/agent_{i}"

            # Collect candidate checkpoint files for this agent.
            actor_files = []
            critic_files = []
            if os.path.exists(model_path):
                for file in os.listdir(model_path):
                    if file.endswith('_actor_params.pkl'):
                        actor_files.append(file)
                    elif file.endswith('_critic_params.pkl'):
                        critic_files.append(file)

            if actor_files and critic_files:
                # Sort by the numeric prefix and pick the largest (newest).
                # NOTE(review): assumes every matching file starts with an
                # integer prefix, as produced by the training checkpointer.
                actor_files.sort(key=lambda x: int(x.split('_')[0]))
                critic_files.sort(key=lambda x: int(x.split('_')[0]))

                latest_actor = actor_files[-1]
                latest_critic = critic_files[-1]

                actor_path = os.path.join(model_path, latest_actor)
                critic_path = os.path.join(model_path, latest_critic)

                try:
                    # map_location='cpu' so GPU-trained weights load anywhere.
                    agent.policy.actor_network.load_state_dict(
                        torch.load(actor_path, map_location='cpu')
                    )
                    agent.policy.critic_network.load_state_dict(
                        torch.load(critic_path, map_location='cpu')
                    )
                    print(f"✓ 成功加载智能体 {i} 的模型: {latest_actor}, {latest_critic}")
                except Exception as e:
                    # Best-effort: fall back to the untrained networks.
                    print(f"⚠️ 警告: 智能体 {i} 模型加载失败: {e}，将使用随机策略")
            else:
                print(f"⚠️ 警告: 智能体 {i} 的模型文件不存在，将使用随机策略")
                print(f"   查找路径: {model_path}")

            agents.append(agent)
        return agents

    def build_state(self, uav_id):
        """Build the observation for a single UAV.

        The state combines the UAV's own state, the target position, and the
        positions of all *other* UAVs (self excluded).
        """
        uav = self.env.uavs[uav_id]
        target_pos = self.env.target.position
        other_uav_positions = [other_uav.position for i, other_uav in enumerate(self.env.uavs) if i != uav_id]
        return uav.get_extended_state(target_pos, other_uav_positions)

    def evaluate_single_episode(self, episode_id, max_steps=None, visualize=False, save_trajectory=False):
        """Run one greedy evaluation episode.

        Args:
            episode_id: 1-based id used for logging/plot titles.
            max_steps: step budget; defaults to args.evaluate_episode_len.
            visualize: if True, plot the episode after it finishes.
            save_trajectory: if True, record per-step positions/rewards.

        Returns:
            dict with success flag, step count, rewards, final encirclement
            progress, and (optionally) the recorded trajectory.
        """
        if max_steps is None:
            max_steps = self.args.evaluate_episode_len

        step = 0
        total_reward = 0
        self.env.reset()
        episode_success = False
        trajectory = []

        # Record the initial (post-reset) state as trajectory step 0.
        if save_trajectory:
            initial_state = {
                'step': 0,
                'target_pos': self.env.target.position,
                'uav_positions': [uav.position for uav in self.env.uavs],
                'encirclement_progress': self.env.calculate_encirclement_progress(),
                'is_encircled': self.env.check_encirclement()
            }
            trajectory.append(initial_state)

        while step < max_steps:
            step_rewards = []
            step_actions = []

            # All UAVs act once per environment step.
            for uav_id, (uav, agent) in enumerate(zip(self.env.uavs, self.agents)):
                state = self.build_state(uav_id)

                # Greedy action selection: no exploration during evaluation.
                action, _ = agent.select_action(state, epsilon=0.0, uav=uav)
                step_actions.append(action)

                reward = self.env.perform_action(uav, action, step, max_steps, episode_id, use_optimized=True)
                step_rewards.append(reward)
                total_reward += reward

            if save_trajectory:
                step_state = {
                    'step': step + 1,
                    'target_pos': self.env.target.position,
                    'uav_positions': [uav.position for uav in self.env.uavs],
                    'actions': step_actions.copy(),
                    'rewards': step_rewards.copy(),
                    'encirclement_progress': self.env.calculate_encirclement_progress(),
                    'is_encircled': self.env.check_encirclement()
                }
                trajectory.append(step_state)

            # Stop as soon as the target is encircled.
            if self.env.check_encirclement():
                episode_success = True
                break

            step += 1

        if visualize:
            self.visualize_episode(episode_id, trajectory if save_trajectory else None)

        result = {
            'episode_id': episode_id,
            'success': episode_success,
            # NOTE(review): on success this is the 0-based index of the final
            # step (the break happens before step += 1); on timeout it equals
            # max_steps. Kept as-is to preserve existing statistics.
            'steps': step,
            'success': episode_success,
            'total_reward': total_reward,
            'average_reward': total_reward / len(self.env.uavs),
            'encirclement_progress': self.env.calculate_encirclement_progress(),
            'trajectory': trajectory if save_trajectory else None
        }

        return result

    def evaluate_multiple_episodes(self, num_episodes=None, visualize_best=True, save_detailed_results=True):
        """Evaluate the trained policy over several episodes.

        Records every episode's trajectory so the *best* episode (highest
        total reward) can be visualized afterwards. (Previously only the
        first episode was recorded, which silently skipped best-episode
        visualization whenever a later episode scored higher.)

        Returns:
            (results, stats): per-episode result dicts and aggregate stats.
        """
        if num_episodes is None:
            num_episodes = self.args.evaluate_episodes

        print(f"开始评估 {num_episodes} 个episodes...")

        results = []
        best_episode = None
        best_reward = float('-inf')

        for episode in range(num_episodes):
            print(f"评估 Episode {episode + 1}/{num_episodes}...", end=' ')

            # Keep the trajectory for every episode so the best one is
            # available for visualization below.
            result = self.evaluate_single_episode(
                episode_id=episode + 1,
                save_trajectory=True
            )

            results.append(result)

            # Track the highest-reward episode.
            if result['total_reward'] > best_reward:
                best_reward = result['total_reward']
                best_episode = result

            status = "成功" if result['success'] else "失败"
            print(f"{status}, 步数: {result['steps']}, 奖励: {result['total_reward']:.2f}")

        stats = self._calculate_statistics(results)

        self._print_evaluation_summary(stats)

        # Visualize the best episode's recorded trajectory.
        if visualize_best and best_episode and best_episode['trajectory']:
            print(f"\n可视化最佳Episode (Episode {best_episode['episode_id']})...")
            self.visualize_episode(best_episode['episode_id'], best_episode['trajectory'])

        if save_detailed_results:
            self._save_evaluation_results(results, stats)

        return results, stats

    def _calculate_statistics(self, results):
        """Aggregate per-episode results into summary statistics.

        Args:
            results: list of episode result dicts (requires keys 'success',
                'total_reward', 'steps', 'encirclement_progress').

        Returns:
            dict of counts, success rate, and mean/std/min/max metrics.
        """
        success_count = sum(1 for r in results if r['success'])
        total_episodes = len(results)

        rewards = [r['total_reward'] for r in results]
        steps = [r['steps'] for r in results]
        progress = [r['encirclement_progress'] for r in results]

        stats = {
            'total_episodes': total_episodes,
            'success_count': success_count,
            'success_rate': success_count / total_episodes,
            'average_reward': np.mean(rewards),
            'std_reward': np.std(rewards),
            'max_reward': np.max(rewards),
            'min_reward': np.min(rewards),
            'average_steps': np.mean(steps),
            'std_steps': np.std(steps),
            'average_progress': np.mean(progress),
            # Conditional means guard against np.mean([]) on all-success /
            # all-failure runs.
            'successful_episodes_avg_steps': np.mean([r['steps'] for r in results if r['success']]) if success_count > 0 else 0,
            'failed_episodes_avg_steps': np.mean([r['steps'] for r in results if not r['success']]) if success_count < total_episodes else 0
        }

        return stats

    def _print_evaluation_summary(self, stats):
        """Print a human-readable summary of the evaluation statistics."""
        print("\n" + "="*60)
        print("评估结果摘要")
        print("="*60)
        print(f"总Episodes数: {stats['total_episodes']}")
        print(f"成功Episodes数: {stats['success_count']}")
        print(f"成功率: {stats['success_rate']:.2%}")
        print(f"平均奖励: {stats['average_reward']:.2f} ± {stats['std_reward']:.2f}")
        print(f"最高奖励: {stats['max_reward']:.2f}")
        print(f"最低奖励: {stats['min_reward']:.2f}")
        print(f"平均步数: {stats['average_steps']:.1f} ± {stats['std_steps']:.1f}")
        print(f"平均合围进度: {stats['average_progress']:.2%}")

        if stats['success_count'] > 0:
            print(f"成功Episodes平均步数: {stats['successful_episodes_avg_steps']:.1f}")
        if stats['success_count'] < stats['total_episodes']:
            print(f"失败Episodes平均步数: {stats['failed_episodes_avg_steps']:.1f}")

        print("="*60)

    def _save_evaluation_results(self, results, stats):
        """Save per-episode results and summary stats to a timestamped Excel
        file under data/, then render the summary plots."""
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

        # One row per episode with the scalar metrics only (no trajectories).
        df_results = pd.DataFrame([
            {
                'Episode': r['episode_id'],
                'Success': r['success'],
                'Steps': r['steps'],
                'Total_Reward': r['total_reward'],
                'Average_Reward': r['average_reward'],
                'Encirclement_Progress': r['encirclement_progress']
            }
            for r in results
        ])

        # Single-row sheet with the aggregate statistics.
        stats_df = pd.DataFrame([stats])

        os.makedirs('data', exist_ok=True)

        excel_path = f'data/evaluation_results_{timestamp}.xlsx'
        with pd.ExcelWriter(excel_path) as writer:
            df_results.to_excel(writer, sheet_name='详细结果', index=False)
            stats_df.to_excel(writer, sheet_name='统计摘要', index=False)

        print(f"\n评估结果已保存到: {excel_path}")

        self._plot_evaluation_results(results, stats, timestamp)

    def _plot_evaluation_results(self, results, stats, timestamp):
        """Render a 2x2 grid of evaluation plots (reward/steps histograms,
        success-rate trend, encirclement-progress histogram) and save as PNG."""
        fig, axes = plt.subplots(2, 2, figsize=(15, 12))
        fig.suptitle(f'评估结果分析 (成功率: {stats["success_rate"]:.2%})', fontsize=16)

        # 1. Reward distribution
        rewards = [r['total_reward'] for r in results]
        axes[0, 0].hist(rewards, bins=20, alpha=0.7, color='skyblue', edgecolor='black')
        axes[0, 0].axvline(stats['average_reward'], color='red', linestyle='--', label=f'平均值: {stats["average_reward"]:.2f}')
        axes[0, 0].set_title('奖励分布')
        axes[0, 0].set_xlabel('总奖励')
        axes[0, 0].set_ylabel('频次')
        axes[0, 0].legend()
        axes[0, 0].grid(True, alpha=0.3)

        # 2. Step-count distribution
        steps = [r['steps'] for r in results]
        axes[0, 1].hist(steps, bins=20, alpha=0.7, color='lightgreen', edgecolor='black')
        axes[0, 1].axvline(stats['average_steps'], color='red', linestyle='--', label=f'平均值: {stats["average_steps"]:.1f}')
        axes[0, 1].set_title('步数分布')
        axes[0, 1].set_xlabel('完成步数')
        axes[0, 1].set_ylabel('频次')
        axes[0, 1].legend()
        axes[0, 1].grid(True, alpha=0.3)

        # 3. Success-rate trend (moving average over episodes)
        episode_ids = [r['episode_id'] for r in results]
        success_values = [1 if r['success'] else 0 for r in results]
        # Moving-average window: at most 10, at least half the episode count.
        window_size = min(10, len(results) // 2)
        if window_size > 0:
            moving_avg = np.convolve(success_values, np.ones(window_size)/window_size, mode='valid')
            axes[1, 0].plot(episode_ids[window_size-1:], moving_avg, 'b-', linewidth=2, label=f'滑动平均(窗口={window_size})')
        axes[1, 0].scatter(episode_ids, success_values, alpha=0.6, c=['green' if s else 'red' for s in success_values])
        axes[1, 0].set_title('成功率趋势')
        axes[1, 0].set_xlabel('Episode')
        axes[1, 0].set_ylabel('成功 (1) / 失败 (0)')
        axes[1, 0].set_ylim(-0.1, 1.1)
        axes[1, 0].legend()
        axes[1, 0].grid(True, alpha=0.3)

        # 4. Encirclement-progress distribution
        progress = [r['encirclement_progress'] for r in results]
        axes[1, 1].hist(progress, bins=20, alpha=0.7, color='orange', edgecolor='black')
        axes[1, 1].axvline(stats['average_progress'], color='red', linestyle='--', label=f'平均值: {stats["average_progress"]:.2%}')
        axes[1, 1].set_title('合围进度分布')
        axes[1, 1].set_xlabel('合围进度')
        axes[1, 1].set_ylabel('频次')
        axes[1, 1].legend()
        axes[1, 1].grid(True, alpha=0.3)

        plt.tight_layout()

        os.makedirs('data', exist_ok=True)
        plot_path = f'data/evaluation_plots_{timestamp}.png'
        plt.savefig(plot_path, dpi=300, bbox_inches='tight')
        print(f"评估图表已保存到: {plot_path}")
        plt.show()

    def visualize_episode(self, episode_id, trajectory=None):
        """Visualize one episode: full trajectory if recorded, otherwise just
        the environment's current state."""
        if trajectory is None:
            self._plot_current_state(episode_id)
        else:
            self._plot_trajectory(episode_id, trajectory)

    def _plot_current_state(self, episode_id):
        """Plot the current environment snapshot: target, encirclement rings,
        UAV positions, and the current encirclement status."""
        fig, ax = plt.subplots(figsize=(10, 10))

        # Target marker
        target_pos = self.env.target.position
        ax.plot(target_pos[0], target_pos[1], 'rs', markersize=15, label='目标')

        # Encirclement distance rings around the target
        circle_min = plt.Circle(target_pos, self.env.min_encirclement_distance,
                               color='red', fill=False, linestyle='--', alpha=0.5, label='最小合围距离')
        circle_max = plt.Circle(target_pos, self.env.max_encirclement_distance,
                               color='red', fill=False, linestyle=':', alpha=0.5, label='最大合围距离')
        ax.add_artist(circle_min)
        ax.add_artist(circle_max)

        # UAV markers plus a faint UAV-to-target line
        colors = ['blue', 'green', 'orange', 'purple', 'brown', 'pink']
        for i, uav in enumerate(self.env.uavs):
            color = colors[i % len(colors)]
            ax.plot(uav.position[0], uav.position[1], 'o', color=color,
                   markersize=12, label=f'无人机{uav.id}')

            ax.plot([uav.position[0], target_pos[0]],
                   [uav.position[1], target_pos[1]],
                   color=color, alpha=0.3, linestyle=':')

        ax.set_xlim(0, self.env.grid_size)
        ax.set_ylim(0, self.env.grid_size)
        ax.legend()
        ax.set_title(f'Episode {episode_id} - 当前状态')
        ax.set_xlabel('X坐标')
        ax.set_ylabel('Y坐标')
        ax.grid(True, alpha=0.3)

        # Annotate success or current progress
        if self.env.check_encirclement():
            ax.text(10, self.env.grid_size-20, '合围成功!', fontsize=16,
                   bbox=dict(boxstyle="round", facecolor='lightgreen'))
        else:
            progress = self.env.calculate_encirclement_progress()
            ax.text(10, self.env.grid_size-20, f'合围进度: {progress:.1%}', fontsize=16,
                   bbox=dict(boxstyle="round", facecolor='lightyellow'))

        plt.show()

    def _plot_trajectory(self, episode_id, trajectory):
        """Plot a recorded episode: UAV paths (left) and encirclement
        progress over time (right)."""
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))

        # Left panel: spatial trajectories
        target_pos = trajectory[0]['target_pos']
        ax1.plot(target_pos[0], target_pos[1], 'rs', markersize=15, label='目标')

        # Encirclement distance rings around the target
        circle_min = plt.Circle(target_pos, self.env.min_encirclement_distance,
                               color='red', fill=False, linestyle='--', alpha=0.5)
        circle_max = plt.Circle(target_pos, self.env.max_encirclement_distance,
                               color='red', fill=False, linestyle=':', alpha=0.5)
        ax1.add_artist(circle_min)
        ax1.add_artist(circle_max)

        # One polyline per UAV; circle = start, square = end
        colors = ['blue', 'green', 'orange', 'purple', 'brown', 'pink']
        for uav_id in range(len(trajectory[0]['uav_positions'])):
            color = colors[uav_id % len(colors)]

            x_traj = [step['uav_positions'][uav_id][0] for step in trajectory]
            y_traj = [step['uav_positions'][uav_id][1] for step in trajectory]

            ax1.plot(x_traj, y_traj, color=color, alpha=0.7, linewidth=2, label=f'无人机{uav_id}轨迹')

            ax1.plot(x_traj[0], y_traj[0], 'o', color=color, markersize=10, markeredgecolor='black', markeredgewidth=2)
            ax1.plot(x_traj[-1], y_traj[-1], 's', color=color, markersize=10, markeredgecolor='black', markeredgewidth=2)

        ax1.set_xlim(0, self.env.grid_size)
        ax1.set_ylim(0, self.env.grid_size)
        ax1.legend()
        ax1.set_title(f'Episode {episode_id} - 无人机轨迹')
        ax1.set_xlabel('X坐标')
        ax1.set_ylabel('Y坐标')
        ax1.grid(True, alpha=0.3)

        # Right panel: encirclement progress per step
        steps = [step['step'] for step in trajectory]
        progress = [step['encirclement_progress'] for step in trajectory]

        ax2.plot(steps, progress, 'b-', linewidth=2, label='合围进度')
        ax2.axhline(y=1.0, color='red', linestyle='--', alpha=0.7, label='完全合围')
        ax2.set_xlabel('步数')
        ax2.set_ylabel('合围进度')
        ax2.set_title(f'Episode {episode_id} - 合围进度变化')
        ax2.legend()
        ax2.grid(True, alpha=0.3)
        ax2.set_ylim(0, 1.1)

        # Find the first step at which encirclement was achieved
        success_step = None
        for step in trajectory:
            if step['is_encircled']:
                success_step = step['step']
                break

        # BUGFIX: step 0 is a valid success step but is falsy; compare
        # against None instead of relying on truthiness.
        if success_step is not None:
            ax2.axvline(x=success_step, color='green', linestyle=':', alpha=0.7, label=f'成功步数: {success_step}')
            ax2.legend()

        plt.tight_layout()
        plt.show()

    def compare_with_random_policy(self, num_episodes=10):
        """Compare the trained policy against a uniform random policy.

        Both policies are rolled out for the same number of episodes and
        scored with the same reward computation (use_optimized=True) so the
        reported rewards are directly comparable.

        Returns:
            (trained_stats, random_stats): aggregate statistics for each.
        """
        print(f"\n开始与随机策略对比评估 ({num_episodes} episodes)...")

        # Evaluate the trained policy (no plots / files for the comparison).
        print("评估训练策略...")
        trained_results, trained_stats = self.evaluate_multiple_episodes(
            num_episodes=num_episodes,
            visualize_best=False,
            save_detailed_results=False
        )

        # Evaluate the random baseline.
        print("\n评估随机策略...")
        random_results = []
        for episode in range(num_episodes):
            print(f"随机策略 Episode {episode + 1}/{num_episodes}...", end=' ')

            step = 0
            total_reward = 0
            self.env.reset()
            episode_success = False

            while step < self.args.evaluate_episode_len:
                for uav_id, uav in enumerate(self.env.uavs):
                    # Uniform random one-hot action over the action space.
                    action = np.zeros(self.args.num_position_action)
                    action[np.random.randint(0, self.args.num_position_action)] = 1
                    action = action.tolist()

                    # use_optimized=True matches the trained-policy rollout so
                    # both policies are scored by the same reward function.
                    reward = self.env.perform_action(uav, action, step, self.args.evaluate_episode_len, episode, use_optimized=True)
                    total_reward += reward

                if self.env.check_encirclement():
                    episode_success = True
                    break

                step += 1

            result = {
                'episode_id': episode + 1,
                'success': episode_success,
                'steps': step,
                'total_reward': total_reward,
                'average_reward': total_reward / len(self.env.uavs),
                'encirclement_progress': self.env.calculate_encirclement_progress()
            }

            random_results.append(result)
            status = "成功" if result['success'] else "失败"
            print(f"{status}, 步数: {result['steps']}, 奖励: {result['total_reward']:.2f}")

        random_stats = self._calculate_statistics(random_results)

        self._print_comparison_results(trained_stats, random_stats)

        return trained_stats, random_stats

    def _print_comparison_results(self, trained_stats, random_stats):
        """Print a side-by-side table of trained vs. random policy metrics,
        including the improvement ratio per metric."""
        print("\n" + "="*80)
        print("策略对比结果")
        print("="*80)
        print(f"{'指标':<20} {'训练策略':<20} {'随机策略':<20} {'改进倍数':<15}")
        print("-"*80)

        metrics = [
            ('成功率', 'success_rate', '{:.2%}'),
            ('平均奖励', 'average_reward', '{:.2f}'),
            ('平均步数', 'average_steps', '{:.1f}'),
            ('平均进度', 'average_progress', '{:.2%}')
        ]

        for name, key, fmt in metrics:
            trained_val = trained_stats[key]
            random_val = random_stats[key]

            # Guard against division by zero when the baseline metric is 0.
            if random_val != 0:
                improvement = trained_val / random_val
            else:
                improvement = float('inf') if trained_val > 0 else 1.0

            print(f"{name:<20} {fmt.format(trained_val):<20} {fmt.format(random_val):<20} {improvement:.2f}x")

        print("="*80)
def main():
    """Entry point: parse CLI args, build the environment, and run the
    evaluation (plus an optional random-policy comparison)."""
    args = get_args()
    args.evaluate = True  # switch the shared argument set into evaluation mode

    env, args = make_encirclement_env(args)
    evaluator = EncirclementEvaluator(args, env)

    banner = "=" * 50
    print("多无人机合围任务评估器")
    print(banner)
    print(f"模型路径: {args.save_dir}/{args.scenario_name}")
    print(f"评估Episodes数: {args.evaluate_episodes}")
    print(f"每个Episode最大步数: {args.evaluate_episode_len}")
    print(banner)

    try:
        # 1. Standard evaluation over the configured number of episodes.
        evaluator.evaluate_multiple_episodes(
            num_episodes=args.evaluate_episodes,
            visualize_best=True,
            save_detailed_results=True,
        )

        # 2. Optional head-to-head comparison against a random policy.
        print("\n是否进行随机策略对比？(y/n): ", end='')
        answer = input().strip().lower()
        if answer == 'y':
            evaluator.compare_with_random_policy(
                num_episodes=min(10, args.evaluate_episodes)
            )

        print("\n评估完成！")

    except Exception as e:
        print(f"评估过程中出现错误: {e}")
        import traceback
        traceback.print_exc()

if __name__ == '__main__':
    main()