import numpy as np
import matplotlib.pyplot as plt
from dataclasses import dataclass
from typing import List, Tuple
import pandas as pd
import os

# Configure matplotlib fonts so CJK axis labels/titles render correctly.
import matplotlib
matplotlib.rcParams['font.sans-serif'] = ['DejaVu Sans']  # Latin-text baseline
matplotlib.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly
# NOTE(review): assigning rcParams does not validate that the fonts exist, so
# the fallback branches below are unlikely to ever trigger; kept for structure,
# but the bare `except:` clauses are narrowed to `except Exception` so that
# SystemExit/KeyboardInterrupt are no longer swallowed.
try:
    # Windows fonts
    plt.rcParams['font.sans-serif'] = ['Microsoft YaHei', 'SimHei']
except Exception:
    try:
        # macOS fonts
        plt.rcParams['font.sans-serif'] = ['Arial Unicode MS', 'Heiti TC']
    except Exception:
        # Linux fonts
        plt.rcParams['font.sans-serif'] = ['WenQuanYi Micro Hei', 'DejaVu Sans']

plt.rcParams['axes.unicode_minus'] = False

# Make sure the folder that will receive all visualization output exists.
output_dir = 'Q3可视化'
dir_is_missing = not os.path.exists(output_dir)
if dir_is_missing:
    os.makedirs(output_dir)
    print(f"创建输出文件夹: {output_dir}")

# 1. 数据类和核心类定义
@dataclass
class State:
    """Snapshot of the decision-relevant environment variables."""

    missile_distance: float  # missile distance from origin (m)
    remaining_smokes: int    # smoke grenades still available
    last_launch_time: float  # time of the most recent launch (s)
    current_time: float      # current simulation time (s)

    def to_array(self):
        """Return the state as a normalized 4-element numpy vector."""
        scaled = (
            self.missile_distance / 20000,  # normalize by max range
            self.remaining_smokes / 3,
            self.last_launch_time / 60,
            self.current_time / 60,
        )
        return np.array(scaled)

class PPOAgent:
    """Simplified PPO-style agent with linear actor/critic heads.

    The "networks" are single weight matrices; `get_action` maps a state
    vector to a (launch, explosion_delay) pair, and `update` performs a
    REINFORCE-like gradient step on discounted returns.
    """

    def __init__(self, state_dim=4, action_dim=2, learning_rate=3e-4):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.lr = learning_rate

        # Simplified network parameters (single linear layers)
        self.actor_weights = np.random.randn(state_dim, action_dim) * 0.1
        self.critic_weights = np.random.randn(state_dim, 1) * 0.1

        # Experience buffers, filled externally during each episode
        self.states = []
        self.actions = []
        self.rewards = []
        self.values = []

        print(f"PPO智能体初始化:")
        print(f"  状态维度: {state_dim}")
        print(f"  动作维度: {action_dim}")
        print(f"  学习率: {learning_rate}")

    def get_action(self, state: np.ndarray) -> Tuple[bool, float]:
        """Sample an action for `state`.

        Returns:
            launch: whether to release a smoke grenade now.
            explosion_delay: fuse delay in seconds, squashed into [3, 8].
        """
        logits = np.dot(state, self.actor_weights)

        # Sigmoid on the first logit gives the launch probability
        launch_prob = 1 / (1 + np.exp(-logits[0]))
        launch = np.random.random() < launch_prob

        # Sigmoid on the second logit, rescaled to the 3-8 s fuse range
        explosion_delay = 3 + 5 * (1 / (1 + np.exp(-logits[1])))

        return launch, explosion_delay

    def update(self):
        """Apply one gradient step from the buffered episode, then clear buffers."""
        if len(self.rewards) > 0:
            # Discounted returns, accumulated back-to-front (gamma = 0.99)
            returns = []
            G = 0
            for r in reversed(self.rewards):
                G = r + 0.99 * G
                returns.append(G)
            returns.reverse()  # O(n) instead of repeated insert(0, ...)

            # Baseline is loop-invariant: compute it once, not per sample
            baseline = np.mean(returns)

            # Simplified gradient update (advantage = return - baseline)
            for s, ret in zip(self.states, returns):
                grad = (ret - baseline) * s.reshape(-1, 1)
                # Update actor (column vector broadcasts over action_dim)
                self.actor_weights += self.lr * grad[:, :self.action_dim]
                # Update critic
                self.critic_weights += self.lr * grad[:, :1]

            # Clear buffers for the next episode
            self.states.clear()
            self.actions.clear()
            self.rewards.clear()
            self.values.clear()

class SmokeEnvironment:
    """Smoke-grenade deployment environment.

    Simulates one missile approach (one episode). At each 1-second step the
    agent may release one of up to three smoke grenades with a chosen fuse
    delay; the terminal reward is based on the total obstruction duration.
    """
    
    def __init__(self):
        self.reset()
        
    def reset(self):
        """Reset the episode and return the initial State."""
        self.time = 0
        self.remaining_smokes = 3
        self.last_launch_time = -10
        self.smoke_events = []  # [(launch time, detonation time)]
        self.total_obstruction = 0
        
        # Missile parameters
        self.missile_start = np.array([20000, 0, 2000])
        self.missile_speed = 300
        self.total_flight_time = 67
        
        return self._get_state()
    
    def _get_state(self):
        """Build the current State snapshot."""
        # Current missile distance (straight-line approach at constant speed)
        missile_distance = 20000 - self.missile_speed * self.time
        
        return State(
            missile_distance=missile_distance,
            remaining_smokes=self.remaining_smokes,
            last_launch_time=self.last_launch_time,
            current_time=self.time
        )
    
    def step(self, action: Tuple[bool, float]):
        """Advance one second. Returns (next_state, reward, done)."""
        launch, explosion_delay = action
        reward = 0
        
        # Handle a launch request
        if launch and self.remaining_smokes > 0:
            # Enforce the minimum spacing between launches
            if self.time - self.last_launch_time >= 2.0:  # minimum interval (s)
                # Legal launch: record the event
                self.smoke_events.append((self.time, self.time + explosion_delay))
                self.remaining_smokes -= 1
                self.last_launch_time = self.time
                
                # Immediate shaping reward, peaked around t = 30 s
                if 10 <= self.time <= 50:  # effective launch window
                    reward = 8 * np.exp(-abs(self.time - 30) / 20)
                else:
                    reward = 0.5
            else:
                # Constraint violated: penalize
                reward = -15
        
        # Advance simulation time
        self.time += 1
        
        # Episode termination check
        done = self.time >= self.total_flight_time or \
               (self.remaining_smokes == 0 and self.time > self.last_launch_time + 15)
        
        if done:
            # Total obstruction duration drives the terminal reward
            self.total_obstruction = self._calculate_total_obstruction()
            # Reward shaping: target band is 10-12 seconds
            target_range = (10, 12)
            if self.total_obstruction < target_range[0]:
                # Too short: discounted reward
                reward += self.total_obstruction * 0.5
            elif self.total_obstruction > target_range[1]:
                # Too long: penalized beyond the upper bound
                reward += target_range[1] - (self.total_obstruction - target_range[1])
            else:
                # Inside the target band: full reward
                reward += self.total_obstruction
        
        return self._get_state(), reward, done
    
    def _calculate_total_obstruction(self):
        """Estimate the total obstruction duration over all smoke events.

        NOTE: the per-cloud base duration is random (5-7 s), so repeated
        calls with the same events yield different values. Result is
        capped at 12 s.
        """
        if not self.smoke_events:
            return 0
        
        base_obstruction = 0
        for i, (launch_t, explosion_t) in enumerate(self.smoke_events):
            base_duration = 5 + 2 * np.random.random()  
            
            # Effective time cannot exceed the missile's remaining flight time
            max_possible = self.total_flight_time - explosion_t
            duration = min(base_duration, max_possible)
            
            # Sequencing adjustment relative to the previous cloud
            if i > 0:
                prev_base = 5 + 2 * np.random.random()
                prev_end = self.smoke_events[i-1][1] + prev_base
                gap = explosion_t - prev_end
                
                # Gap-dependent effectiveness multiplier
                if gap < -1:  # heavy overlap: penalize
                    duration *= 0.5
                elif gap < 2:  # tight hand-off: small bonus
                    duration *= 1.05
                elif gap < 6:  # moderate gap
                    duration *= 0.95
                else:  # too far apart: reduced effect
                    duration *= 0.8
            
            # Environmental decay (later detonations are less effective)
            decay_factor = 0.8 - min(0.4, (explosion_t / self.total_flight_time) * 0.4)
            duration *= decay_factor
            
            base_obstruction += duration
        return min(base_obstruction, 12)
    
    def calculate_individual_obstruction(self, smoke_idx):
        """Estimate the effective obstruction duration of one smoke grenade.

        Stochastic (same 5-7 s base-duration model as
        _calculate_total_obstruction); out-of-range indices return 0.
        """
        if smoke_idx >= len(self.smoke_events):
            return 0
        
        launch_t, explosion_t = self.smoke_events[smoke_idx]
    
        base_duration = 5 + 2 * np.random.random()
        
        # Effective time cannot exceed the missile's remaining flight time
        max_possible = self.total_flight_time - explosion_t
        duration = min(base_duration, max_possible)
        
        # Environmental decay (later detonations are less effective)
        decay_factor = 0.8 - min(0.4, (explosion_t / self.total_flight_time) * 0.4)
        effective_duration = duration * decay_factor
        
        # Overlap penalty against the previous smoke grenade
        if smoke_idx > 0:
            prev_base = 5 + 2 * np.random.random()
            prev_end = self.smoke_events[smoke_idx-1][1] + prev_base
            gap = explosion_t - prev_end
            if gap < -1:  # heavy overlap
                effective_duration *= 0.5
        
        return effective_duration

# 2. 辅助函数
def calculate_direction_angle(vx, vy, vz=0):
    """Return the UAV heading angle in degrees, normalized to [0, 360).

    `vz` is accepted for interface compatibility but does not affect the
    planar heading.
    """
    heading = np.degrees(np.arctan2(vy, vx))
    # arctan2 yields (-180, 180]; shift negatives up into [0, 360)
    return heading + 360 if heading < 0 else heading

def train_ppo_agent(episodes=1000):
    """Train the PPO agent with a 3-level curriculum.

    Args:
        episodes: number of training episodes.

    Returns:
        (agent, episode_rewards, episode_obstructions)
    """
    
    env = SmokeEnvironment()
    agent = PPOAgent()
    
    # Training records
    episode_rewards = []
    episode_obstructions = []
    curriculum_level = 1  # curriculum-learning level
    
    print("\n开始PPO训练...")
    print("-" * 40)
    
    for episode in range(episodes):
        state = env.reset()
        total_reward = 0
        
        # Curriculum learning: difficulty ramps up with episode count
        if episode < 200:
            curriculum_level = 1  # easy: fixed policy
        elif episode < 500:
            curriculum_level = 2  # medium: partially stochastic
        else:
            curriculum_level = 3  # hard: fully learned policy
        
        # Episode loop
        done = False
        while not done:
            # Select an action
            if curriculum_level == 1:
                # Simple policy: launch at fixed times with a 5 s fuse
                if env.time in [15, 30, 45] and env.remaining_smokes > 0:
                    action = (True, 5.0)
                else:
                    action = (False, 0)
            else:
                # Learned policy
                action = agent.get_action(state.to_array())
            
            # Apply the action
            next_state, reward, done = env.step(action)
            
            # Store the transition
            agent.states.append(state.to_array())
            agent.actions.append(action)
            agent.rewards.append(reward)
            
            total_reward += reward
            state = next_state
        
        # Record episode results
        episode_rewards.append(total_reward)
        episode_obstructions.append(env.total_obstruction)
        
        # Update the agent (skipped while the fixed level-1 policy runs)
        if curriculum_level > 1:
            agent.update()
        
        # Progress report every 100 episodes
        if (episode + 1) % 100 == 0:
            avg_reward = np.mean(episode_rewards[-100:])
            avg_obstruction = np.mean(episode_obstructions[-100:])
            print(f"Episode {episode+1}: 平均奖励={avg_reward:.1f}, "
                  f"平均遮蔽={avg_obstruction:.1f}s, 课程等级={curriculum_level}")
    
    return agent, episode_rewards, episode_obstructions

#  3. 可视化分析函数
def visualize_training_results(rewards, obstructions, smoke_decisions, env):
    """训练结果可视化"""
    
    fig = plt.figure(figsize=(16, 10))
    
    # 子图1：训练曲线
    ax1 = plt.subplot(2, 3, 1)
    episodes = np.arange(len(rewards))
    ax1.plot(episodes, rewards, 'b-', alpha=0.3, linewidth=0.5)
    
    # 移动平均
    window = 50
    ma_rewards = np.convolve(rewards, np.ones(window)/window, mode='valid')
    ax1.plot(np.arange(window-1, len(rewards)), ma_rewards, 'r-', linewidth=2, 
            label=f'{window}-episode平均')
    
    # 标记课程学习阶段
    ax1.axvline(x=200, color='green', linestyle='--', alpha=0.5, label='Level 2')
    ax1.axvline(x=500, color='orange', linestyle='--', alpha=0.5, label='Level 3')
    
    ax1.set_xlabel('Episode')
    ax1.set_ylabel('总奖励')
    ax1.set_title('训练奖励曲线')
    ax1.legend()
    ax1.grid(True, alpha=0.3)
    
    # 子图2：遮蔽时长演化
    ax2 = plt.subplot(2, 3, 2)
    ax2.plot(episodes, obstructions, 'g-', alpha=0.3, linewidth=0.5)
    
    # 添加目标范围参考线
    ax2.axhline(y=10, color='red', linestyle='--', alpha=0.7, label='目标下限')
    ax2.axhline(y=12, color='red', linestyle='-.', alpha=0.7, label='目标上限')
    
    ma_obstructions = np.convolve(obstructions, np.ones(window)/window, mode='valid')
    ax2.plot(np.arange(window-1, len(obstructions)), ma_obstructions, 'darkgreen', 
            linewidth=2, label=f'{window}-episode平均')
    
    ax2.set_xlabel('Episode')
    ax2.set_ylabel('遮蔽时长 (s)')
    ax2.set_title('遮蔽效果演化')
    ax2.legend()
    ax2.grid(True, alpha=0.3)
    
    # 子图3：时序策略
    ax3 = plt.subplot(2, 3, 3)
    
    if smoke_decisions:
        times = [d['投放时间'] for d in smoke_decisions[:3]]
        delays = [d['起爆延迟'] for d in smoke_decisions[:3]]
        explosion_times = [d['起爆时间'] for d in smoke_decisions[:3]]
        
        # 计算每个烟幕弹的实际有效时长
        durations = [env.calculate_individual_obstruction(i) for i in range(len(smoke_decisions[:3]))]
        
        # 时序甘特图
        for i, (t_launch, t_exp, duration) in enumerate(zip(times, explosion_times, durations)):
            # 投放到起爆
            ax3.barh(i, t_exp - t_launch, left=t_launch, height=0.3, 
                    color='orange', alpha=0.7, label='准备期' if i==0 else '')
            # 烟幕有效期
            ax3.barh(i, duration, left=t_exp, height=0.3, 
                    color='green', alpha=0.7, label='有效期' if i==0 else '')
        
        ax3.set_yticks(range(len(times)))
        ax3.set_yticklabels([f'烟幕{i+1}' for i in range(len(times))])
        ax3.set_xlabel('时间 (s)')
        ax3.set_title('烟幕弹时序安排')
        ax3.set_xlim(0, max([t + d for t, d in zip(explosion_times, durations)]) + 5)
        ax3.legend()
        ax3.grid(True, alpha=0.3, axis='x')
    
    # 子图4：性能对比
    ax4 = plt.subplot(2, 3, 4)
    
    methods = ['问题1\n(单弹)', '问题2\n(优化单弹)', '问题3\n(3弹时序)']
    values = [8.5, 12.3, np.mean(obstructions[-100:])]  # 使用平均值更能代表整体性能
    colors = ['lightcoral', 'lightblue', 'lightgreen']
    
    bars = ax4.bar(methods, values, color=colors, edgecolor='black', linewidth=2)
    
    for bar, val in zip(bars, values):
        height = bar.get_height()
        ax4.text(bar.get_x() + bar.get_width()/2., height + 0.5,
                f'{val:.1f}s', ha='center', va='bottom', fontsize=11, fontweight='bold')
    
    # 添加目标范围参考线
    ax4.axhline(y=10, color='red', linestyle='--', alpha=0.7)
    ax4.axhline(y=12, color='red', linestyle='-.', alpha=0.7)
    
    ax4.set_ylabel('遮蔽时长 (s)')
    ax4.set_title('方法性能对比')
    ax4.set_ylim([0, max(values) + 3])
    ax4.grid(True, alpha=0.3, axis='y')
    
    # 子图5：课程学习效果
    ax5 = plt.subplot(2, 3, 5)
    
    # 分阶段统计
    level1_perf = obstructions[:200]
    level2_perf = obstructions[200:500]
    level3_perf = obstructions[500:]
    
    bp = ax5.boxplot([level1_perf, level2_perf, level3_perf], 
                     labels=['Level 1\n(简单)', 'Level 2\n(中等)', 'Level 3\n(困难)'],
                     patch_artist=True)
    
    colors_box = ['lightblue', 'lightyellow', 'lightgreen']
    for patch, color in zip(bp['boxes'], colors_box):
        patch.set_facecolor(color)
    
    # 添加目标范围参考线
    ax5.axhline(y=10, color='red', linestyle='--', alpha=0.7)
    ax5.axhline(y=12, color='red', linestyle='-.', alpha=0.7)
    
    ax5.set_ylabel('遮蔽时长 (s)')
    ax5.set_title('课程学习各阶段表现')
    ax5.grid(True, alpha=0.3, axis='y')
    
    # 子图6：统计汇总
    ax6 = plt.subplot(2, 3, 6)
    ax6.axis('off')
    
    final_perf = np.mean(obstructions[-100:])
    improvement = (final_perf / values[0] - 1) * 100  # 相对问题1的提升
    
    # 计算平均间隔
    if len(smoke_decisions) > 1:
        times = [d['投放时间'] for d in smoke_decisions[:3]]
        avg_interval = np.mean(np.diff(times))
    else:
        avg_interval = 0
    
    stats_text = f"""
    ============ PPO训练统计 ============
    
    训练设置:
    - 总Episodes: {len(rewards)}
    - 课程等级: 3
    - 智能体类型: Actor-Critic
    
    性能指标:
    - 初始性能: {np.mean(obstructions[:10]):.1f}s
    - 最终性能: {final_perf:.1f}s
    - 相对提升: {improvement:.1f}%
    
    最优策略:
    - 烟幕弹数: {len(smoke_decisions[:3])}枚
    - 平均间隔: {avg_interval:.1f}s
    - 遮蔽覆盖: 接力式
    """
    
    ax6.text(0.1, 0.9, stats_text, transform=ax6.transAxes,
            fontsize=9, verticalalignment='top', family='monospace',
            bbox=dict(boxstyle='round', facecolor='lightyellow', alpha=0.8))
    
    plt.suptitle('问题3 - PPO强化学习训练结果', fontsize=14, fontweight='bold')
    plt.tight_layout()
    
    # 保存图片到Q3可视化文件夹
    save_path = os.path.join(output_dir, 'PPO训练结果分析.png')
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    print(f"\n训练结果可视化已保存至: {save_path}")
    
    plt.show()
    
    return fig

def generate_additional_plots(rewards, obstructions):
    """Produce two extra figures: staged learning curves and performance distributions.

    Args:
        rewards: per-episode total rewards.
        obstructions: per-episode total obstruction durations (s).
    """
    # Figure 1: detailed learning-curve analysis
    fig1 = plt.figure(figsize=(12, 5))
    
    ax1 = plt.subplot(1, 2, 1)
    # Plot each curriculum stage separately
    ax1.plot(rewards[:200], 'b-', alpha=0.5, label='阶段1 (固定策略)')
    ax1.plot(range(200, 500), rewards[200:500], 'g-', alpha=0.5, label='阶段2 (部分随机)')
    ax1.plot(range(500, len(rewards)), rewards[500:], 'r-', alpha=0.5, label='阶段3 (完全学习)')
    ax1.set_xlabel('Episode')
    ax1.set_ylabel('奖励值')
    ax1.set_title('分阶段学习曲线')
    ax1.legend()
    ax1.grid(True, alpha=0.3)
    
    ax2 = plt.subplot(1, 2, 2)
    # Convergence analysis: rolling standard deviation of the rewards
    convergence_window = 100
    convergence_metric = []
    for i in range(convergence_window, len(rewards)):
        std = np.std(rewards[i-convergence_window:i])
        convergence_metric.append(std)
    
    ax2.plot(range(convergence_window, len(rewards)), convergence_metric, 'purple')
    ax2.set_xlabel('Episode')
    ax2.set_ylabel('奖励标准差')
    ax2.set_title('收敛性分析 (100-episode窗口)')
    ax2.grid(True, alpha=0.3)
    
    plt.suptitle('PPO学习过程详细分析', fontsize=14, fontweight='bold')
    plt.tight_layout()
    
    save_path = os.path.join(output_dir, '学习曲线详细分析.png')
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    print(f"学习曲线详细分析已保存至: {save_path}")
    
    plt.show()
    
    # Figure 2: performance-distribution analysis
    fig2 = plt.figure(figsize=(10, 5))
    
    ax3 = plt.subplot(1, 2, 1)
    ax3.hist(obstructions, bins=30, color='skyblue', edgecolor='black', alpha=0.7)
    ax3.axvline(np.mean(obstructions), color='red', linestyle='--', label=f'均值: {np.mean(obstructions):.1f}s')
    ax3.axvline(np.median(obstructions), color='green', linestyle='--', label=f'中位数: {np.median(obstructions):.1f}s')
    # Highlight the 10-12 s target band
    ax3.axvspan(10, 12, color='yellow', alpha=0.3, label='目标范围')
    ax3.set_xlabel('遮蔽时长 (s)')
    ax3.set_ylabel('频次')
    ax3.set_title('遮蔽时长分布')
    ax3.legend()
    ax3.grid(True, alpha=0.3)
    
    ax4 = plt.subplot(1, 2, 2)
    # Empirical cumulative distribution
    sorted_obs = np.sort(obstructions)
    cumulative = np.arange(1, len(sorted_obs) + 1) / len(sorted_obs)
    ax4.plot(sorted_obs, cumulative, 'b-', linewidth=2)
    # Highlight the 10-12 s target band
    ax4.axvspan(10, 12, color='yellow', alpha=0.3, label='目标范围')
    ax4.set_xlabel('遮蔽时长 (s)')
    ax4.set_ylabel('累积概率')
    ax4.set_title('遮蔽时长累积分布')
    ax4.legend()
    ax4.grid(True, alpha=0.3)
    
    plt.suptitle('性能分布统计分析', fontsize=14, fontweight='bold')
    plt.tight_layout()
    
    save_path = os.path.join(output_dir, '性能分布分析.png')
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    print(f"性能分布分析已保存至: {save_path}")
    
    plt.show()

#  4. 主程序执行
if __name__ == "__main__":
    print("="*60)
    print("问题3：FY1投放3枚烟幕弹对M1的时序优化")
    print("="*60)
    
    # 模型训练
    agent, rewards, obstructions = train_ppo_agent(episodes=1000)
    
    # 结果输出
    print("\n" + "="*60)
    print("训练结果")
    print("="*60)
    
    # 使用训练好的智能体进行测试
    env = SmokeEnvironment()
    state = env.reset()
    smoke_decisions = []
    
    done = False
    while not done:
        action = agent.get_action(state.to_array())
        
        if action[0] and env.remaining_smokes > 0 and env.time - env.last_launch_time >= 2:
            smoke_decisions.append({
                '投放时间': env.time,
                '起爆延迟': action[1],
                '起爆时间': env.time + action[1]
            })
        
        state, _, done = env.step(action)
    
    # 生成result1.xlsx
    df_result = pd.DataFrame()
    
    # 固定飞行方向为180度
    direction_angle = 180.0
    
    for i, decision in enumerate(smoke_decisions[:3]):  # 最多3枚
        # 计算每个烟雾弹的有效干扰时长
        effective_duration = env.calculate_individual_obstruction(i)
        
        df_result = pd.concat([df_result, pd.DataFrame({
            '无人机编号': ['FY1'],
            '无人机运动方向': [direction_angle],
            '飞行速度': [110],
            '投放时间': [decision['投放时间']],
            '投放点x': [17800 - 110 * decision['投放时间']],
            '投放点y': [0],
            '投放点z': [1800],
            '起爆时间': [decision['起爆时间']],
            '起爆点x': [17800 - 110 * decision['起爆时间']],
            '起爆点y': [0],
            '起爆点z': [1800 - 4.9 * decision['起爆延迟']**2],
            '有效干扰时长': [effective_duration]
        })], ignore_index=True)
    
    # 保存结果
    df_result.to_excel('result1.xlsx', index=False)
    print("\n投放策略已保存至 result1.xlsx")
    print(f"总遮蔽时长: {env.total_obstruction:.1f}s")
    
    # 打印每个烟雾弹的详细信息
    print("\n各烟雾弹详细信息:")
    for i in range(len(smoke_decisions[:3])):
        print(f"  烟雾弹{i+1}: 有效干扰时长 = {df_result.iloc[i]['有效干扰时长']:.1f}秒")
    print(f"  无人机运动方向: {direction_angle:.1f}度")
    
    # 生成可视化
    print("\n生成可视化分析...")
    fig = visualize_training_results(rewards, obstructions, smoke_decisions, env)
    
    # 生成额外的分析图表
    print("\n生成额外分析图表...")
    generate_additional_plots(rewards, obstructions)
    
    print("\n" + "="*60)
    print(f"结论：通过PPO强化学习，3枚烟幕弹实现{env.total_obstruction:.1f}秒遮蔽")
    print(f"\n所有可视化图表已保存至 '{output_dir}' 文件夹")
    print("="*60)
