import os
import shutil
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from collections import deque
import random
import matplotlib.pyplot as plt
from env.airsim_gym3 import AirSimMultiAgentEnv
import datetime


# Actor network
class Actor(nn.Module):
    """Deterministic policy MLP: maps a state to an action in [-1, 1]."""

    def __init__(self, state_dim, action_dim, hidden_dim):
        super().__init__()
        # Attribute names fc1/fc2/fc3 are kept so saved state_dicts stay loadable.
        self.fc1 = nn.Linear(state_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, action_dim)

    def forward(self, state):
        hidden = F.relu(self.fc1(state))
        hidden = F.relu(self.fc2(hidden))
        # tanh squashes every action component into the [-1, 1] box.
        return torch.tanh(self.fc3(hidden))


# Critic network
class Critic(nn.Module):
    """Q-network: scores one (state, action) pair with a scalar value."""

    def __init__(self, state_dim, action_dim, hidden_dim):
        super().__init__()
        # Attribute names fc1/fc2/fc3 are kept so saved state_dicts stay loadable.
        self.fc1 = nn.Linear(state_dim + action_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, 1)

    def forward(self, state, action):
        # Concatenate state and action along the feature dimension.
        joint = torch.cat([state, action], 1)
        hidden = F.relu(self.fc1(joint))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)


class MATD3:
    """One independent TD3 learner (actor + twin critics) per agent.

    NOTE(review): each critic only sees its own agent's state/action, so this
    is effectively independent per-agent TD3 rather than a centralized-critic
    MATD3 — confirm whether a joint critic was intended.
    """

    def __init__(self, team_size, state_dim, action_dim, hidden_dim, actor_lr, critic_lr, gamma, tau, noise_std,
                 noise_clip, policy_delay=2):
        """
        Args:
            team_size: number of agents (one actor/critic set per agent).
            state_dim, action_dim, hidden_dim: per-agent network dimensions.
            actor_lr, critic_lr: Adam learning rates.
            gamma: discount factor.
            tau: soft-update coefficient for target networks.
            noise_std, noise_clip: target-policy-smoothing noise scale/clip.
            policy_delay: actor/target update period (TD3 delayed policy
                update); defaults to 2, matching the previously hard-coded value.
        """
        self.team_size = team_size
        self.gamma = gamma
        self.tau = tau
        self.noise_std = noise_std
        self.noise_clip = noise_clip
        self.policy_delay = policy_delay

        # Per-agent actor and twin critics (two critics curb Q overestimation).
        self.actors = [Actor(state_dim, action_dim, hidden_dim) for _ in range(team_size)]
        self.critics1 = [Critic(state_dim, action_dim, hidden_dim) for _ in range(team_size)]
        self.critics2 = [Critic(state_dim, action_dim, hidden_dim) for _ in range(team_size)]

        # Target networks start as exact copies of the online networks.
        self.target_actors = [Actor(state_dim, action_dim, hidden_dim) for _ in range(team_size)]
        self.target_critics1 = [Critic(state_dim, action_dim, hidden_dim) for _ in range(team_size)]
        self.target_critics2 = [Critic(state_dim, action_dim, hidden_dim) for _ in range(team_size)]
        for i in range(team_size):
            self.target_actors[i].load_state_dict(self.actors[i].state_dict())
            self.target_critics1[i].load_state_dict(self.critics1[i].state_dict())
            self.target_critics2[i].load_state_dict(self.critics2[i].state_dict())

        # One optimizer per network.
        self.actor_optimizers = [torch.optim.Adam(a.parameters(), lr=actor_lr) for a in self.actors]
        self.critic_optimizers1 = [torch.optim.Adam(c.parameters(), lr=critic_lr) for c in self.critics1]
        self.critic_optimizers2 = [torch.optim.Adam(c.parameters(), lr=critic_lr) for c in self.critics2]

    def select_action(self, states):
        """Return each agent's deterministic action for its own state.

        No exploration noise is added here; callers must add their own noise
        during training if exploration is desired.
        """
        actions = []
        # Inference only: no_grad avoids building a needless autograd graph.
        with torch.no_grad():
            for i in range(self.team_size):
                state = torch.FloatTensor(states[i]).unsqueeze(0)
                actions.append(self.actors[i](state).cpu().numpy().flatten())
        return actions

    def update(self, replay_buffer, batch_size, update_actor_interval):
        """Perform one TD3 gradient step per agent from a sampled mini-batch.

        Args:
            replay_buffer: buffer exposing __len__ and sample(batch_size).
            batch_size: mini-batch size; no-op until the buffer holds this many.
            update_actor_interval: the caller's running step counter; the actor
                and target networks are refreshed only when it is a multiple of
                self.policy_delay (TD3 delayed policy update).
        """
        if len(replay_buffer) < batch_size:
            return

        states, actions, rewards, next_states, dones = replay_buffer.sample(batch_size)

        for i in range(self.team_size):
            # Slice out agent i's view of the joint batch.
            state = torch.FloatTensor(np.array([s[i] for s in states]))
            action = torch.FloatTensor(np.array([a[i] for a in actions]))
            reward = torch.FloatTensor(np.array([r[i] for r in rewards])).unsqueeze(1)
            next_state = torch.FloatTensor(np.array([ns[i] for ns in next_states]))
            done = torch.FloatTensor(np.array([d[i] for d in dones])).unsqueeze(1)

            # Target computation needs no gradients (it was previously detached).
            with torch.no_grad():
                # Target policy smoothing: clipped noise on the target action.
                next_action = self.target_actors[i](next_state)
                noise = torch.normal(0, self.noise_std, next_action.shape).clamp(-self.noise_clip, self.noise_clip)
                next_action = (next_action + noise).clamp(-1, 1)

                # Clipped double-Q: bootstrap from the smaller target estimate.
                target_q1 = self.target_critics1[i](next_state, next_action)
                target_q2 = self.target_critics2[i](next_state, next_action)
                target_q = reward + (1 - done) * self.gamma * torch.min(target_q1, target_q2)

            # Update both critics toward the shared target.
            current_q1 = self.critics1[i](state, action)
            critic_loss1 = F.mse_loss(current_q1, target_q)
            self.critic_optimizers1[i].zero_grad()
            critic_loss1.backward()
            self.critic_optimizers1[i].step()

            current_q2 = self.critics2[i](state, action)
            critic_loss2 = F.mse_loss(current_q2, target_q)
            self.critic_optimizers2[i].zero_grad()
            critic_loss2.backward()
            self.critic_optimizers2[i].step()

            # Delayed policy update: refresh actor and targets only every
            # policy_delay steps of the caller's counter.
            if update_actor_interval % self.policy_delay == 0:
                # Maximize Q1 of the current policy's action (negated for descent).
                actor_loss = -self.critics1[i](state, self.actors[i](state)).mean()
                self.actor_optimizers[i].zero_grad()
                actor_loss.backward()
                self.actor_optimizers[i].step()

                # Polyak averaging of the target networks.
                self._soft_update(self.target_actors[i], self.actors[i])
                self._soft_update(self.target_critics1[i], self.critics1[i])
                self._soft_update(self.target_critics2[i], self.critics2[i])

    def _soft_update(self, target, source):
        """Polyak update: target <- tau * source + (1 - tau) * target."""
        for target_param, param in zip(target.parameters(), source.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

    def save_model(self, path):
        """Save every online network's weights under `path` (created if missing)."""
        # exist_ok avoids the check-then-create race of the previous version.
        os.makedirs(path, exist_ok=True)
        for i in range(self.team_size):
            torch.save(self.actors[i].state_dict(), os.path.join(path, f"actor_{i}.pth"))
            torch.save(self.critics1[i].state_dict(), os.path.join(path, f"critic1_{i}.pth"))
            torch.save(self.critics2[i].state_dict(), os.path.join(path, f"critic2_{i}.pth"))

    def load_model(self, path):
        """Load any weight files present under `path`; missing files are skipped.

        map_location="cpu" lets checkpoints written on a GPU machine load on
        CPU-only hosts (these networks are never moved off the CPU).
        """
        for i in range(self.team_size):
            actor_path = os.path.join(path, f"actor_{i}.pth")
            critic1_path = os.path.join(path, f"critic1_{i}.pth")
            critic2_path = os.path.join(path, f"critic2_{i}.pth")
            if os.path.exists(actor_path):
                self.actors[i].load_state_dict(torch.load(actor_path, map_location="cpu"))
            if os.path.exists(critic1_path):
                self.critics1[i].load_state_dict(torch.load(critic1_path, map_location="cpu"))
            if os.path.exists(critic2_path):
                self.critics2[i].load_state_dict(torch.load(critic2_path, map_location="cpu"))


class ReplayBuffer:
    """Fixed-capacity FIFO store of joint transitions for off-policy training."""

    def __init__(self, capacity):
        # deque with maxlen silently evicts the oldest transition when full.
        self.buffer = deque(maxlen=capacity)

    def add(self, state, action, reward, next_state, done):
        """Append one joint transition (all agents' data bundled together)."""
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        """Uniformly sample a batch and transpose it into per-field tuples."""
        transitions = random.sample(self.buffer, batch_size)
        # zip(*...) turns a list of 5-tuples into 5 field-wise tuples.
        return tuple(zip(*transitions))

    def __len__(self):
        return len(self.buffer)


def plot_all_metrics(metrics_dict, episode, plots_dir, episode_interval=50):
    """Plot every training metric as one subplot of a single figure and save it.

    Each curve is smoothed with a moving average and drawn with a ±1 std-dev
    band; the raw points are overlaid as a faint scatter.

    Args:
        metrics_dict: {metric_name: list of values}, one value per logging
            interval; all lists are assumed to be the same length.
        episode: current episode number (used only in the figure title).
        plots_dir: directory the PNG is written into.
        episode_interval: episodes between consecutive metric values; the
            x-axis is episode_interval * (index + 1). Defaults to 50 to match
            the previously hard-coded spacing.
    """
    # Guard: previously an empty dict crashed on list(...)[0] below.
    if not metrics_dict:
        return

    # 2x3 grid: room for up to five metrics plus one spare axis.
    fig, axes = plt.subplots(2, 3, figsize=(18, 10))
    fig.suptitle(f'Training Metrics (Up to Episode {episode})', fontsize=16)
    axes = axes.flatten()

    # All metrics share the same x positions (same logging cadence).
    any_metric = next(iter(metrics_dict.values()))
    x_values = [episode_interval * (i + 1) for i in range(len(any_metric))]

    # Moving-average window: at most 5 samples.
    window_size = min(5, len(x_values)) if len(x_values) > 0 else 1

    for i, (metric_name, values) in enumerate(metrics_dict.items()):
        if i >= 5:  # the layout only has room for 5 metrics
            break

        ax = axes[i]
        values_array = np.array(values)

        if len(values) > window_size:
            # Moving average for the smoothed curve.
            smoothed = np.convolve(values_array, np.ones(window_size) / window_size, mode='valid')

            # Rolling std-dev over the same windows, for the error band.
            std_values = np.array([np.std(values_array[j:j + window_size])
                                   for j in range(len(values) - window_size + 1)])

            # 'valid' convolution shortens the series; align x accordingly.
            smoothed_x = x_values[window_size - 1:]

            ax.plot(smoothed_x, smoothed, '-', linewidth=2, label='Smoothed')
            ax.scatter(x_values, values, alpha=0.3, label='Original')
            ax.fill_between(smoothed_x, smoothed - std_values, smoothed + std_values,
                            alpha=0.2, label='±1 StdDev')
        else:
            # Too few points to smooth — plot the raw data only.
            ax.plot(x_values, values, 'o-', label='Data')

        ax.set_title(metric_name.replace('_', ' '))
        ax.set_xlabel('Episodes')
        ax.set_ylabel(metric_name.replace('_', ' '))
        ax.grid(True, alpha=0.3)
        ax.legend()

    # Drop the unused sixth axis when five or fewer metrics were drawn.
    if len(metrics_dict) < 6:
        fig.delaxes(axes[5])

    plt.tight_layout(rect=[0, 0, 1, 0.95])
    plt.savefig(os.path.join(plots_dir, 'training_metrics.png'))
    plt.close(fig)


# Training loop
def train():
    """Train MATD3 agents in the AirSim multi-agent environment.

    Creates a timestamped results directory (logs/plots/weights), runs the
    training loop, and periodically checkpoints weights and logs progress.
    """
    # Timestamped run directory so repeated runs never collide.
    result_base_dir = "results"
    current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    env_name = f"matd3_airsim_{current_time}"
    result_dir = os.path.join(result_base_dir, env_name)

    logs_dir = os.path.join(result_dir, "logs")
    plots_dir = os.path.join(result_dir, "plots")
    weights_dir = os.path.join(result_dir, "weights")

    # The run directory is timestamped, so these are always fresh; exist_ok
    # guards against two runs starting in the same second (previously this
    # rmtree'd a directory that could not normally exist — dead code).
    for directory in (logs_dir, plots_dir, weights_dir):
        os.makedirs(directory, exist_ok=True)

    log_file = os.path.join(logs_dir, "training_log.txt")

    def log_message(message):
        # Append-mode open per call keeps lines flushed even if we crash.
        with open(log_file, "a") as f:
            f.write(message + "\n")

    env = AirSimMultiAgentEnv()
    team_size = env.num_agents
    state_dim = env.observation_space.shape[1]
    action_dim = env.action_space.shape[0]

    # Hyperparameters.
    hidden_dim = 128
    actor_lr = 1e-3
    critic_lr = 1e-3
    gamma = 0.99
    tau = 0.005
    noise_std = 0.2
    noise_clip = 0.5
    batch_size = 64
    replay_buffer_capacity = 100000
    total_episodes = 1000

    matd3 = MATD3(team_size, state_dim, action_dim, hidden_dim, actor_lr, critic_lr, gamma, tau, noise_std, noise_clip)
    replay_buffer = ReplayBuffer(replay_buffer_capacity)

    # NOTE(review): the loss entries are never populated — MATD3.update does
    # not report its losses; only the first two lists are filled below.
    metrics = {
        'avg_total_reward': [],
        'avg_episode_length': [],
        'avg_actor_loss': [],
        'avg_critic1_loss': [],
        'avg_critic2_loss': []
    }

    # Running environment-step counter; MATD3.update uses it to gate the
    # delayed actor/target updates (previously misnamed update_actor_interval).
    total_steps = 0

    for episode in range(total_episodes):
        states = env.reset()
        episode_reward = 0
        done = [False] * team_size
        episode_length = 0

        # NOTE(review): training ends an episode when ANY agent is done,
        # while test() runs until ALL are done — confirm which is intended.
        while not any(done):
            actions = matd3.select_action(states)
            next_states, rewards, done, _ = env.step(actions)

            replay_buffer.add(states, actions, rewards, next_states, done)
            matd3.update(replay_buffer, batch_size, total_steps)
            total_steps += 1

            states = next_states
            episode_reward += sum(rewards)
            episode_length += 1

        metrics['avg_total_reward'].append(episode_reward)
        metrics['avg_episode_length'].append(episode_length)

        # Checkpoint and log every 50 episodes (including episode 0).
        if episode % 50 == 0:
            # plot_all_metrics(metrics, episode, plots_dir)
            matd3.save_model(weights_dir)
            log_message(
                f"Episode {episode}: AvgTotalReward={episode_reward}, AvgEpisodeLength={episode_length}")

    env.close()


# Test loop
def test():
    """Evaluate saved MATD3 weights in the environment for a few episodes."""
    env = AirSimMultiAgentEnv()
    team_size = env.num_agents
    state_dim = env.observation_space.shape[1]
    action_dim = env.action_space.shape[0]

    # Same hyperparameters as training; only the network shapes matter here.
    hidden_dim = 128
    actor_lr = 1e-3
    critic_lr = 1e-3
    gamma = 0.99
    tau = 0.005
    noise_std = 0.2
    noise_clip = 0.5

    matd3 = MATD3(team_size, state_dim, action_dim, hidden_dim, actor_lr, critic_lr, gamma, tau, noise_std, noise_clip)
    matd3.load_model("results/matd3_airsim_xxxx-xxxxxx/weights")  # replace with the actual weights directory

    total_test_episodes = 10
    for ep in range(total_test_episodes):
        obs = env.reset()
        total_reward = 0
        done = [False] * team_size

        # Run until every agent reports done.
        while not all(done):
            acts = matd3.select_action(obs)
            obs, rewards, done, _ = env.step(acts)
            total_reward += sum(rewards)

        print(f"Test Episode {ep}: Reward = {total_reward}")

    env.close()


# Entry point: run training by default; uncomment test() to evaluate instead.
if __name__ == "__main__":
    train()
    # test()