import copy
import torch
import torch.nn as nn
from agents.discrete import MADice
from torch.nn import functional as F
from torch.distributions import Categorical
from torch.utils.tensorboard import SummaryWriter
from runner.evaluate import RolloutWorkerDiscrete
from torch.utils.data.dataloader import DataLoader
from concurrent.futures import ThreadPoolExecutor as Pool

import os
import time


class Trainer:

    def __init__(self, model: MADice, logdir, offline_data, n_agents, alpha, lr, f_divergence, seed, device="cuda"):
        """Offline multi-agent trainer for MADice (actor, V network, Q network + mixer).

        :param model: MADice container exposing ``actor``, ``v``, ``q`` and ``q_mix_model`` sub-networks
        :param logdir: directory for TensorBoard logs and checkpoints
        :param offline_data: offline dataset; must expose ``env_name`` and be consumable by DataLoader
        :param n_agents: number of agents
        :param alpha: f-divergence temperature (paper Sec. 3.2), controls correction strength
        :param lr: shared learning rate used by all five optimizers
        :param f_divergence: chosen f-divergence ("kl" / "chisquare" / "soft_chisquare")
        :param seed: random seed, kept for evaluation-env construction and checkpointing
        :param device: torch device string ("cuda" / "cpu")
        """
        self.model = model  # multi-agent model containing the actor, V network and Q network
        self.device = device
        # Paper Sec. 3.2: dual Lagrange multipliers for the constrained
        # stationary-distribution-correction optimization problem.
        self._lamb_v = nn.Parameter(torch.tensor(0.0, dtype=torch.float32, device=device))  # multiplier for the value-function constraint
        self._lamb_e = nn.Parameter(torch.tensor(0.0, dtype=torch.float32, device=device))  # multiplier for the advantage constraint

        # Split the parameters per network so each gets an independent optimizer.
        self.v_param = list(self.model.v.parameters())  # value-network parameters
        # Q parameters include the individual Q nets and the mixing network (paper Sec. 3.3, value decomposition)
        self.q_param = list(self.model.q.parameters()) + list(self.model.q_mix_model.parameters())
        self.actor_param = list(self.model.actor.parameters())  # policy-network parameters

        self.lr = lr
        self.gamma = 0.99
        self.tau = 0.005
        self.grad_norm_clip = 1.0
        self.global_step = 0
        
        self._lamb_scale = 1.0  # scale factor applied to the Lagrange multipliers
        self._alpha = alpha  # paper Sec. 3.2: f-divergence temperature coefficient
        self._f_divergence = f_divergence  # paper Sec. 3.2: selected f-divergence type (kl/chisquare/soft_chisquare)
        self._use_w_tot = True  # whether to use the total occupancy ratio (paper Sec. 3.3)
        self._seed = seed

        self.actor_optimizer = torch.optim.Adam(self.actor_param, lr=self.lr)
        self.v_optimizer = torch.optim.Adam(self.v_param, lr=self.lr)
        self.q_optimizer = torch.optim.Adam(self.q_param, lr=self.lr)
        self._optim_lamb_v = torch.optim.Adam([self._lamb_v], lr=self.lr)
        self._optim_lamb_e = torch.optim.Adam([self._lamb_e], lr=self.lr)

        # Target network (paper Sec. 3.3: stabilizes Q-value estimation)
        self.target_model = copy.deepcopy(model).eval()
        self.target_model.load_state_dict(model.state_dict())

        self.writer = SummaryWriter(logdir)
        self.task_name = logdir

        self.save_dir = os.path.join(logdir, "checkpoints")  # checkpoint directory
        os.makedirs(self.save_dir, exist_ok=True)  # make sure the directory exists

        # Pick the SMAC wrapper by env name: v2 maps are named after races.
        if any(name in offline_data.env_name for name in ["protoss", "terran", "zerg"]):
            from envs.smacv2.env import SMACWrapper
            self.ENV_CLS = SMACWrapper
        else:
            from envs.smacv1.env import SMACWrapper
            self.ENV_CLS = SMACWrapper

        self.n_agents = n_agents
        self.offline_data = offline_data
        self.data_loader = DataLoader(offline_data, shuffle=True, pin_memory=True, batch_size=128, num_workers=0, drop_last=True)

    # Paper Sec. 3.2: r-function associated with the chosen f-divergence,
    # used to map a preactivation to the occupancy ratio w.
    def _r_fn(self, x):
        """Return r(x) for the configured f-divergence.

        :param x: preactivation tensor
        :return: occupancy-ratio tensor of the same shape as ``x``
        :raises ValueError: if ``self._f_divergence`` is not a supported name
            (the original code silently returned ``None`` in that case)
        """
        if self._f_divergence == "kl":
            return torch.exp(x - 1)
        if self._f_divergence == "chisquare":
            return torch.clamp_min(x + 1, 0)
        if self._f_divergence == "soft_chisquare":
            _x = x.clamp_max(0)
            return torch.where(x < 0, torch.exp(_x), x + 1)
        raise ValueError(f"unsupported f-divergence: {self._f_divergence!r}")
    
    # Paper Sec. 3.2: g-function associated with the chosen f-divergence,
    # used to evaluate the divergence term itself.
    def _g_fn(self, x):
        """Return g(x) for the configured f-divergence.

        :param x: preactivation tensor
        :return: f-divergence value tensor of the same shape as ``x``
        :raises ValueError: if ``self._f_divergence`` is not a supported name
            (the original code silently returned ``None`` in that case)
        """
        if self._f_divergence == "kl":
            return (x - 1) * torch.exp(x - 1)
        if self._f_divergence == "chisquare":
            return 0.5 * x ** 2
        if self._f_divergence == "soft_chisquare":
            _x = x.clamp_max(0)
            return torch.where(x < 0, torch.exp(_x) * (_x - 1) + 1, 0.5 * x ** 2)
        raise ValueError(f"unsupported f-divergence: {self._f_divergence!r}")
    
    def soft_update_target(self):
        """Polyak-average the online parameters into the target network."""
        online = self.model.parameters()
        frozen = self.target_model.parameters()
        for src, dst in zip(online, frozen):
            blended = self.tau * src.data + (1 - self.tau) * dst.data
            dst.data.copy_(blended)

    def update(self, states, obs, rewards, next_states, next_obs, actions, avails, is_inits, n_agents):
        """Run one gradient step on the Q network, V network, multiplier lambda_e and the actor.

        One call consumes one mini-batch from the offline data loader.

        :param states: global states fed to the mixing network
        :param obs: per-agent observations, indexed as ``obs[:, :, agent, :]``
        :param rewards: rewards; only agent 0's reward column is used (shared team reward)
        :param next_states: successor global states
        :param next_obs: successor per-agent observations
        :param actions: integer actions with a trailing singleton dim (used with ``gather``)
        :param avails: available-action mask (1 = legal)
        :param is_inits: boolean mask marking episode-initial transitions
        :param n_agents: number of agents
        :return: dict of scalar losses (v/q/actor) rounded to 4 decimals
        """
        self.model.train()
        self.global_step += 1

        states = states.to(self.device)
        obs = obs.to(self.device)
        rewards = rewards.to(self.device)
        next_states = next_states.to(self.device)
        next_obs = next_obs.to(self.device)
        actions = actions.to(self.device)
        avails = avails.to(self.device)
        is_inits = is_inits.to(self.device)

        # Shared team reward: keep agent 0's column only.
        rewards = rewards[:, :, 0, :]
        # NOTE(review): "done" is inferred as state == next_state elementwise —
        # assumes terminal transitions are stored with an unchanged state; confirm with the data pipeline.
        dones = (states==next_states).min(-1)[0].unsqueeze(-1).min(2)[0].float()
        # Append a one-hot agent id so the parameter-shared networks can tell agents apart.
        agent_ids = torch.eye(n_agents, device=self.device).expand(obs.shape[0], obs.shape[1], -1, -1)
        obs = torch.cat((obs, agent_ids), -1)
        next_obs = torch.cat((next_obs, agent_ids), -1)

        # -------------------------- Q-network update (paper Sec. 3.3, value decomposition) --------------------------
        # Per-agent Q values
        q_values = torch.stack([self.model.q.forward(obs[:, :, j, :]) for j in range(n_agents)], 2)
        # Q value of the action actually taken
        q_values = q_values.gather(-1, actions)
        # Paper Sec. 3.3: aggregate individual Q values into a global Q via the mixing network
        mw_q, mb_q = self.model.q_mix_model.forward(states)
        q_values = (mw_q * q_values).sum(-2) + mb_q.squeeze(-1)
        
        # Target Q values (computed with the target network)
        with torch.no_grad():
            # Per-agent V of the next state
            next_v_values = torch.stack([self.target_model.v.forward(next_obs[:, :, j, :]) for j in range(n_agents)], 2)
            # Aggregate to a global V
            mw_next, mb_next = self.target_model.q_mix_model.forward(next_states)
            next_v_values = (mw_next * next_v_values).sum(-2) + mb_next.squeeze(-1)
            # Bellman target: reward + gamma * (1 - done) * V(next state)
            expected_q_values = rewards + self.gamma * (1 - dones) * next_v_values

        # Q loss: mean squared Bellman error
        q_loss = F.mse_loss(q_values, expected_q_values)

        assert not torch.isnan(q_loss).any()
        self.q_optimizer.zero_grad()
        q_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.q_param, self.grad_norm_clip)
        self.q_optimizer.step()
        
        # -------------------------- V-network update (paper Sec. 3.2, stationary distribution correction) --------------------------
        with torch.no_grad():
            # Recompute Q with the freshly updated Q network
            q_values = torch.stack([self.model.q.forward(obs[:, :, j, :]) for j in range(n_agents)], 2)
            q_values = q_values.gather(-1, actions)
            mw_q, mb_q = self.model.q_mix_model.forward(states)

        # Per-agent V values
        v_values = torch.stack([self.model.v.forward(obs[:, :, j, :]) for j in range(n_agents)], 2)
        # Paper Sec. 3.3: individual advantage e_v = w_q * (Q - V)
        e_v = mw_q * (q_values - v_values)

        # Paper Sec. 3.2: preactivation used to derive the occupancy ratio w
        preactivation_v = (e_v - self._lamb_scale * self._lamb_v) / self._alpha
        w_v = self._r_fn(preactivation_v)  # occupancy ratio w
        f_w_v = self._g_fn(preactivation_v).detach()  # f-divergence term (detached to block gradients)

        e_v = e_v.detach()  # freeze e_v so it does not influence the V update
        # Initial-state V loss (paper Sec. 3.2: initial-state constraint)
        init_values = v_values[is_inits]
        v_loss0 = (1 - self.gamma) * torch.mean(init_values) if len(init_values) > 0 else torch.tensor(0.0)
        v_loss1 = torch.mean(- self._alpha * f_w_v)  # f-divergence regularizer
        v_loss2 = torch.mean(w_v * (e_v - self._lamb_v))  # advantage-constraint term
        v_loss3 = self._lamb_v  # Lagrange-multiplier term
        v_loss = v_loss0 + v_loss1 + v_loss2 + v_loss3  # total V loss

        assert not torch.isnan(v_loss).any()
        self.v_optimizer.zero_grad()
        self._optim_lamb_v.zero_grad()
        v_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.v_param, self.grad_norm_clip)
        self.v_optimizer.step()
        self._optim_lamb_v.step()  # update the Lagrange multiplier lambda_v

        # -------------------------- lambda_e update (paper Sec. 3.2) --------------------------
        with torch.no_grad():
            # Q values from the target network
            target_q_values = torch.stack([self.target_model.q.forward(obs[:, :, j, :]) for j in range(n_agents)], 2)
            target_q_values = target_q_values.gather(-1, actions)
            target_w_q, target_b_q = self.target_model.q_mix_model.forward(states)
            # Advantage under the target network
            e_values = target_w_q * (target_q_values - v_values)
            # Occupancy ratio w_e
            preactivation_e = (e_values - self._lamb_scale * self._lamb_e) / self._alpha
            w_e = self._r_fn(preactivation_e)
            f_w_e = self._g_fn(preactivation_e)
            # Paper Sec. 3.3: actor-update weights (clamped at 2 to avoid exploding gradients)
            exp_a = w_e.clamp_max(2).squeeze(-1)
            if self._use_w_tot:
                exp_a = exp_a.sum(-1, True)  # aggregate weights across all agents
        
        # Dual loss for lambda_e (paper Sec. 3.2).
        # NOTE(review): this uses e_v (from the V-update section) rather than
        # e_values computed just above; both are detached constants here, so the
        # gradient w.r.t. _lamb_e (-alpha_scale * w_e + 1 in expectation) is the
        # same either way — confirm against the paper whether e_values was intended.
        lamb_e_loss = torch.mean(- self._alpha * f_w_e + w_e * (e_v - self._lamb_scale * self._lamb_e) + self._lamb_e)

        assert not torch.isnan(lamb_e_loss).any()
        self._optim_lamb_e.zero_grad()
        lamb_e_loss.backward()
        self._optim_lamb_e.step()

        # -------------------------- Actor update (paper Sec. 3.3) --------------------------
        # Per-agent policy logits
        logits = torch.stack([self.model.actor.forward(obs[:, :, j, :]) for j in range(n_agents)], dim=2)
        # Mask unavailable actions (log(0) = -inf for illegal actions)
        logits = logits + avails.log()
        # Normalize to log-probabilities
        logits = logits - logits.logsumexp(-1, True)
        dist = Categorical(logits=logits)
        log_probs = dist.log_prob(actions.squeeze(-1))
        # Paper Sec. 3.3: weighted behavior cloning (WBC) loss, weighted by the occupancy ratio w_e
        actor_loss = -(exp_a * log_probs).mean()

        assert not torch.isnan(actor_loss).any()
        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.actor_param, self.grad_norm_clip)
        self.actor_optimizer.step()

        self.soft_update_target()

        losses = {
            "v_loss": v_loss.item(),
            "q_loss": q_loss.item(),
            "actor_loss": actor_loss.item(),
        }
        
        return {k: round(v, 4) for k, v in losses.items()}
    
    def save_model(self, step, is_best=False, suffix=""):
        """Persist the full training state to ``<logdir>/checkpoints/model.pth``.

        The file name is fixed, so each save overwrites the previous checkpoint
        (per-step / best-model file naming was deliberately disabled upstream).

        :param step: current training step (accepted for interface compatibility;
            the step is persisted via ``global_step`` inside the state dict)
        :param is_best: accepted for interface compatibility; currently unused
        :param suffix: accepted for interface compatibility; currently unused
        """
        # Everything needed to resume training from this exact point.
        save_dict = {
            "model_state_dict": self.model.state_dict(),  # online model parameters
            "target_model_state_dict": self.target_model.state_dict(),  # target-network parameters
            "actor_optimizer_state_dict": self.actor_optimizer.state_dict(),  # actor optimizer
            "v_optimizer_state_dict": self.v_optimizer.state_dict(),  # V-network optimizer
            "q_optimizer_state_dict": self.q_optimizer.state_dict(),  # Q-network optimizer
            "lamb_v_optimizer_state_dict": self._optim_lamb_v.state_dict(),  # lambda_v optimizer
            "lamb_e_optimizer_state_dict": self._optim_lamb_e.state_dict(),  # lambda_e optimizer
            "lamb_v": self._lamb_v.data,  # Lagrange multiplier lambda_v
            "lamb_e": self._lamb_e.data,  # Lagrange multiplier lambda_e
            "global_step": self.global_step,  # current step (for resuming)
            "seed": self._seed,  # random seed (for reproducibility)
        }

        # Plain string (the original used an f-string with no placeholders).
        save_path = os.path.join(self.save_dir, "model.pth")

        torch.save(save_dict, save_path)
        print(f"模型已保存至: {save_path}")

    @staticmethod
    def load_madice_model(
            model_path: str,
            st_dim: int,
            ob_dim: int,
            ac_dim: int,
            n_agents: int,
            h_dim: int = 256,
            device: str = "cuda",
        ):
        """Load a MADice model and training state saved by :meth:`save_model`.

        Fixes over the original:
        - declared ``@staticmethod``: the original ``def`` had no ``self``
          parameter, so calling it on a Trainer instance would have bound
          ``model_path`` to the instance;
        - the final print read ``save_dict['best_return']``, a key that
          :meth:`save_model` never writes (guaranteed KeyError) — it now
          reports the restored ``global_step``;
        - the Lagrange multipliers are restored by assigning fresh
          ``nn.Parameter``s, which registers them on the module even if the
          attributes did not previously exist (the old ``.data`` assignment
          assumed they did).

        :param model_path: path to the saved checkpoint (.pth)
        :param st_dim: state dimension (must match training)
        :param ob_dim: observation dimension (must match training)
        :param ac_dim: action dimension (must match training)
        :param n_agents: number of agents (must match training)
        :param h_dim: hidden-layer width (must match training)
        :param device: device to load onto ("cuda" / "cpu")
        :return: (loaded MADice model, target model, dict of optimizer state dicts)
        """
        # Load the persisted state dict onto the requested device.
        save_dict = torch.load(model_path, map_location=device)

        # Rebuild an empty model with the training-time architecture.
        model = MADice(
            st_dim=st_dim,
            ob_dim=ob_dim,
            ac_dim=ac_dim,
            n_agents=n_agents,
            h_dim=h_dim
        ).to(device)
        model.load_state_dict(save_dict["model_state_dict"])

        # Rebuild and restore the target network.
        target_model = copy.deepcopy(model).eval()
        target_model.load_state_dict(save_dict["target_model_state_dict"])

        # Optimizer states, grouped for resuming training.
        optimizer_states = {
            "actor": save_dict["actor_optimizer_state_dict"],
            "v": save_dict["v_optimizer_state_dict"],
            "q": save_dict["q_optimizer_state_dict"],
            "lamb_v": save_dict["lamb_v_optimizer_state_dict"],
            "lamb_e": save_dict["lamb_e_optimizer_state_dict"],
        }

        # Restore the Lagrange multipliers as registered parameters.
        model._lamb_v = nn.Parameter(save_dict["lamb_v"].to(device))
        model._lamb_e = nn.Parameter(save_dict["lamb_e"].to(device))

        print(f"模型加载完成！global_step: {save_dict['global_step']}")

        return model, target_model, optimizer_states

    def eval(self, actor, step, n_episodes=32):
        """Roll out ``actor`` in a fresh environment and log/return the mean episode return.

        :param actor: policy network (typically a deep copy of the online actor)
        :param step: evaluation index used as the TensorBoard x-axis
        :param n_episodes: number of rollout episodes to average over
        :return: mean episode return across the rollouts
        """
        environment = self.ENV_CLS(self.offline_data.env_name, self._seed)
        worker = RolloutWorkerDiscrete(actor, self.n_agents, self.device)
        mean_return, mean_win_rate = worker.rollout(environment, n_episodes)
        environment.close()
        self.writer.add_scalar("test_return_mean", mean_return, step)
        self.writer.add_scalar("test_battle_won_mean", mean_win_rate, step)
        print(f"Eval - step: {step} - return: {mean_return:.3f} - winrate: {mean_win_rate:.3f}")
        return mean_return

    def train(self, n_epochs, n_evals=300):
        """Run the offline training loop with periodic evaluation and checkpointing.

        Loops over the data loader indefinitely, calling :meth:`update` per
        batch; every ``log_interval`` steps it prints progress, evaluates the
        current actor, and stops (saving a final checkpoint) once ``n_evals``
        evaluations have happened. A checkpoint is also written every
        ``10 * log_interval`` steps.

        :param n_epochs: nominal number of dataset passes; only used to size
            the evaluation interval
        :param n_evals: total number of evaluations before stopping
        """
        # Guard against a zero interval when the dataset is small relative to
        # n_evals: the original unguarded `// n_evals` could yield 0 and raise
        # ZeroDivisionError in the modulo below.
        log_interval = max(1, n_epochs * len(self.data_loader) // n_evals)
        print(f"每 {log_interval} 步评估一次，共评估 {n_evals} 次")
        print(f"len(self.data_loader): {len(self.data_loader)}")

        start_time = time.time()
        total_steps = n_evals * log_interval

        while True:
            for data in self.data_loader:
                losses = self.update(*data, self.n_agents)

                step = self.global_step // log_interval
                if self.global_step % log_interval == 0:
                    # Progress report: throughput and ETA. global_step >= 1 here
                    # (update() increments before this check), so elapsed > 0.
                    elapsed = time.time() - start_time
                    steps_per_sec = self.global_step / elapsed
                    remaining_time = (total_steps - self.global_step) / steps_per_sec
                    print(f"\n=== [{self.global_step}/{total_steps}][{steps_per_sec:.2f} step/s][remaining:{remaining_time/60:.1f} min]")
                    print(f"Step: {self.global_step} - {losses}")
                    # Evaluate a snapshot of the actor so training can't mutate it mid-rollout.
                    self.eval(copy.deepcopy(self.model.actor), step)
                    if step >= n_evals:
                        self.save_model(step=self.global_step)
                        return

                # Periodic checkpoint independent of the evaluation cadence.
                if self.global_step % (log_interval * 10) == 0:
                    self.save_model(step=self.global_step)