import copy
from components.episode_buffer import EpisodeBatch
from modules.critics.discrete_qv import DiscreteCritic, MLPQnetork
from utils.rl_utils import build_normal_gae_targets
import torch as th
from torch.optim import RMSprop, Adam
import numpy as np
from utils.th_utils import get_parameters_num
import torch.nn.functional as F
from torch.distributions import Categorical
from utils.value_norm import ValueNorm

from controllers.basic_sep_controller import SepBasicMAC
from controllers.basic_controller import BasicMAC

class InSPOLearner:
    def __init__(self, mac: SepBasicMAC, scheme, logger, args):
        """In-sample sequential policy optimization (InSPO) learner.

        Holds one policy network (inside ``mac``) and one state-action Q network
        per agent, plus — when ``need_train_behavior`` — behaviour-cloning models
        of the data policy used later for KL regularization and importance
        resampling in ``train()``.

        Args:
            mac: multi-agent controller with one separate network per agent.
            scheme: episode-batch scheme describing the buffer fields.
            logger: stats/console logger.
            args: experiment configuration namespace (mutated in place below
                to configure the behaviour MACs).
        """
        self.args = args
        self.mac = mac
        self.logger = logger

        self.n_agents = args.n_agents  # the number of agents
        self.n_actions = args.n_actions  # the number of actions for each agent
        self.scheme = scheme

        # KL / entropy coefficients are annealed geometrically from *_start to
        # *_end over t_max environment steps (see train()).
        self.alpha_start = getattr(args, "alpha_start", 1) # for q function
        self.alpha_end = getattr(args, "alpha_end", 0.001)
        self.entropy_coef_start = getattr(args, "entropy_coef_start", 0.0)
        self.entropy_coef_end = getattr(args, "entropy_coef_end", 0.0)
        if self.alpha_start == 0 and self.entropy_coef_start == 0:
            raise ValueError('alpha_start and entropy_coef_start cannot be 0 at the same time!')

        self.q_penalty_coeff = getattr(args, "q_penalty_coeff", 0)   # for q function
        self.agent_order = getattr(args, "agent_order", "random")   # agent update order
        # (agent_id, score) pairs consumed by the "semi-greedy" ordering mode
        # of get_agent_order(); initialized with zero scores.
        self.id_scores = [(_i, _s)
                         for (_i, _s) in zip(range(self.n_agents), np.zeros((self.n_agents)))]
        self.IS_ratio_max = getattr(args, "IS_ratio_max", 1)  # clip the resample ratio
        self.IS_ratio_min = getattr(args, "IS_ratio_min", 1)
        self.use_gae = getattr(args, "use_gae", False)
        self.tau = getattr(args, "tau", 0.005)  # lr for soft target update value function
        self.other_policy_pow = getattr(args, "other_policy_pow", 1.0)

        self.device = th.device('cuda' if args.use_cuda else 'cpu')
        self.params = list(mac.parameters())

        # One independent state-action Q network (and Adam optimizer) per agent;
        # target networks are frozen copies updated via soft_update_target().
        self.q_networks = [DiscreteCritic(scheme, args, output_dim=1, agent_id=i, is_state_action_input=True, last_layer_bias=True) for i in range(self.n_agents)]
        self.q_optimizers = [Adam(params=q_network.parameters(), lr=args.critic_lr) for q_network in self.q_networks]
        self.target_qs = [copy.deepcopy(q_network) for q_network in self.q_networks]
        for i in range(self.n_agents):
            for param in self.target_qs[i].parameters():
                param.requires_grad = False
        
        # One optimizer per agent policy network.
        self.policy_optimizers = [Adam(params=agent.parameters(), lr=args.actor_lr) for agent in self.mac.agent]

        # Optional per-agent learned temperature alpha, tuned toward target_KL
        # during train() (SAC-style dual update on log alpha).
        self.auto_alpha = getattr(self.args, "auto_alpha", False)
        if self.auto_alpha:
            self.target_KL = getattr(self.args, "target_KL", 0.1)
            self.log_alpha = []
            self.alpha_optimizers = []
            self.alpha = []
            for agent_id in range(self.n_agents):
                _log_alpha_init = th.log(th.tensor(self.alpha_start, dtype=th.float64, device=self.device))
                _log_alpha = th.nn.Parameter(_log_alpha_init)
                self.log_alpha.append(_log_alpha)
                self.alpha_optimizers.append(Adam([_log_alpha], lr=self.args.alpha_lr))
                self.alpha.append(th.exp(_log_alpha.detach()))
        
        self.log_stats_t = -self.args.learner_log_interval - 1
        self.train_t = 0
        
        # train behavior
        # Behaviour-cloning state: counters plus best-loss tracking used by the
        # early-stopping logic in train_behavior().
        self.need_train_behavior = getattr(self.args, 'need_train_behavior', True)
        if self.need_train_behavior:
            self.behavior_train_steps = 0 
            self.behavior_log_stats_t = 0
            self.last_min_loss = 1e6
            self.last_min_ar_loss = 1e6
            self.epoch_since_last_min_loss = 0
            self.mini_epochs= 1
            
            # Force the data-policy MACs to output stochastic policies
            # (pi_logits + multinomial sampling) with no epsilon exploration.
            # NOTE(review): this mutates the shared args object — any later
            # consumer of these fields sees the overridden values; confirm intended.
            args.mask_before_softmax=True
            args.agent_output_type = 'pi_logits'
            args.action_selector="multinomial"
            args.epsilon_start= 0.0
            args.epsilon_finish= 0.0
            args.epsilon_anneal_time= 500000

            self.behavior_mac = SepBasicMAC(scheme, None, args, data_policy=True)
            self.behavior_params = list(self.behavior_mac.parameters())
            self.behavior_optimizer = Adam(params=self.behavior_params, lr=args.bc_lr)
        
            # Autoregressive model of the *other* agents' data policy, with one
            # optimizer per entry of ar_other_params.
            # NOTE(review): assumes SepBasicMAC.parameters() yields one parameter
            # group per agent (ar_other_params[i] is indexed by agent_id in
            # train_behavior()) — TODO confirm against SepBasicMAC.
            self.ar_other_mac = SepBasicMAC(scheme, None, args, data_policy=True, autoregressive=True)
            self.ar_other_params = list(self.ar_other_mac.parameters())
            self.ar_other_optimizers = [Adam(params=param, lr=args.bc_lr) for param in self.ar_other_params]

    def save_behavior_model(self, path):
        """Persist the behaviour-cloning MAC, the autoregressive MAC and their optimizer states under *path*."""
        self.behavior_mac.save_models(path, prefix="behavior")
        self.ar_other_mac.save_models(path, prefix="ar")
        th.save(self.behavior_optimizer.state_dict(), f"{path}/behavior_opt.th")
        for agent_idx, opt in enumerate(self.ar_other_optimizers):
            th.save(opt.state_dict(), f"{path}/ar{agent_idx}_opt.th")
    def load_behavior_model(self, path):
        """Restore the behaviour-cloning MAC, the autoregressive MAC and their optimizer states from *path*."""
        self.behavior_mac.load_models(path, prefix="behavior")
        self.ar_other_mac.load_models(path, prefix="ar")
        opt_state = th.load(f"{path}/behavior_opt.th", map_location=lambda storage, loc: storage)
        self.behavior_optimizer.load_state_dict(opt_state)
        for agent_idx in range(self.n_agents):
            ar_state = th.load(f"{path}/ar{agent_idx}_opt.th", map_location=lambda storage, loc: storage)
            self.ar_other_optimizers[agent_idx].load_state_dict(ar_state)
    def train_behavior(self, batch: EpisodeBatch):
        """Fit the behaviour-cloning models on one batch of offline data.

        Trains (a) the joint behaviour MAC by maximum likelihood on the taken
        actions and (b) the autoregressive other-agent model, one agent at a
        time in a random order, for ``self.mini_epochs`` passes.

        Args:
            batch: episode batch of offline data.

        Returns:
            tuple: ``(behavior_train_done, behavior_loss, ar_loss)`` where
            ``behavior_train_done`` becomes True once more than 20 consecutive
            calls fail to improve both losses simultaneously.
        """
        # Get the relevant quantities
        self.behavior_train_steps += 1
        seq_len = batch['state'].shape[1] - 1
        actions = batch["actions"][:, :-1]
        actions_onehot = batch['actions_onehot'][:, :-1]  # [batch_size, seq_len, n_agents, n_actions]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        # A step is valid only if it is filled and the episode had not already terminated.
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"][:, :-1]

        mask_agent = mask.unsqueeze(2).repeat(1, 1, actions.shape[2], 1)  # [batch_size, seq_len, n_agents, 1]
        mask_agent_i = mask.unsqueeze(2).repeat(1, 1, actions.shape[2]-1, 1)  # [batch_size, seq_len, n_agents-1, 1]
        
        for _ in range(self.mini_epochs):
            # ---- joint behaviour policy: maximize masked log-likelihood of taken actions ----
            behavior_probs = []
            self.behavior_mac.init_hidden(batch.batch_size)
            for t in range(seq_len):
                bh_agent_outs = self.behavior_mac.forward(batch, t=t)    # [batch_size, n_agents, n_actions]
                behavior_probs.append(bh_agent_outs)
            behavior_probs = th.stack(behavior_probs, dim=1)   # [batch_size, seq_len, n_agents, n_actions]

            # Floor probabilities of unavailable actions so the log below stays finite.
            behavior_probs[avail_actions == 0] = 1e-10
            behavior_probs_taken = th.gather(behavior_probs, dim=3, index=actions)
            log_behavior_probs_taken = th.log(behavior_probs_taken)
            behavior_loss = -(log_behavior_probs_taken * mask_agent).sum() / mask_agent.sum()

            self.behavior_optimizer.zero_grad()
            behavior_loss.backward()
            grad_norm = th.nn.utils.clip_grad_norm_(self.behavior_params, self.args.grad_norm_clip)
            self.behavior_optimizer.step()

            # ---- autoregressive model of the *other* agents, updated per agent in random order ----
            agent_order = list(th.randperm(self.n_agents).numpy())
            ar_loss = 0.
            for agent_id in agent_order:
                ar_other_probs = self.ar_other_mac.forward_ar_data_policy(batch, seq_len, agent_id)
                other_actions = actions_onehot[:, :, th.arange(self.n_agents) != agent_id]  # [batch_size, seq_len, n_agents-1, n_actions]
                ar_other_probs_taken = (ar_other_probs * other_actions).sum(dim=-1, keepdim=True)  # [batch_size, seq_len, n_agents-1, 1]
                log_ar_other_probs_taken = th.log(ar_other_probs_taken + 1e-10)
                ar_other_loss = -(log_ar_other_probs_taken * mask_agent_i).sum() / mask_agent_i.sum()
                # ar_other_loss = -(th.log(ar_other_probs) * other_actions).sum(dim=-1).mean()
                self.ar_other_optimizers[agent_id].zero_grad()
                ar_other_loss.backward()
                ar_grad_norm = th.nn.utils.clip_grad_norm_(self.ar_other_params[agent_id], self.args.grad_norm_clip)
                self.ar_other_optimizers[agent_id].step()
                ar_loss += ar_other_loss.item()

        # Periodic logging every 20 behaviour-training steps.
        if self.behavior_train_steps - self.behavior_log_stats_t >= 20:
            self.logger.log_stat("bc_loss", behavior_loss.item(), self.behavior_train_steps)
            self.logger.log_stat("ar_bc_loss", ar_loss, self.behavior_train_steps)
            self.behavior_log_stats_t = self.behavior_train_steps
            self.logger.console_logger.info("Behavior model training loss: {}, ar model loss: {}, training steps: {}".format(behavior_loss.item(), ar_loss, self.behavior_train_steps))
        # Early stopping: a call "improves" only when BOTH losses beat their best so far.
        if behavior_loss.item() < self.last_min_loss and ar_loss < self.last_min_ar_loss:
            self.last_min_loss = behavior_loss.item()
            self.last_min_ar_loss = ar_loss
            self.epoch_since_last_min_loss = 0
        else:
            self.epoch_since_last_min_loss += 1
        behavior_train_done = self.epoch_since_last_min_loss > 20
        if behavior_train_done:
            self.epoch_since_last_min_loss = 0
            self.logger.log_stat("bc_loss", behavior_loss.item(), self.behavior_train_steps)
            self.logger.log_stat("ar_bc_loss", ar_loss, self.behavior_train_steps)

        return behavior_train_done, behavior_loss.item(), ar_loss
    
    def get_agent_order(self):
        if self.agent_order == "fixed":
            agent_order = list(range(self.n_agents))
        elif self.agent_order == "random":
            agent_order = list(th.randperm(self.n_agents).numpy())
        elif self.agent_order == "semi-greedy":
            agent_order = []
            a_i = 0
            self.id_scores = sorted(self.id_scores, key=lambda i_s: i_s[1], reverse=True)
            while a_i < self.n_agents:
                agent_order.append(self.id_scores[0][0])
                self.id_scores.pop(0)
                a_i += 1
                if len(self.id_scores) > 0:
                    next_i = np.random.choice(len(self.id_scores))
                    agent_order.append(self.id_scores[next_i][0])
                    self.id_scores.pop(next_i)
                    a_i += 1

        return agent_order

    def train(self, batch: EpisodeBatch, t_env: int, episode_num=None, per_weight=None):
        """Run one InSPO training step: sequentially update each agent's Q network and policy.

        For every agent, in the order chosen by ``get_agent_order()``:
          1. evaluate the agent's current policy and the cloned behaviour policy
             on the batch;
          2. build an importance-resampling distribution from the ratio between
             the other agents' current policies and the autoregressive
             behaviour model;
          3. regress the agent's Q network onto KL/entropy-regularized TD
             targets (plus a CQL term and an optional dataset-Q penalty);
          4. update the agent's policy by advantage-weighted regression on the
             (possibly resampled) timesteps.

        Args:
            batch: episode batch of offline data.
            t_env: environment-step counter used for coefficient annealing and logging.
            episode_num: unused here.
            per_weight: unused here.
        """
        # Get the relevant quantities
        seq_len = batch['state'].shape[1] - 1
        states = batch['state']  # [batch_size, seq_len + 1, state_dim]
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :-1]
        actions_onehot = batch['actions_onehot']  # [batch_size, seq_len+1, n_agents, n_actions]
        terminated = batch["terminated"][:, :-1].float()  # [batch_size, seq_len, 1]
        mask = batch["filled"][:, :-1].float()  # [batch_size, seq_len, 1]
        # A step is valid only if it is filled and the episode had not terminated before it.
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"][:, :-1]
        mask_elems = mask.sum().item()

        ## update actor sequentially
        agent_order = self.get_agent_order()

        # the same variables for each agent
        # Geometric annealing of the KL (alpha) and entropy coefficients over t_max.
        if self.alpha_start == 0:
            alpha = 0
        else:
            alpha = self.alpha_start * (self.alpha_end / self.alpha_start) ** (t_env / self.args.t_max)  # alpha_start -> alpha_end
        if self.entropy_coef_start == 0:
            entropy_coef = 0
        else:
            entropy_coef = self.entropy_coef_start * (self.entropy_coef_end / self.entropy_coef_start) ** (t_env / self.args.t_max)
        if getattr(self.args, "auto_entropy", 0) > 0:
            # Per-episode entropy coefficient weighted by the exponentiated,
            # max-shifted episode return (higher-return episodes get more entropy bonus).
            return_t = rewards.clone()
            return_t = return_t.sum(dim=1, keepdim=True)    # [batch_size, 1, 1]
            entropy_coef = th.exp(return_t - return_t.max()) * self.args.auto_entropy * (0.0001 / self.args.auto_entropy)**(t_env / self.args.t_max)

        # for q network
        # Enumerate every action at every state so Q_i(s, .) can be evaluated in one pass.
        all_actions_onehot_i = th.eye(self.n_actions).repeat(states.shape[:2] + (1, 1)).to(states.device)  # [batch_size, seq_len + 1, n_actions, n_actions]
        # NOTE(review): uses self.args.batch_size rather than batch.batch_size —
        # assumes they are always equal; TODO confirm for partial batches.
        all_state_actions_i = th.zeros(
            (self.args.batch_size, seq_len + 1, self.n_actions, states.shape[-1] + self.n_actions)).to(states.device)  # [batch_size, seq_len + 1, n_actions, state_dim+n_actions]
        all_state_actions_i[:, :, :, :states.shape[-1]] = states.unsqueeze(2).repeat(1, 1, self.n_actions, 1)
        all_state_actions_i[:, :, :, states.shape[-1]:] = all_actions_onehot_i

        # Per-agent diagnostics collected for logging at the end of the step.
        q_losses = {}
        policy_losses = {}
        policy_entropy = {}
        q_values = {}
        behavior_entropy = {}
        KL_dist = {}
        other_policy_ratios = {}
        # behavior policy probability
        behavior_probs = []
        self.behavior_mac.init_hidden(batch.batch_size)
        for t in range(seq_len + 1):
            behavior_agent_outs = self.behavior_mac.forward(batch, t=t, test_mode=True)    # [batch_size, n_agents, n_actions]
            behavior_probs.append(behavior_agent_outs)
        behavior_probs = th.stack(behavior_probs, dim=1)   # [batch_size, seq_len + 1, n_agents, n_actions]
        # update network for each agent
        for agent_id in agent_order:

            # policy probabilities
            policy_probs = []
            self.mac.init_hidden(batch.batch_size)
            for t in range(seq_len + 1):
                agent_outs = self.mac.forward(batch, t=t, agent_id=agent_id)    # [batch_size, n_agents, n_actions]
                policy_probs.append(agent_outs)
            policy_probs = th.stack(policy_probs, dim=1)   # [batch_size, seq_len + 1, n_agents, n_actions]
            
            ar_other_probs = self.ar_other_mac.forward_ar_data_policy(batch, seq_len+1, agent_id, set_grad=False) + 1e-10   # [batch_size, seq_len+1, n_agents-1, n_actions]
            other_policy_probs = policy_probs[:, :, th.arange(self.n_agents) != agent_id]   # [batch_size, seq_len+1, n_agents-1, n_actions]
            other_actions = actions_onehot[:, :, th.arange(self.n_agents) != agent_id]   # [batch_size, seq_len+1, n_agents-1, n_actions]

            # compute the importance sampling ratio
            # Geometric mean over the other agents of pi_other^pow / beta_other on the
            # taken joint action; zeroed where the behaviour model is (near-)zero, then clipped.
            other_policy_probs[ar_other_probs < 1e-10] = 0.0
            other_policy_ratio = th.prod(
                ((other_policy_probs[:, :-1].pow(self.other_policy_pow) / ar_other_probs[:, :-1]) * other_actions[:, :-1]).sum(dim=-1), # [batch_size, seq_len, n_agents-1]
                dim=-1, keepdims=True).pow(1 / (self.n_agents - 1)).detach()  # [batch_size, seq_len, 1]
            other_policy_ratio = th.clamp(other_policy_ratio, min=self.IS_ratio_min, max=self.IS_ratio_max)

            IS_ratio_all = other_policy_ratio * mask

            ## importance resampling
            # Draw (with replacement) as many flat timestep indices as there are timesteps,
            # with probability proportional to the clipped, masked IS ratio.
            resampling_probs = (IS_ratio_all / (IS_ratio_all.sum() + 1e-10)).flatten()  # [batch_size * seq_len * 1]
            sample_indices = Categorical(resampling_probs).sample(resampling_probs.shape)  # [batch_size * seq_len * 1]

            # Per-timestep KL(pi_i || behaviour_i) and policy entropy, over available actions only.
            policy_i_kl = (batch["avail_actions"][:, :, agent_id] * (
                policy_probs[:, :, agent_id] * (th.log(policy_probs[:, :, agent_id] + 1e-10) - th.log(behavior_probs[:, :, agent_id] + 1e-10).detach())
                )).sum(-1, keepdim=True)  # [batch_size, seq_len+1, 1]
            policy_i_entropy = (batch["avail_actions"][:, :, agent_id] * (
                -policy_probs[:, :, agent_id] * th.log(policy_probs[:, :, agent_id] + 1e-10)
                )).sum(-1, keepdim=True)  # [batch_size, seq_len+1, 1]
            
            # compute alpha
            # Dual-style update of the per-agent temperature toward target_KL.
            if self.auto_alpha:
                alpha_loss = -((self.log_alpha[agent_id] * (policy_i_kl[:, :-1].detach() - self.target_KL)) * mask).sum() / mask_elems
                self.alpha_optimizers[agent_id].zero_grad()
                alpha_loss.backward()
                self.alpha_optimizers[agent_id].step()
                self.alpha[agent_id] = th.exp(
                    self.log_alpha[agent_id].detach()
                )
                alpha = self.alpha[agent_id]

            q_i_all_actions = self.q_networks[agent_id](all_state_actions_i)[:, :, :, 0]  # [batch_size, seq_len + 1, n_actions]
            with th.no_grad():
                target_q_i_all_actions = self.target_qs[agent_id](all_state_actions_i)[:, :, :, 0]
                
                q_i_all_actions_detach = q_i_all_actions.clone().detach()
                # NOTE(review): softmax_temp == 100 appears to act as a sentinel meaning
                # "hard max" (greedy backup); any other value selects a softmax-weighted
                # backup over available actions — confirm against the config.
                if self.args.softmax_temp == 100:
                    q_i_all_actions_detach[batch["avail_actions"][:, :, agent_id] == 0] = -1e10
                    cur_max_actions = q_i_all_actions_detach.max(dim=-1, keepdim=True)[1]
                    target_max_q_i = th.gather(target_q_i_all_actions, dim=-1, index=cur_max_actions)  # [batch_size, seq_len + 1, 1]
                else:
                    logit_q_detach = q_i_all_actions_detach.clone()
                    logit_q_detach[batch["avail_actions"][:, :, agent_id] == 0] = -1e10
                    logit_q_detach = logit_q_detach - logit_q_detach.max(dim=-1, keepdim=True)[0]
                    cur_softmax_actions = F.softmax(self.args.softmax_temp * logit_q_detach, dim=-1)
                    target_max_q_i = (target_q_i_all_actions * cur_softmax_actions).sum(dim=-1, keepdim=True)
            
            # Regularized TD target: discounted next-step KL penalty / entropy bonus,
            # plus either GAE targets or the one-step bootstrapped return.
            regularized_returns = self.args.gamma * (1 - terminated) * batch["filled"][:, 1:].float() * (
                -alpha * policy_i_kl[:, 1:].detach() + entropy_coef * policy_i_entropy[:, 1:].detach())
            if self.use_gae:
                regularized_returns += build_normal_gae_targets(rewards, batch["filled"][:, 1:].float(), terminated, target_max_q_i, self.args.gamma, self.args.gae_lambda)
            else:
                regularized_returns += rewards + self.args.gamma * target_max_q_i[:, 1:] * (1 - terminated) * batch["filled"][:, 1:].float()
            
            target = regularized_returns

            q_i_pred = th.gather(q_i_all_actions[:, :-1], dim=-1, index=actions[:, :, agent_id])  # [batch_size, seq_len, 1]
            # Conservative Q-learning term: logsumexp over all actions minus the
            # behaviour-policy expectation of Q.
            cql_loss = self.args.cql_coeff * (
                th.logsumexp(q_i_all_actions[:, :-1], dim=-1, keepdims=True) - (q_i_all_actions[:, :-1] * behavior_probs[:, :-1, agent_id, :].detach()).sum(dim=-1, keepdims=True)
                )     # [batch_size, seq_len, 1]
            # TD loss
            td_error = 0.5 * (q_i_pred - target.detach()).pow(2)
            td_error += cql_loss
            # Q loss
            # Without IS clipping configured (both bounds 1) average over the mask;
            # otherwise average over the importance-resampled timesteps.
            if (self.IS_ratio_min == 1) and (self.IS_ratio_max == 1):
                q_i_loss = (td_error * mask).sum() / mask_elems
            else:
                q_i_loss = (td_error * mask).flatten()[sample_indices].sum() / mask.flatten()[sample_indices].sum()
            if self.q_penalty_coeff > 0:
                # Penalize deviation from the (min of online/target) dataset Q value,
                # downweighted where the policy already takes the dataset action
                # (probabilities >= 0.4 are treated as 1, zeroing the penalty there).
                target_q_taken = th.gather(target_q_i_all_actions, dim=-1, index=batch["actions"][:, :, agent_id])
                q_taken = th.gather(q_i_all_actions, dim=-1, index=batch["actions"][:, :, agent_id])
                ori_q_taken = th.min(target_q_taken, q_taken)[:, :-1]   # [batch_size, seq_len, 1]
                prob_i_taken_detach = th.gather(policy_probs[:, :-1, agent_id], dim=-1, index=actions[:, :, agent_id]).detach()
                prob_i_taken_detach[prob_i_taken_detach >= 0.4] = 1.0
                q_penalty_loss = self.q_penalty_coeff * (1 - prob_i_taken_detach) * (q_i_pred - ori_q_taken).pow(2)
                q_i_loss += (q_penalty_loss * mask).sum() / mask_elems

            q_losses[agent_id] = q_i_loss.item()
            self.q_optimizers[agent_id].zero_grad()
            q_i_loss.backward()
            q_grad_norm = th.nn.utils.clip_grad_norm_(self.q_networks[agent_id].parameters(), self.args.grad_norm_clip)
            self.q_optimizers[agent_id].step()

            self.soft_update_target(self.q_networks[agent_id], self.target_qs[agent_id])

            # Advantage under the *target* Q network relative to the current policy's value.
            with th.no_grad():
                q_values[agent_id] = q_i_all_actions
                v_value = th.sum(policy_probs[:, :-1, agent_id] * target_q_i_all_actions[:, :-1], dim=-1, keepdim=True).detach()
                adv_value = target_q_i_all_actions[:, :-1] - v_value
            
            ## variates record
            with th.no_grad():
                unmasked_entropy_i = (-policy_probs[:, :-1, agent_id] * th.log(policy_probs[:, :-1, agent_id] + 1e-10)).sum(dim=-1, keepdim=True)
                policy_entropy_i = (unmasked_entropy_i * mask).sum() / mask_elems
                policy_entropy[agent_id] = policy_entropy_i.detach().item()
                unmasked_behavior_entropy_i = (-behavior_probs[:, :-1, agent_id] * th.log(behavior_probs[:, :-1, agent_id] + 1e-10)).sum(dim=-1, keepdim=True)
                behavior_entropy_i = (unmasked_behavior_entropy_i * mask).sum() / mask_elems
                behavior_entropy[agent_id] = behavior_entropy_i.item()
                unmasked_KL_dist_i = (policy_probs[:, :-1, agent_id] * (
                                th.log(policy_probs[:, :-1, agent_id] + 1e-10) - th.log(behavior_probs[:, :-1, agent_id] + 1e-10))).sum(dim=-1, keepdim=True)
                KL_dist_i = (unmasked_KL_dist_i * mask).sum() / mask_elems
                KL_dist[agent_id] = KL_dist_i.item()
                
                # Entropy of the uniform policy over available actions — the
                # upper bound on this agent's achievable policy entropy.
                uniform_policy_i = avail_actions[:, :, agent_id] / (avail_actions[:, :, agent_id].sum(dim=-1, keepdim=True) + 1e-10)
                unmasked_max_entropy_i = (-uniform_policy_i * th.log(uniform_policy_i + 1e-10)).sum(dim=-1, keepdim=True)
                max_policy_entropy_i = (unmasked_max_entropy_i * mask).sum() / mask_elems

            # Advantage-weighted regression: weight log pi(a|s) by exp(adv / (alpha + entropy_coef)),
            # with the exponent clamped to avoid overflow.
            adv_i_pred = th.gather((
                adv_value - entropy_coef * th.log(behavior_probs[:, :-1, agent_id, :] + 1e-5) * avail_actions[:, :, agent_id]
                ), dim=-1, index=actions[:, :, agent_id])  # [batch_size, seq_len, 1]
            z = 1 / (alpha + entropy_coef) * adv_i_pred     # [batch_size, seq_len, 1]
            z = th.clamp(z, max=5.0)    # exp(5) = 148, exp(10) = 22026
            exp_a = th.exp(z).detach()    # [batch_size, seq_len, 1]
            prob_i_taken = th.gather(policy_probs[:, :-1, agent_id], dim=-1, index=actions[:, :, agent_id])    # [batch_size, seq_len, 1]
            log_probs = th.log(prob_i_taken + 1e-10)
            if (self.IS_ratio_min == 1) and (self.IS_ratio_max == 1):
                policy_loss = -(exp_a * log_probs * mask).sum() / mask_elems
            else:
                policy_loss = -(exp_a * log_probs * mask).flatten()[sample_indices].sum() / mask.flatten()[sample_indices].sum()
            
            if self.agent_order == "semi-greedy":
                # Score this agent by mean |A/V|; get_agent_order() consumes these next step.
                score = th.abs(adv_value / v_value).sum(dim=-1, keepdim=True)     # [batch_size, seq_len, 1]
                score = (score * mask).sum() / mask_elems
                self.id_scores.append((agent_id, score.cpu().numpy()))

            self.policy_optimizers[agent_id].zero_grad()
            policy_loss.backward()
            policy_grad_norm = th.nn.utils.clip_grad_norm_(self.mac.agent[agent_id].parameters(), self.args.grad_norm_clip)
            self.policy_optimizers[agent_id].step()
            policy_losses[agent_id] = policy_loss.item()

            # Per-agent logging (environment-specific diagnostics for the toy tasks).
            if t_env - self.log_stats_t >= self.args.learner_log_interval:
                if ("matrix_game" in self.args.env):
                    self.logger.log_stat(f'samlpe_agent{agent_id}_action_0', th.sum(actions[:, :, agent_id].flatten()[sample_indices] == 0), t_env)
                    self.logger.log_stat(f'samlpe_agent{agent_id}_action_1', th.sum(actions[:, :, agent_id].flatten()[sample_indices] == 1), t_env)
                if "bridge" in self.args.env:
                    optim_state_indices = th.where(rewards.cumsum(1)[:, -1] > -1.26)[0]
                    optim_mask = th.zeros_like(states[:, :-1])
                    optim_mask[optim_state_indices] = 1
                    sampled_mask = th.zeros_like(states[:, :-1]).flatten()
                    sampled_mask[sample_indices] = 1
                    optim_sampled = sampled_mask * optim_mask.flatten()
                    self.logger.log_stat(f'optimal_states_sampled_{agent_id}', (optim_sampled.sum() / sample_indices.shape[0]).item(), t_env)
                self.logger.log_stat(f'agent_{agent_id}_IS_ratio_max_ratio', ((IS_ratio_all==self.IS_ratio_max) * mask).sum() / mask_elems, t_env)
                self.logger.log_stat(f'agent_{agent_id}_IS_ratio_min_ratio', ((IS_ratio_all==self.IS_ratio_min) * mask).sum() / mask_elems, t_env)
                self.logger.log_stat(f'max_entropy_agent_{agent_id}', max_policy_entropy_i, t_env)
                self.logger.log_stat(f'alpha_{agent_id}', alpha, t_env)


        # Step-level logging once per learner_log_interval environment steps.
        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            if getattr(self.args, "auto_entropy", 0) > 0:
                self.logger.log_stat(f'entropy_coef_mean', entropy_coef.mean(), t_env)
                self.logger.log_stat(f'entropy_coef_max', entropy_coef.max(), t_env)
            else:
                self.logger.log_stat(f'entropy_coef', entropy_coef, t_env)
            if ("matrix_game" in self.args.env) or ("bridge" in self.args.env):
                for x in range(self.n_agents):
                    for y in range(self.n_actions):
                        self.logger.log_stat(f'policy_agent_{x}_action_{y}', policy_probs[:, 0, x, y].mean().item(), t_env)
                        self.logger.log_stat(f'behavior_agent_{x}_action_{y}', behavior_probs[:, 0, x, y].mean().item(), t_env)
                        self.logger.log_stat(f'q-value_agent{x}_action_{y}', q_values[x][:, 0, y].mean().item(), t_env)
            else:
                # for x in range(self.n_agents):
                for x in range(2):  # only print 2 agent
                    for y in range(self.n_actions):
                        self.logger.log_stat(f'policy_agent_{x}_action_{y}', policy_probs[:, 5, x, y].mean().item(), t_env)
                        self.logger.log_stat(f'behavior_agent_{x}_action_{y}', behavior_probs[:, 5, x, y].mean().item(), t_env)
            for agent in range(self.n_agents):
                self.logger.log_stat(f'policy_loss_agent_{agent}', policy_losses[agent], t_env)
                self.logger.log_stat(f'q_loss_agent_{agent}', q_losses[agent], t_env)
                self.logger.log_stat(f'policy_entropy_agent_{agent}', policy_entropy[agent], t_env)
                self.logger.log_stat(f'behavior_entropy_agent_{agent}', behavior_entropy[agent], t_env)
                self.logger.log_stat(f'KL_dist_agent_{agent}', KL_dist[agent], t_env)
            self.log_stats_t = t_env

    def soft_update_target(self, network, target_network):
        for param, target_param in zip(network.parameters(), target_network.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)


    def cuda(self):
        """Move the policy MAC, the (optional) behaviour models and every per-agent Q network to the GPU."""
        self.mac.cuda()
        if self.need_train_behavior:
            self.behavior_mac.cuda()
            self.ar_other_mac.cuda()
        for online_q, target_q in zip(self.q_networks, self.target_qs):
            online_q.cuda()
            target_q.cuda()

            
    def save_models(self, path, training_state=None):
        """Checkpoint every learnable component (and the optional *training_state*) into *path*."""
        if self.need_train_behavior:
            self.behavior_mac.save_models(path, prefix='behavior')
            th.save(self.behavior_optimizer.state_dict(), f"{path}/behavior_optimizer.th")
            self.ar_other_mac.save_models(path, prefix='ar_behavior')
            for idx, ar_opt in enumerate(self.ar_other_optimizers):
                th.save(ar_opt.state_dict(), f"{path}/ar_other_optimizer_{idx}.th")
        self.mac.save_models(path, prefix="InSPO")
        for idx in range(self.n_agents):
            th.save(self.q_networks[idx].state_dict(), f"{path}/q_network_{idx}.th")
            th.save(self.q_optimizers[idx].state_dict(), f"{path}/q_optimizer_{idx}.th")
            th.save(self.target_qs[idx].state_dict(), f"{path}/target_qs_{idx}.th")
            th.save(self.policy_optimizers[idx].state_dict(), f"{path}/policy_optimizer_{idx}.th")
        th.save(training_state, f"{path}/training_state.th")

    def load_models(self, path):
        """Restore every learnable component from *path* and return the saved training state."""
        cpu_map = lambda storage, loc: storage
        if self.need_train_behavior:
            self.behavior_mac.load_models(path, prefix='behavior')
            self.behavior_optimizer.load_state_dict(th.load(f"{path}/behavior_optimizer.th", map_location=cpu_map))
            self.ar_other_mac.load_models(path, prefix='ar_behavior')
            for idx in range(self.n_agents):
                self.ar_other_optimizers[idx].load_state_dict(th.load(f"{path}/ar_other_optimizer_{idx}.th", map_location=cpu_map))
        self.mac.load_models(path, prefix="InSPO")
        for idx in range(self.n_agents):
            self.q_networks[idx].load_state_dict(th.load(f"{path}/q_network_{idx}.th", map_location=cpu_map))
            self.q_optimizers[idx].load_state_dict(th.load(f"{path}/q_optimizer_{idx}.th", map_location=cpu_map))
            self.target_qs[idx].load_state_dict(th.load(f"{path}/target_qs_{idx}.th", map_location=cpu_map))
            self.policy_optimizers[idx].load_state_dict(th.load(f"{path}/policy_optimizer_{idx}.th", map_location=cpu_map))
        return th.load(f"{path}/training_state.th")
