import torch
import torch.nn.functional as F
from utils.networks import MLPNetwork
from utils.misc import soft_update, average_gradients
from utils.agents import TD3MultiAgent, DDPGAgent
import itertools
import numpy as np
from utils.tb_log import log_and_print
from utils.noise import action_noise
import random

# Shared mean-squared-error criterion used for the twin-critic TD losses.
MSELoss = torch.nn.MSELoss()

class MATD3CC(object):
    """
    Multi-agent TD3 with a centralized (shared) twin critic.

    A single critic scores the joint (state, all agents' actions) input while
    each agent keeps its own deterministic policy.  For the predator/prey
    environments ('simple_tag', 'simple_world') a set of fixed, pretrained
    DDPG prey agents is kept alongside the learning predators; preys are
    never trained here.
    """

    def __init__(
        self,
        agent_init_params,
        alg_types,
        adv_init_params=None,
        gamma=0.95,
        tau=0.01,
        lr=0.01,
        hidden_dim=64,
        discrete_action=False,
        gaussian_noise_std=None,
        agent_max_actions=None,
        env_id=None,
        **kwargs
    ):
        """
        Inputs:
            agent_init_params (dict): constructor kwargs for TD3MultiAgent
                (num_in_pol, num_out_pol, num_in_critic, num_agent)
            alg_types (list of str): algorithm name per learning agent; its
                length defines self.nagents
            adv_init_params (list of dict): per-prey DDPGAgent kwargs, only
                used for 'simple_tag' / 'simple_world'
            gamma (float): discount factor
            tau (float): soft-update rate for target networks
            lr (float): learning rate for all optimizers
            hidden_dim (int): hidden-layer width for all networks
            discrete_action (bool): whether the action space is discrete
            gaussian_noise_std (float): std of the policy smoothing noise
            agent_max_actions (list of float): per-agent action bound; all
                entries must be equal
            env_id (str): environment identifier
            kwargs: extra configuration (cql_alpha, action_noise_scale,
                logging_interval, no_log, sample_action_class, ...) attached
                verbatim as instance attributes
        """
        self.env_id = env_id
        self.is_mamujoco = self.env_id == 'HalfCheetah-v2'

        # All agents must share one action bound.  The previous version
        # asserted a generator expression, which is always truthy, so the
        # check could never fail.
        assert all(ma == agent_max_actions[0] for ma in agent_max_actions)
        self.max_action = agent_max_actions[0]
        self.min_action = -self.max_action

        self.nagents = len(alg_types)
        self.alg_types = alg_types

        # One wrapper object holding every learning agent's policy (and
        # target policy) plus the shared centralized twin critic.
        self.agent = TD3MultiAgent(
            lr=lr,
            discrete_action=discrete_action,
            hidden_dim=hidden_dim,
            gaussian_noise_std=gaussian_noise_std,
            **agent_init_params
        )

        if self.env_id in ['simple_tag', 'simple_world']:
            self.num_predators = len(self.agent.policys)
            self.num_preys = len(adv_init_params)
            # Fixed opponents; their weights are loaded later through
            # load_pretrained_preys and are never updated here.
            self.preys = [
                DDPGAgent(lr=lr, discrete_action=discrete_action,
                          hidden_dim=hidden_dim, **params)
                for params in adv_init_params
            ]

        self.niter = 0

        self.agent_init_params = agent_init_params
        self.gamma = gamma
        self.tau = tau
        self.lr = lr
        self.discrete_action = discrete_action

        # Track which device each network family currently lives on so that
        # prep_training / prep_rollouts only move networks when needed.
        self.pol_dev, self.trgt_pol_dev, self.critic_dev, self.trgt_critic_dev = 'cpu', 'cpu', 'cpu', 'cpu'

        for k, v in kwargs.items():
            setattr(self, k, v)
        # 'sample_action_class' must arrive through kwargs as a
        # dash-separated string of ints, e.g. "0-1-2".
        self.sample_action_class = [int(a) for a in self.sample_action_class.split("-")]

    @property
    def policies(self):
        # Fixed: the previous implementation read the nonexistent
        # attribute 'self.agents' and always raised AttributeError.
        return [p for p in self.agent.policys]

    @property
    def target_policies(self):
        # Fixed: same nonexistent 'self.agents' bug as in `policies`.
        return [p for p in self.agent.target_policys]

    def step(self, observations, explore=False):
        """
        Take a step forward in environment with all agents.

        Inputs:
            observations: list of observations, one per agent
            explore (bool): unused here; exploration noise is handled inside
                TD3MultiAgent.step
        Outputs:
            actions: list of actions, one per agent
        """
        return self.agent.step(observations)

    def calc_gaussian_pdf(self, samples, mu=0):
        """
        Density of `samples` under an isotropic Gaussian with mean `mu`
        and std `self.cql_sample_noise_level`, taken as a product over the
        last dimension (independent components).

        Inputs:
            samples (Tensor): [..., ac]
        Outputs:
            pdf (Tensor): [...], joint density per sample
        """
        pdfs = 1 / (self.cql_sample_noise_level * np.sqrt(2 * np.pi)) * torch.exp(
            - (samples - mu) ** 2 / (2 * self.cql_sample_noise_level ** 2))
        pdf = torch.prod(pdfs, dim=-1)
        return pdf

    def get_policy_actions(self, states, networks, ns=None):
        """
        Sample noise-perturbed actions around each agent's policy output
        (used by the CQL regularizer to estimate out-of-distribution values).

        Inputs:
            states: per-agent observation batches
            networks: per-agent policy networks to query
            ns (int or None): number of sampled actions per state; defaults
                to self.num_sampled_actions
        Outputs:
            noisy_actions: list (len nagents) of [bs*ns, ac] tensors
            random_noises_log_pis: list (len nagents) of [bs, ns, 1] log-pdfs
                of the injected noise
        """
        # `is not None` so that an explicit ns=0 is not silently replaced
        # by the default (the old truthiness test conflated the two).
        num_sampled_actions = ns if ns is not None else self.num_sampled_actions
        noisy_actions, random_noises_log_pis = [], []
        for i in range(self.nagents):
            action = networks[i](states[i])

            # Tile each action ns times: [bs, ac] -> [bs*ns, ac].
            formatted_action = action.unsqueeze(1).repeat(1, num_sampled_actions, 1).view(
                action.shape[0] * num_sampled_actions, action.shape[1])

            random_noises = torch.FloatTensor(formatted_action.shape[0], formatted_action.shape[1])
            random_noises = random_noises.normal_() * self.cql_sample_noise_level
            random_noises_pi = self.calc_gaussian_pdf(random_noises).view(
                action.shape[0], num_sampled_actions, 1).cuda()
            random_noises_log_pi = torch.log(random_noises_pi)
            random_noises = random_noises.cuda()

            noisy_action = (formatted_action + random_noises).clamp(-self.max_action, self.max_action)
            noisy_actions.append(noisy_action)
            random_noises_log_pis.append(random_noises_log_pi)

        return noisy_actions, random_noises_log_pis  # [na(list), bs*ns, ac] | [na(list), bs, ns, 1]

    def update(self, sample, t, parallel=False, only_critic=False):
        """
        Update the centralized critic and (optionally) every policy from a
        replay-buffer sample.

        Inputs:
            sample: tuple (states, obs, acs, rews, next_states, next_obs,
                dones, next_acs); per-agent entries are lists.  The first
                agent's reward/done are used as the shared signal.
            t (int): global step, used for logging cadence
            parallel (bool): kept for interface compatibility; unused here
            only_critic (bool): if True, stop after the critic update
        """
        dic = {}  # quantities to print / log
        states, obs, acs, rews, next_states, next_obs, dones, next_acs = sample
        states = states[0]
        next_states = next_states[0]

        # ---- Critic update (TD3: min over twin targets) ----
        self.agent.critic_optimizer.zero_grad()
        trgt_obs_acs = [next_states]
        for i in range(self.nagents):
            if self.action_noise_scale > 0:
                # Target policy smoothing.
                trgt_obs_acs.append(action_noise(self.agent.target_policys[i](next_obs[i]),
                                                 sigma=self.action_noise_scale))
            else:
                trgt_obs_acs.append(self.agent.target_policys[i](next_obs[i]))

        trgt_vf_in = torch.cat(trgt_obs_acs, dim=1)

        next_q_value1, next_q_value2 = self.agent.target_critic(trgt_vf_in)
        next_q_value = torch.min(next_q_value1, next_q_value2)

        target_value = rews[0].view(-1, 1) + self.gamma * next_q_value * (1 - dones[0].view(-1, 1))  # bs,1

        vf_in = torch.cat([states] + acs, dim=1)

        actual_value1, actual_value2 = self.agent.critic(vf_in)

        vf_loss = MSELoss(actual_value1, target_value.detach()) + MSELoss(actual_value2, target_value.detach())

        vf_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.agent.critic.parameters(), 0.5)
        self.agent.critic_optimizer.step()
        if only_critic:
            if t % self.logging_interval == 0 and not self.no_log:
                dic.update({"vf_loss": vf_loss.item()})
                log_and_print(list(dic.keys()), list(dic.values()), t, multi=True)
            return
        self.agent.critic_optimizer.zero_grad()

        # ---- Policy update (deterministic policy gradient through Q1) ----
        curr_pol_vf_ins = [states]
        for i in range(self.nagents):
            self.agent.policy_optimizers[i].zero_grad()
            curr_pol_out = self.agent.policys[i](obs[i])
            curr_pol_vf_ins.append(curr_pol_out)
        vf_in = torch.cat(curr_pol_vf_ins, dim=1)

        pol_loss = -self.agent.critic.Q1(vf_in).mean()
        pol_loss.backward()
        for i in range(self.nagents):
            torch.nn.utils.clip_grad_norm_(self.agent.policys[i].parameters(), 0.5)
            self.agent.policy_optimizers[i].step()
        if t % self.logging_interval == 0 and not self.no_log:
            dic.update({"pol_loss": pol_loss.item()})
            dic.update({"vf_loss": vf_loss.item()})
            log_and_print(list(dic.keys()), list(dic.values()), t, multi=True)

    def update_all_targets(self):
        """
        Soft-update all target networks (called after normal updates have
        been performed for each agent).
        """
        soft_update(self.agent.target_critic, self.agent.critic, self.tau)
        for i in range(self.nagents):
            soft_update(self.agent.target_policys[i], self.agent.policys[i], self.tau)
        self.niter += 1

    def prep_training(self, device='gpu'):
        """Put all networks in train mode and move them to `device`."""
        for i in range(self.nagents):
            self.agent.policys[i].train()
            self.agent.target_policys[i].train()
        self.agent.critic.train()
        self.agent.target_critic.train()

        if device == 'gpu':
            fn = lambda x: x.cuda()
        else:
            fn = lambda x: x.cpu()
        if not self.pol_dev == device:
            for i in range(self.nagents):
                self.agent.policys[i] = fn(self.agent.policys[i])
                # Target policies are also moved again below under
                # trgt_pol_dev; the duplicate move is harmless.
                self.agent.target_policys[i] = fn(self.agent.target_policys[i])

            if self.env_id in ['simple_tag', 'simple_world']:
                for p in self.preys:
                    p.policy = fn(p.policy)

            self.pol_dev = device
        if not self.critic_dev == device:
            self.agent.critic = fn(self.agent.critic)
            self.critic_dev = device
        if not self.trgt_pol_dev == device:
            for i in range(self.nagents):
                self.agent.target_policys[i] = fn(self.agent.target_policys[i])
            if self.env_id in ['simple_tag', 'simple_world']:
                for p in self.preys:
                    p.target_policy = fn(p.target_policy)

            self.trgt_pol_dev = device
        if not self.trgt_critic_dev == device:
            self.agent.target_critic = fn(self.agent.target_critic)
            self.trgt_critic_dev = device

    def prep_rollouts(self, device='cpu'):
        """Put policies in eval mode and move them to `device` for rollouts."""
        for p in self.agent.policys:
            p.eval()
        if device == 'gpu':
            fn = lambda x: x.cuda()
        else:
            fn = lambda x: x.cpu()
        if not self.pol_dev == device:
            for i in range(self.nagents):
                self.agent.policys[i] = fn(self.agent.policys[i])
            if self.env_id in ['simple_tag', 'simple_world']:
                for p in self.preys:
                    p.policy = fn(p.policy)
            self.pol_dev = device

    @classmethod
    def init_from_env(cls, env, env_id, data_type, env_info=None, agent_alg="td3", adversary_alg="ddpg", gamma=0.95, tau=0.01, lr=0.01, hidden_dim=64,
                      cql=False, batch_size=None, lse_temp=None, num_sampled_actions=None, gaussian_noise_std=None, omar=None, omar_mu=None, omar_sigma=None,
                      omar_num_samples=None, omar_num_elites=None, omar_iters=None, **kwargs):
        """
        Instantiate an instance of this class from a multi-agent environment.

        Raises:
            ValueError: if env_id is not one of the supported environments
                (previously this fell through to a NameError on alg_types).
        """
        if env_id in ['simple_tag', 'simple_world']:
            # Predators ('adversary' agent type) are the learning agents.
            alg_types = [agent_alg for atype in env.agent_types if atype == 'adversary']
        elif env_id in ['simple_spread']:
            alg_types = [agent_alg for atype in env.agent_types]
        elif env_id in ['HalfCheetah-v2']:
            alg_types = [agent_alg for _ in range(env_info['n_agents'])]
        else:
            raise ValueError('Unsupported env_id: {}'.format(env_id))

        agent_init_params = {}
        all_n_actions = []
        all_n_obs = []
        agent_max_actions = []
        adv_init_params = []

        if env_id == 'HalfCheetah-v2':
            for agent_idx, algtype in zip(range(len(alg_types)), alg_types):
                acsp = env_info['action_spaces'][agent_idx]
                agent_max_actions.append(acsp.high[0])
                all_n_actions.append(acsp.shape[0])
            agent_init_params = {'num_in_pol': env_info['obs_shape'], 'num_out_pol': all_n_actions, 'num_in_critic': env_info['state_shape'] + sum(all_n_actions), 'num_agent': env_info['n_agents']}

        else:
            if env_id in ['simple_tag', 'simple_world']:
                predator_num = len(alg_types)
                prey_num = len([agent_alg for atype in env.agent_types if atype == 'agent'])
                env_action_space = [env.action_space[i] for i in range(len(env.action_space)) if env.agent_types[i] == 'adversary']
                env_obs_space = [env.observation_space[i] for i in range(len(env.observation_space)) if env.agent_types[i] == 'adversary']
                env_action_space_prey = [env.action_space[i] for i in range(len(env.action_space)) if env.agent_types[i] == 'agent']
                env_obs_space_prey = [env.observation_space[i] for i in range(len(env.observation_space)) if env.agent_types[i] == 'agent']
            else:
                env_action_space, env_obs_space = env.action_space, env.observation_space
            for acsp, obsp in zip(env_action_space, env_obs_space):
                all_n_obs.append(obsp.shape[0])
                all_n_actions.append(acsp.shape[0])
                agent_max_actions.append(acsp.high[0])
            # The shared critic assumes a homogeneous action dimension.
            for i in range(1, len(all_n_actions)):
                assert (all_n_actions[i] == all_n_actions[0])
            agent_init_params = {'num_in_pol': env.observation_space[0].shape[0], 'num_out_pol': all_n_actions, 'num_in_critic': sum(all_n_obs) + sum(all_n_actions), 'num_agent': len(alg_types)}
            if env_id in ['simple_tag', 'simple_world']:
                for acsp, obsp in zip(env_action_space_prey, env_obs_space_prey):
                    num_in_pol = obsp.shape[0]
                    num_out_pol = acsp.shape[0]
                    num_in_critic = num_in_pol + num_out_pol
                    adv_init_params.append({'num_in_pol': num_in_pol, 'num_out_pol': num_out_pol, 'num_in_critic': num_in_critic})

        # Per-(environment, dataset quality) hyperparameter defaults;
        # overridable through kwargs below.
        env_config_map = {
            'simple_spread': {
                'random': {'omar_coe': 1.0, 'cql_alpha': 0.5},
                'medium-replay': {'omar_coe': 1.0, 'cql_alpha': 1.0},
                'medium': {'omar_coe': 1.0, 'cql_alpha': 5.0},
                'expert': {'omar_coe': 1.0, 'cql_alpha': 2.5},
            },
            'simple_tag': {
                'random': {'omar_coe': 0.9, 'cql_alpha': 0.5},
                'medium-replay': {'omar_coe': 0.9, 'cql_alpha': 0.5},
                'medium': {'omar_coe': 0.7, 'cql_alpha': 0.6},
                'expert': {'omar_coe': 0.9, 'cql_alpha': 2.0},
            },
            'simple_world': {
                'random': {'omar_coe': 1.0, 'cql_alpha': 0.5},
                'medium-replay': {'omar_coe': 0.7, 'cql_alpha': 0.5},
                'medium': {'omar_coe': 0.4, 'cql_alpha': 0.32},
                'expert': {'omar_coe': 0.8, 'cql_alpha': 0.3},
            },
            'HalfCheetah-v2': {
                'random': {'omar_coe': 1.0, 'cql_alpha': 0.1},
                'medium-replay': {'omar_coe': 0.9, 'cql_alpha': 0.4},
                'medium': {'omar_coe': 0.7, 'cql_alpha': 1.0},
                'expert': {'omar_coe': 0.5, 'cql_alpha': 1.7},
            }
        }
        omar_coe = env_config_map[env_id][data_type]['omar_coe']
        cql_alpha = env_config_map[env_id][data_type]['cql_alpha']
        # `or 0` guards against an explicit None in kwargs, which would
        # otherwise raise TypeError on the comparison.
        if (kwargs.get('cql_alpha') or 0) > 0:
            cql_alpha = kwargs['cql_alpha']
        if (kwargs.get('omar_coe') or 0) > 0:
            omar_coe = kwargs['omar_coe']
        init_dict = {
            'env_id': env_id,
            'gamma': gamma,
            'tau': tau,
            'lr': lr,
            'hidden_dim': hidden_dim,
            'alg_types': alg_types,
            'agent_init_params': agent_init_params,
            'adv_init_params': adv_init_params,
            'discrete_action': False,
            'cql': cql, 'cql_alpha': cql_alpha, 'lse_temp': lse_temp, 'num_sampled_actions': num_sampled_actions,
            'batch_size': batch_size,
            'gaussian_noise_std': gaussian_noise_std,
            'agent_max_actions': agent_max_actions,
            'omar': omar, 'omar_coe': omar_coe,
            'omar_iters': omar_iters, 'omar_mu': omar_mu, 'omar_sigma': omar_sigma, 'omar_num_samples': omar_num_samples, 'omar_num_elites': omar_num_elites,
        }
        init_dict.update(kwargs)

        instance = cls(**init_dict)
        instance.init_dict = init_dict

        return instance

    def load_pretrained_preys(self, filename):
        """
        Load the fixed prey policies from a pretraining checkpoint and
        freeze them in eval mode.  No-op for environments without preys
        (previously those fell through to an unbound-name error).
        """
        if not torch.cuda.is_available():
            save_dict = torch.load(filename, map_location=torch.device('cpu'))
        else:
            save_dict = torch.load(filename)

        if self.env_id not in ['simple_tag', 'simple_world']:
            return

        # Checkpoint layout: predators first, then preys.
        prey_params = save_dict['agent_params'][self.num_predators:]
        for prey, params in zip(self.preys, prey_params):
            prey.load_params_without_optims(params)

        for p in self.preys:
            p.policy.eval()
            p.target_policy.eval()

    def save(self, filename):
        """
        Save model parameters to a file.

        Inputs:
            filename (str): path of the file to save to
        """
        save_dict = {
            'agent_params': [a.state_dict() for a in self.agent.policys],
            'target_agent_params': [a.state_dict() for a in self.agent.target_policys],
            'critic_params': self.agent.critic.state_dict(),
            'target_critic_params': self.agent.target_critic.state_dict(),
            'agent_optimizers': [a.state_dict() for a in self.agent.policy_optimizers],
            'critic_optimizer': self.agent.critic_optimizer.state_dict(),
            'init_dict': self.init_dict,
            'niter': self.niter
        }

        # Also save prey parameters for predator/prey environments.
        if hasattr(self, 'preys') and self.preys:
            save_dict['prey_params'] = [p.policy.state_dict() for p in self.preys]
            save_dict['target_prey_params'] = [p.target_policy.state_dict() for p in self.preys]

        torch.save(save_dict, filename)

    def load(self, filename):
        """
        Load model parameters (networks and optimizers) from a file and
        move everything to the current device.

        Inputs:
            filename (str): path of the file to load from
        """
        # Pick the device everything should end up on.
        device = 'cuda' if torch.cuda.is_available() else 'cpu'

        if not torch.cuda.is_available():
            save_dict = torch.load(filename, map_location=torch.device('cpu'))
        else:
            save_dict = torch.load(filename)

        # Load agent parameters and move them to the right device.
        for i, state_dict in enumerate(save_dict['agent_params']):
            self.agent.policys[i].load_state_dict(state_dict)
            self.agent.policys[i] = self.agent.policys[i].to(device)

        for i, state_dict in enumerate(save_dict['target_agent_params']):
            self.agent.target_policys[i].load_state_dict(state_dict)
            self.agent.target_policys[i] = self.agent.target_policys[i].to(device)

        self.agent.critic.load_state_dict(save_dict['critic_params'])
        self.agent.critic = self.agent.critic.to(device)
        self.agent.target_critic.load_state_dict(save_dict['target_critic_params'])
        self.agent.target_critic = self.agent.target_critic.to(device)

        # Load optimizer state and make sure its tensors are on `device` too.
        for i, state_dict in enumerate(save_dict['agent_optimizers']):
            self.agent.policy_optimizers[i].load_state_dict(state_dict)
            for param_group in self.agent.policy_optimizers[i].param_groups:
                for param in param_group['params']:
                    param.data = param.data.to(device)
                    if param.grad is not None:
                        param.grad.data = param.grad.data.to(device)

        self.agent.critic_optimizer.load_state_dict(save_dict['critic_optimizer'])
        for param_group in self.agent.critic_optimizer.param_groups:
            for param in param_group['params']:
                param.data = param.data.to(device)
                if param.grad is not None:
                    param.grad.data = param.grad.data.to(device)

        # Load prey parameters, if present in the checkpoint.
        if 'prey_params' in save_dict and hasattr(self, 'preys') and self.preys:
            for i, state_dict in enumerate(save_dict['prey_params']):
                self.preys[i].policy.load_state_dict(state_dict)
                self.preys[i].policy = self.preys[i].policy.to(device)

            for i, state_dict in enumerate(save_dict['target_prey_params']):
                self.preys[i].target_policy.load_state_dict(state_dict)
                self.preys[i].target_policy = self.preys[i].target_policy.to(device)

        self.niter = save_dict.get('niter', 0)

        # Keep the device-tracking flags consistent with where the nets are.
        self.pol_dev = device
        self.critic_dev = device
        self.trgt_pol_dev = device
        self.trgt_critic_dev = device

        print(f"Model loaded successfully to {device}")

    def load_params_without_optims(self, state_dict):
        """
        Load network parameters only, skipping optimizer state (used when
        restoring pretrained weights).

        Inputs:
            state_dict (dict): checkpoint in the format produced by save()
                — TODO confirm callers pass this layout; the previous
                implementation called self.load_state_dict, which does not
                exist on this plain object and always raised AttributeError.
        """
        for i, sd in enumerate(state_dict['agent_params']):
            self.agent.policys[i].load_state_dict(sd)
        for i, sd in enumerate(state_dict['target_agent_params']):
            self.agent.target_policys[i].load_state_dict(sd)
        self.agent.critic.load_state_dict(state_dict['critic_params'])
        self.agent.target_critic.load_state_dict(state_dict['target_critic_params'])
