import torch
import torch.nn as nn
import numpy as np
from torch.optim import optimizer
from .core import Mlp, test_agent, weights_init_, soft_update_model1_with_model2
from torch.distributions import Distribution, Normal
import torch.optim as optim
from common.replay import replay_buffer
from torch import Tensor

# Clamp range for the actor's log-std head (standard SAC bounds).
LOG_SIG_MAX = 2
LOG_SIG_MIN = -20
# Wider clamp range apparently intended for a critic log-std head;
# unused in this file (the critic here outputs plain Q values).
Q_LOG_SIG_MAX = 100
Q_LOG_SIG_MIN = -20
# Epsilon inside log(1 - tanh(a)^2 + eps) to avoid log(0) at the action bound.
ACTION_BOUND_EPSILON = 1e-6

# Per-environment target entropies used when auto-tuning alpha (MBPO settings).
mbpo_target_entropy_dict = {'Hopper-v2':-1, 'HalfCheetah-v2':-3, 'Walker2d-v2':-3, 'Ant-v2':-4, 'Humanoid-v2':-2}
# Per-environment training epoch counts, used when epochs_num < 0.
mbpo_epoches = {'Hopper-v2':125, 'Walker2d-v2':300, 'Ant-v2':300, 'HalfCheetah-v2':400, 'Humanoid-v2':300}

class redq_actor(Mlp):
    """Tanh-squashed Gaussian policy built on the shared Mlp trunk.

    A separate linear head produces the (clamped) log-std; actions are
    squashed with tanh and scaled into [-action_limit, action_limit].
    """

    def __init__(self, obs_dim, hidden_size, action_dim, action_limit = 1.0):
        super().__init__(obs_dim, hidden_size, action_dim)
        self.action_limit = action_limit
        # The log-std head hangs off the last hidden layer, or directly off
        # the observation when no hidden layers are configured.
        logstd_in = hidden_size[-1] if hidden_size else obs_dim
        self.last_logstd_layer = nn.Linear(logstd_in, action_dim)
        self.apply(weights_init_)

    def forward(self, input, deterministic = False, return_log_prob = True):
        """Return (scaled action, mean, log_std, log_prob-or-None)."""
        # Shared trunk: alternate linear layers with the trunk activation.
        hidden = input
        for layer in self.hidden_layers:
            hidden = self.hidden_activation(layer(hidden))
        mean = self.lastlayer(hidden)
        log_std = torch.clamp(self.last_logstd_layer(hidden), LOG_SIG_MIN, LOG_SIG_MAX)
        std = torch.exp(log_std)

        dist = Normal(mean, std)

        if deterministic:
            # Evaluation mode: use the pre-tanh mean as the action.
            pre_tanh_value = mean
        else:
            # rsample() keeps the reparameterization gradient path.
            pre_tanh_value = dist.rsample()
        action = torch.tanh(pre_tanh_value)

        if return_log_prob:
            # Gaussian log-prob with the tanh change-of-variables correction;
            # the epsilon guards against log(0) at the action bound.
            log_prob = dist.log_prob(pre_tanh_value)
            log_prob = log_prob - torch.log(1 - action.pow(2) + ACTION_BOUND_EPSILON)
            log_prob = log_prob.sum(1, keepdim=True)
        else:
            log_prob = None

        return action * self.action_limit, mean, log_std, log_prob

class redq_dis_critic(Mlp):
    """Ensemble Q critic: maps a concatenated (obs, action) input to one
    Q value per ensemble head.

    The final linear layer of the Mlp trunk emits `Qnums` outputs, so a
    single network holds the whole critic ensemble.
    """

    def __init__(self, obs_dim, action_dim, hidden_size, Qnums):
        # Trunk input is the observation concatenated with the action.
        super().__init__(obs_dim + action_dim, hidden_size, Qnums)
        self.apply(weights_init_)

    def forward(self, input):
        """Return a (batch, Qnums) tensor of per-head Q estimates.

        `input` must already be torch.cat([obs, action], dim=1).
        """
        for layer in self.hidden_layers:
            input = layer(input)
            input = self.hidden_activation(input)
        Q = self.lastlayer(input)
        return Q


class dis_redq(object):
    """Randomized Ensemble Double Q-learning (REDQ) agent.

    Trains a tanh-Gaussian actor against an ensemble critic with `Qnums`
    heads. The Bellman target uses the minimum over `use_Qnum` randomly
    sampled target heads (REDQ in-target minimization), with SAC-style
    entropy regularization and optional automatic alpha tuning.
    """

    def __init__(self, env_name, env, test_env = None, device = torch.device("cpu"), logger = None, 
                hidden_size=[256, 256], replaybuff_size=int(1e6), Qnums = 10, use_Qnum = 2, lam = 0, 
                lr = 3e-4, auto_alpha = True, alpha = 0.2, target_entropy = -2, epochs_num = -1, test_num = 1,
                Q_update_num = 20, Policy_update_num = 1, steps_per_epoch = 1000, start_steps = 5000,
                max_step_len = 1000, batch_size = 256, q_target_mode = 'min', gamma = 0.99, tau = 0.995):
        self.env = env
        self.obs_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
        self.act_limit = env.action_space.high[0].item()

        # Actor (policy) and its optimizer.
        self.actor = redq_actor(self.obs_dim, hidden_size, self.action_dim, self.act_limit).to(device)
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr = lr)

        # Ensemble critic (Qnums heads in one network) and its target copy.
        self.q_network = redq_dis_critic(self.obs_dim, self.action_dim, hidden_size, Qnums).to(device)
        self.q_network_optimizer = optim.Adam(self.q_network.parameters(), lr = lr)
        self.q_target_network = redq_dis_critic(self.obs_dim, self.action_dim, hidden_size, Qnums).to(device)
        self.q_target_network.load_state_dict(self.q_network.state_dict())

        self.auto_alpha = auto_alpha
        if auto_alpha:
            # Optimize log(alpha) so alpha stays positive. Fall back to the
            # `target_entropy` argument for environments missing from the
            # MBPO table (the original raised KeyError there).
            self.log_alpha = torch.zeros(1, requires_grad = True, device = device)
            self.target_entropy = mbpo_target_entropy_dict.get(env_name, target_entropy)
            self.alpha_optim = optim.Adam([self.log_alpha], lr = lr)
            self.alpha = self.log_alpha.cpu().exp().item()
        else:
            self.alpha = alpha
            self.log_alpha, self.target_entropy, self.alpha_optim = None, None, None

        self.Replay_buf = replay_buffer(self.obs_dim, self.action_dim, replaybuff_size)
        self.mse_criterion = nn.MSELoss()

        # epochs_num < 0 selects the per-environment MBPO schedule.
        if epochs_num < 0:
            self.epochs_num = mbpo_epoches[env_name]
        else:
            self.epochs_num = epochs_num

        self.env_name = env_name
        self.steps_per_epoch = steps_per_epoch
        self.Q_update_num = Q_update_num
        self.start_steps = start_steps
        self.device = device
        self.max_step_len = max_step_len
        self.batch_size = batch_size
        self.use_Qnum = use_Qnum
        self.q_target_mode = q_target_mode
        self.Qnums = Qnums
        self.gamma = gamma
        self.Policy_update_num = Policy_update_num
        self.tau = tau
        self.test_env = test_env
        self.logger = logger
        self.lambd = lam
        self.test_num = test_num

    def store_data(self, obs, action, nextobs, reward, done):
        """Append one transition to the replay buffer."""
        self.Replay_buf.store(obs, action, nextobs, reward, done)

    def sample_data(self, batch_size):
        """Sample a batch and move it to self.device as float tensors.

        Reward and done come back as (batch, 1) columns so they broadcast
        against the per-head Q predictions.
        """
        batch = self.Replay_buf.sample_batch(batch_size)
        obs_tensor = Tensor(batch['obs']).to(self.device)
        action_tensor = Tensor(batch['action']).to(self.device)
        nextobs_tensor = Tensor(batch['nextobs']).to(self.device)
        reward_tensor = Tensor(batch['reward']).unsqueeze(1).to(self.device)
        done_tensor = Tensor(batch['done']).unsqueeze(1).to(self.device)
        return obs_tensor, action_tensor, nextobs_tensor, reward_tensor, done_tensor

    def get_test_action(self, obs):
        """Deterministic (mean) action for evaluation, as a flat numpy array."""
        with torch.no_grad():
            # unsqueeze adds a batch dimension: forward expects batched input.
            obs_tensor = Tensor(obs).unsqueeze(0).to(self.device)
            action_tensor = self.actor.forward(obs_tensor, 
                            deterministic = True, return_log_prob = False)[0]
            action = action_tensor.cpu().numpy().reshape(-1)
        return action

    def get_action(self, obs):
        """Stochastic action for data collection.

        Samples uniformly from the action space until the buffer holds more
        than `start_steps` transitions (warm-up exploration).
        """
        with torch.no_grad():
            if self.Replay_buf.size > self.start_steps:
                # unsqueeze adds a batch dimension: forward expects batched input.
                obs_tensor = Tensor(obs).unsqueeze(0).to(self.device)
                action_tensor = self.actor.forward(obs_tensor, 
                                deterministic = False, return_log_prob = False)[0]
                action = action_tensor.cpu().numpy().reshape(-1)
            else:
                action = self.env.action_space.sample()
        return action

    def save_model(self, PATH):
        """Save the actor's weights to PATH."""
        torch.save(self.actor.state_dict(), PATH)

    def get_target_predictq_no_grad(self, next_obs, reward, done):
        """Compute the REDQ Bellman target without tracking gradients.

        Draws `use_Qnum` of the `Qnums` target heads at random, takes their
        element-wise minimum, and adds the entropy bonus. Returns
        (target, None); the None keeps the original two-slot interface.
        NOTE(review): `q_target_mode` is stored on the agent but this method
        always uses the 'min' rule.
        """
        sample_idxs = np.random.choice(self.Qnums, self.use_Qnum, replace=False)
        with torch.no_grad():
            action_next, _, _, action_next_log_prob = self.actor.forward(next_obs)

            q_prediction_next = self.q_target_network(torch.cat([next_obs, action_next], 1))
            q_prediction_next = q_prediction_next[:, sample_idxs]
            min_q_prediction_next, _ = torch.min(q_prediction_next, dim = 1, keepdim = True)
            next_q_with_log_prob = min_q_prediction_next - self.alpha * action_next_log_prob

            y_q = reward + self.gamma * (1 - done) * next_q_with_log_prob

        return y_q, None

    def train(self):
        """Run the training loop: collect data, update critic/actor/alpha, log, evaluate."""
        max_ep_len = self.env._max_episode_steps if self.max_step_len > self.env._max_episode_steps else self.max_step_len
        obs, step_len, total_reward = self.env.reset(), 0, 0
        total_step = self.epochs_num * self.steps_per_epoch + 1
        for t in range(total_step):
            # --- environment interaction ---
            action = self.get_action(obs)
            next_obs, reward, done, _ = self.env.step(action)
            step_len += 1
            total_reward += reward
            self.store_data(obs, action, next_obs, reward, done)

            # No gradient updates until the warm-up phase is over.
            Q_update_num = 0 if self.Replay_buf.size <= self.start_steps else self.Q_update_num
            Policy_update_num = 0 if self.Replay_buf.size <= self.start_steps else self.Policy_update_num

            '''Q update'''
            for _ in range(Q_update_num):
                obs_tensor, action_tensor, next_obs_tensor, reward_tensor, done_tensor = self.sample_data(self.batch_size)
                """Q loss"""
                y_q, _ = self.get_target_predictq_no_grad(next_obs_tensor, reward_tensor, done_tensor)

                q_prediction = self.q_network(torch.cat([obs_tensor, action_tensor], 1))
                # Broadcast the single target column to every ensemble head;
                # scaling the mean MSE by Qnums recovers the per-head sum.
                y_q = y_q.expand((-1, self.Qnums)) if y_q.shape[1] == 1 else y_q
                q_loss_all = self.mse_criterion(q_prediction, y_q) * self.Qnums

                self.q_network_optimizer.zero_grad()
                q_loss_all.backward()
                self.q_network_optimizer.step()

                # Polyak-average the target network after every critic step.
                soft_update_model1_with_model2(self.q_target_network, self.q_network, self.tau)

            '''policy (and alpha) update'''
            for _ in range(Policy_update_num):
                obs_tensor, _, next_obs_tensor, reward_tensor, done_tensor = self.sample_data(self.batch_size)
                action_tensor, _, _, action_log_prob = self.actor.forward(obs_tensor)
                # Freeze the critic so the policy gradient flows only through actions.
                self.q_network.requires_grad_(False)
                q = self.q_network(torch.cat([obs_tensor, action_tensor], 1))
                ave_q = torch.mean(q, dim=1, keepdim=True)
                policy_loss = (self.alpha * action_log_prob - ave_q).mean()
                self.actor_optimizer.zero_grad()
                policy_loss.backward()
                self.actor_optimizer.step()
                self.q_network.requires_grad_(True)

                if self.auto_alpha:
                    # Standard SAC temperature loss toward the target entropy.
                    alpha_loss = -(self.log_alpha * (action_log_prob + self.target_entropy).detach()).mean()
                    self.alpha_optim.zero_grad()
                    alpha_loss.backward()
                    self.alpha_optim.step()
                    self.alpha = self.log_alpha.cpu().exp().item()
                else:
                    alpha_loss = Tensor([0])

            # Logging: each metric is only defined when its update loop ran
            # (the original could raise NameError when Policy_update_num == 0
            # and logged an unguarded tensor mean).
            if self.logger:
                if Q_update_num != 0:
                    self.logger.add_logname("q_loss_all", q_loss_all.item(), t)
                    self.logger.add_logname("qmean", q_prediction.mean().item(), t)
                if Policy_update_num != 0:
                    self.logger.add_logname("policy_loss", policy_loss.item(), t)
                    self.logger.add_logname("alpha_loss", alpha_loss.item(), t)
                    self.logger.add_logname("alpha", self.alpha, t)

            # --- episode bookkeeping ---
            if done or (step_len == max_ep_len):
                if self.logger:  # original crashed here when logger was None
                    self.logger.add_logname("reward/total_reward", total_reward, t)
                obs, step_len, total_reward = self.env.reset(), 0, 0
            else:
                obs = next_obs

            # --- periodic evaluation at the end of every epoch ---
            if ((t + 1) % self.steps_per_epoch == 0) and self.test_env:
                test_reward_list = test_agent(self, self.test_env, max_ep_len, self.test_num, logger = self.logger)
                if len(test_reward_list) > 1:
                    # Log every evaluation episode separately, keyed by index.
                    key = [str(x) for x in list(range(len(test_reward_list)))]
                    test_reward = dict(zip(key, test_reward_list))
                else:
                    test_reward = test_reward_list.mean()
                if self.logger:  # original crashed here when logger was None
                    self.logger.add_logname("reward/test_reward", test_reward, t)

                

            




        

        


        
