import copy
import pickle
import numpy as np
import os
import torch
from collections import deque
import random


class Critic(torch.nn.Module):
    """Twin Q-value network for clipped double-Q learning.

    Two fully independent MLPs map a concatenated (state, action) vector to a
    scalar Q estimate each; callers typically take the minimum of the pair.
    """

    def __init__(self, input_dims, hidden_dims, output_dims=1):
        super(Critic, self).__init__()

        # First Q-network.
        self.head1 = torch.nn.Linear(input_dims, hidden_dims[0])
        self.relu_head1 = torch.nn.ReLU()
        self.hidden1 = self.build_layers(hidden_dims)
        self.out1 = torch.nn.Linear(hidden_dims[-1], output_dims)

        # Second Q-network (identical architecture, separate parameters).
        self.head2 = torch.nn.Linear(input_dims, hidden_dims[0])
        self.relu_head2 = torch.nn.ReLU()
        self.hidden2 = self.build_layers(hidden_dims)
        self.out2 = torch.nn.Linear(hidden_dims[-1], output_dims)

        # Xavier-uniform weights, zero biases for every linear layer.
        for module in self.modules():
            if isinstance(module, torch.nn.Linear):
                torch.nn.init.xavier_uniform_(module.weight, gain=1)
                torch.nn.init.constant_(module.bias, 0)

    @staticmethod
    def build_layers(hidden_dims: list):
        """Chain Linear+ReLU pairs between consecutive hidden widths."""
        layers = []
        for in_dim, out_dim in zip(hidden_dims[:-1], hidden_dims[1:]):
            layers.append(torch.nn.Linear(in_dim, out_dim))
            layers.append(torch.nn.ReLU())
        return torch.nn.Sequential(*layers)

    def forward(self, state, action):
        """Return the two Q estimates for a batched (state, action) pair."""
        joint = torch.cat([state, action], dim=1)

        q1 = self.out1(self.hidden1(self.relu_head1(self.head1(joint))))
        q2 = self.out2(self.hidden2(self.relu_head2(self.head2(joint))))
        return q1, q2


class GaussianActor(torch.nn.Module):
    """Squashed-Gaussian policy network for SAC.

    Produces a Normal(mu, std) over pre-squash actions, applies tanh, and
    scales the result by `act_limit`. Optionally returns the tanh-corrected
    log-probability needed by the SAC objective.
    """

    def __init__(self, input_dims, hidden_dims, output_dims, act_limit):
        super(GaussianActor, self).__init__()
        self.head = torch.nn.Linear(input_dims, hidden_dims[0])
        self.relu_head = torch.nn.ReLU()
        self.hidden = self.build_layers(hidden_dims)
        self.out_mean = torch.nn.Linear(hidden_dims[-1], output_dims)
        self.out_log_std = torch.nn.Linear(hidden_dims[-1], output_dims)
        self.max_action = 1   # NOTE(review): unused in this class; kept for compatibility
        self.min_action = -1  # NOTE(review): unused in this class; kept for compatibility
        self.LOG_SIG_MAX = 2    # clamp bounds keep std in a numerically safe range
        self.LOG_SIG_MIN = -20
        # Stored as a plain float so scaling in forward() needs no per-call
        # tensor allocation and automatically follows the input's device.
        self.act_limit = float(act_limit)

        # Xavier-uniform weights, zero biases for every linear layer.
        for m in self.modules():
            if isinstance(m, torch.nn.Linear):
                torch.nn.init.xavier_uniform_(m.weight, gain=1)
                torch.nn.init.constant_(m.bias, 0)

    @staticmethod
    def build_layers(hidden_dims: list):
        """Chain Linear+ReLU pairs between consecutive hidden widths."""
        layers = []
        for i in range(len(hidden_dims) - 1):
            layers.extend([torch.nn.Linear(hidden_dims[i], hidden_dims[i + 1]), torch.nn.ReLU()])
        hidden_layer = torch.nn.Sequential(*layers)
        return hidden_layer

    def forward(self, observation, deterministic=False, with_logprob=True):
        """Return (action, log_pi) for a batch of observations.

        observation: 2-D tensor (batch, input_dims) — the log-prob sum over
            dim=1 assumes this layout.
        deterministic: if True, use the distribution mean instead of sampling.
        with_logprob: if False, log_pi is returned as None.
        """
        x = self.relu_head(self.head(observation))
        x = self.hidden(x)
        mu = self.out_mean(x)
        log_std = torch.clamp(self.out_log_std(x), self.LOG_SIG_MIN, self.LOG_SIG_MAX)
        std = torch.exp(log_std)
        pi_distribution = torch.distributions.Normal(mu, std)
        if deterministic:
            action = mu
        else:
            action = pi_distribution.rsample()  # mean + std * N(0,1).sample()
        if with_logprob:
            # Compute log-prob under the Gaussian, then correct for the tanh
            # squashing. The correction formula looks magic; see the original
            # SAC paper (arXiv 1801.01290), Appendix C — this is a more
            # numerically stable equivalent of its Eq. 21.
            log_pi = pi_distribution.log_prob(action).sum(dim=1, keepdim=True)
            log_pi -= (2 * (np.log(2) - action - torch.nn.functional.softplus(-2 * action))).sum(dim=1, keepdim=True)
        else:
            log_pi = None
        # Scale by the scalar limit directly: building a FloatTensor here every
        # call (as before) was wasteful and pinned the result to the CPU.
        action = self.act_limit * torch.tanh(action)

        return action.squeeze(0), log_pi


class SoftActorCritic:
    """Soft Actor-Critic agent (Haarnoja et al., arXiv 1801.01290 / 1812.05905).

    Owns twin critics with a Polyak-averaged target copy, a squashed-Gaussian
    actor, a replay buffer of (state, action, reward, next_state, done)
    transitions, and an optional auto-tuned entropy temperature alpha.
    """

    def __init__(self, env):
        # Twin Q-networks take the concatenated (state, action) vector.
        self.critic = Critic(env.observation_space.shape[0] + env.action_space.shape[0], [128, 128, 128], 1)
        self.critic_target = copy.deepcopy(self.critic)
        self.hard_update_target()  # deepcopy already matches; kept as an explicit sync
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=1e-4)

        self.actor = GaussianActor(env.observation_space.shape[0], [128, 128, 128], env.action_space.shape[0],
                                   env.action_space.high[0])
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=1e-4)

        # Replay buffer; entries are appended elsewhere as
        # (state, action, reward, next_state, done) tuples.
        self.buffer_memory = deque(maxlen=1000000)
        self.batch_size = 256
        self.gamma = 0.99       # discount factor
        self.tau = 0.001        # Polyak averaging rate for the target critic
        self.iter = 0
        self.policy_freq = 2    # target-network update period, in learn() calls
        self.env = env

        # Whether to automatically learn the temperature alpha.
        self.adaptive_alpha = True
        if self.adaptive_alpha:
            # Target Entropy = −dim(A) (e.g. -6 for HalfCheetah-v2) as given in the paper.
            self.target_entropy = -env.action_space.shape[0]
            # We learn log_alpha instead of alpha to ensure that alpha=exp(log_alpha)>0.
            self.log_alpha = torch.zeros(1, requires_grad=True)
            # Detached: only alpha_loss should backprop into log_alpha.
            self.alpha = self.log_alpha.exp().detach()
            self.alpha_optimizer = torch.optim.Adam([self.log_alpha], lr=1e-4)
        else:
            self.alpha = 0.2

    def choose_action(self, state, deterministic=False):
        """Return (numpy action vector, log_pi tensor) for one env state."""
        state = torch.tensor(state, dtype=torch.float32).unsqueeze(0)
        action, log_pi = self.actor(state, deterministic)
        return action.squeeze(0).detach().numpy(), log_pi

    def soft_update_target(self):
        """Polyak-average critic weights into the target critic."""
        for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

    def hard_update_target(self):
        """Copy critic weights into the target critic verbatim."""
        for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
            target_param.data.copy_(param.data)

    def learn(self):
        """Run one SAC update (critic, actor, optional alpha, target sync)."""
        self.iter += 1
        batch_samples = random.sample(self.buffer_memory, self.batch_size)
        batch_state, batch_action, batch_reward, batch_new_state, batch_done = zip(*batch_samples)
        # np.array first: building a tensor from a tuple of arrays is slow.
        batch_state = torch.FloatTensor(np.array(batch_state))
        batch_action = torch.FloatTensor(np.array(batch_action))
        # Rewards/dones are presumably stored as scalars, giving (B,) tensors.
        # Reshape to (B, 1) so they broadcast against the (B, 1) critic outputs;
        # the previous code silently broadcast (B,) * (B, 1) into a (B, B)
        # target matrix, corrupting the Bellman backup. reshape(-1, 1) is a
        # no-op if the buffer already stores column vectors.
        batch_reward = torch.FloatTensor(np.array(batch_reward)).reshape(-1, 1)
        batch_new_state = torch.FloatTensor(np.array(batch_new_state))
        batch_done = torch.FloatTensor(np.array(batch_done)).reshape(-1, 1)

        with torch.no_grad():
            next_state_action, next_state_log_pi = self.actor(batch_new_state)
            # Pass the sampled action through directly — no FloatTensor rewrap.
            next_q1_target, next_q2_target = self.critic_target(batch_new_state, next_state_action)
            # Soft Bellman target: r + γ(1-d)(min Q_target - α·logπ).
            next_q_value = batch_reward + self.gamma * (1 - batch_done) * (
                    torch.min(next_q1_target, next_q2_target) - self.alpha * next_state_log_pi)
        q1, q2 = self.critic(batch_state, batch_action)
        # JQ = 𝔼(st,at)~D[0.5(Q1(st,at) - r(st,at) - γ(𝔼st+1~p[V(st+1)]))^2]
        q_loss = torch.nn.functional.mse_loss(q1, next_q_value) + torch.nn.functional.mse_loss(q2, next_q_value)
        self.critic_optimizer.zero_grad()
        q_loss.backward()
        self.critic_optimizer.step()

        # Freeze critic networks so you don't waste computational effort
        for params in self.critic.parameters():
            params.requires_grad = False

        action, log_pi = self.actor(batch_state)
        # Keep `action` attached to the graph: the original wrapped it in
        # torch.FloatTensor(...), which detaches it and severs the policy
        # gradient through Q — the actor then learns only from the entropy term.
        q1_pi, q2_pi = self.critic(batch_state, action)
        q_pi = torch.min(q1_pi, q2_pi)
        # Jπ = 𝔼st∼D,εt∼N[α * logπ(f(εt;st)|st) − Q(st,f(εt;st))]
        actor_loss = (self.alpha * log_pi - q_pi).mean()
        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        self.actor_optimizer.step()

        # Unfreeze critic networks
        for params in self.critic.parameters():
            params.requires_grad = True

        if self.adaptive_alpha:
            # We learn log_alpha instead of alpha to ensure that alpha=exp(log_alpha)>0
            alpha_loss = -(self.log_alpha.exp() * (log_pi + self.target_entropy).detach()).mean()
            self.alpha_optimizer.zero_grad()
            alpha_loss.backward()
            self.alpha_optimizer.step()
            # Detach so later critic/actor losses cannot backprop into log_alpha.
            self.alpha = self.log_alpha.exp().detach()

        if self.iter % self.policy_freq == 0:
            self.soft_update_target()

    def save_model(self, epoch, model_folder='./model_save'):
        """Save actor/critic/target weights and optimizer state for `epoch`."""
        if not os.path.exists(model_folder):
            os.makedirs(model_folder)
        model_path = os.path.join(model_folder, f'epoch_{epoch}.pth')
        torch.save({
            'actor_model_state_dict': self.actor.state_dict(),
            'critic_model_state_dict': self.critic.state_dict(),
            'critic_target_model_state_dict': self.critic_target.state_dict(),
            'actor_optimizer_state_dict': self.actor_optimizer.state_dict(),
            'critic_optimizer_state_dict': self.critic_optimizer.state_dict(),
        }, model_path)

    def load_model(self, model_path):
        """Restore all networks and optimizers from a save_model checkpoint."""
        checkpoint = torch.load(model_path)
        self.actor.load_state_dict(checkpoint['actor_model_state_dict'])
        self.critic.load_state_dict(checkpoint['critic_model_state_dict'])
        self.critic_target.load_state_dict(checkpoint['critic_target_model_state_dict'])
        self.actor_optimizer.load_state_dict(checkpoint['actor_optimizer_state_dict'])
        self.critic_optimizer.load_state_dict(checkpoint['critic_optimizer_state_dict'])
