import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.autograd import Variable
import math
# from torch.utils.tensorboard import SummaryWriter
from utils import set_init
import logging
module_logger = logging.getLogger("mainModule.model")

'''
    Overview:
    This module defines the actor-critic (AC) neural network.
    As is conventional, the network has three parts:
    1. A discrete head that classifies the action type.
    2. A continuous head that produces the action parameters.
    3. A value-function output.
    forward returns all of these; because of the continuous head it must
    also return mu and sigma.
    The loss likewise handles the continuous and discrete parts separately.
    References:
    https://github.com/MorvanZhou/pytorch-A3C
    https://github.com/higgsfield/RL-Adventure-2
'''


class Net(nn.Module):
    """Actor-critic network for a hybrid (discrete + continuous) action space.

    Four independent MLP heads share the same topology:
        critic        : s_dim -> scalar state value V(s)
        actor_t       : s_dim -> softmax distribution over action types
        actor_p_mu    : s_dim -> mean of a Normal over the action parameters
        actor_p_sigma : s_dim -> scale of that Normal (made positive in forward)

    Arguments:
        s_dim {int} -- size of the state vector
        a_t_dim {int} -- number of discrete action types
        a_p_dim {int} -- number of continuous action parameters
        hidden_size {int} -- base width of the hidden layers
    """

    def __init__(self, s_dim=59, a_t_dim=4, a_p_dim=6, hidden_size=512):
        super(Net, self).__init__()

        # All four heads use the same MLP topology; build them with one
        # helper so the architecture stays consistent. Attribute names (and
        # therefore state_dict keys) are unchanged.
        self.critic = self._build_mlp(s_dim, hidden_size, 1)
        # dim=-1: softmax over the action-type axis (identical to dim=1 for
        # the 2-D [batch, a_t_dim] inputs this network receives).
        self.actor_t = self._build_mlp(s_dim, hidden_size, a_t_dim,
                                       softmax_out=True)
        self.actor_p_mu = self._build_mlp(s_dim, hidden_size, a_p_dim)
        self.actor_p_sigma = self._build_mlp(s_dim, hidden_size, a_p_dim)

        self.ap_distribution = torch.distributions.Normal       # continuous params
        self.at_distribution = torch.distributions.Categorical  # discrete type
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.init_cuda()

    @staticmethod
    def _build_mlp(in_dim, hidden, out_dim, softmax_out=False):
        """Build the shared MLP stack: in -> 3h -> 2h -> h -> 256 -> 128 -> out.

        softmax_out appends a Softmax(dim=-1) so the output is a probability
        distribution (used by the discrete action-type head).
        """
        layers = [
            nn.Linear(in_dim, hidden * 3),
            nn.ReLU(),
            nn.Linear(hidden * 3, hidden * 2),
            nn.ReLU(),
            nn.Linear(hidden * 2, hidden),
            nn.ReLU(),
            nn.Linear(hidden, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, out_dim),
        ]
        if softmax_out:
            layers.append(nn.Softmax(dim=-1))
        return nn.Sequential(*layers)

    def init_cuda(self):
        """Move the whole module to the GPU when one is available."""
        if torch.cuda.is_available():
            self.cuda()

    def forward(self, x):
        """Run every head on a batch of states ``x`` of shape [batch, s_dim].

        Returns:
            (action_probs, mus, sigmas, values) -- shapes
            [batch, a_t_dim], [batch, a_p_dim], [batch, a_p_dim], [batch, 1].
        """
        action = self.actor_t(x)
        mus = self.actor_p_mu(x)
        # Bug fix: the raw linear output can be <= 0, which is an invalid
        # scale for torch.distributions.Normal. Constrain it positive with
        # softplus plus a small epsilon for numerical stability.
        sigmas = F.softplus(self.actor_p_sigma(x)) + 1e-4
        values = self.critic(x)
        return action, mus, sigmas, values

    def choose_action(self, s):
        """Sample an action for a single state ``s`` of shape [1, s_dim].

        Returns:
            [action_type, [param_samples...]] where action_type is drawn
            from the categorical head and each parameter from its own
            Normal(mu, sigma).
        """
        self.eval()  # switch this module and children to inference mode
        s = s.to(self.device)
        with torch.no_grad():  # sampling needs no autograd graph
            prob, mu, sigma, _ = self.forward(s)
        am = self.at_distribution(prob)
        module_logger.debug(prob)
        # One univariate Normal per action parameter.
        pm = [self.ap_distribution(m.view(1), sd.view(1))
              for m, sd in zip(torch.squeeze(mu, 0), torch.squeeze(sigma, 0))]
        return [am.sample().cpu().numpy()[0],
                [p.sample().cpu().numpy() for p in pm]]

    def loss_func(self, s, a_t, a_p, v_t):
        """Compute the combined A3C loss used for backpropagation.

        total = l1 + c1*l2 + c2*l3 where l1 is the policy loss, l2 the value
        error and l3 an entropy regularization term; here c_loss is l2 and
        a_loss is l1 (with the entropy bonus folded in, c2 = 0.005).

        :param s: batch of states
        :param a_t: taken discrete action types
        :param a_p: taken continuous action parameters (one per Normal)
        :param v_t: bootstrapped value targets
        :return: scalar total loss
        """
        self.train()
        prob, mu, sigma, values = self.forward(s)
        td = v_t - values          # advantage estimate
        c_loss = td.pow(2)         # critic (value-function) loss
        am = self.at_distribution(prob)
        # Sequences differ in length, so flatten the parameter heads and
        # treat each scalar as its own univariate Normal.
        mu = mu.reshape(-1)
        sigma = sigma.reshape(-1)
        # Bug fix: the original built these Normals from ``.data`` (detached)
        # tensors, so the continuous-policy heads never received gradients
        # from the policy loss. Keep mu/sigma attached here.
        pm = [self.ap_distribution(m.view(1), sd.view(1))
              for m, sd in zip(mu, sigma)]
        log_prob = sum(p.log_prob(a) for p, a in zip(pm, a_p))
        # Differential entropy of a Normal: 0.5 + 0.5*log(2*pi) + log(sigma);
        # encourages exploration.
        entropy = 0.5 + 0.5 * math.log(2 * math.pi) + sum(torch.log(p.scale) for p in pm)
        # log_prob * advantage + entropy bonus  -> continuous policy loss
        # am.log_prob(a_t) * advantage          -> discrete policy loss
        # The advantage is detached so policy gradients don't flow into the critic.
        exp_v = log_prob * td.detach() + 0.005 * entropy + am.log_prob(a_t) * td.detach().squeeze()
        a_loss = -exp_v
        total_loss = (a_loss + c_loss).mean()
        return total_loss
