import torch
import torch.nn as nn
from AI_pytorch_A3C.Tools import v_wrap, set_init, push_and_pull, record
import torch.nn.functional as F
import math, os
os.environ["OMP_NUM_THREADS"] = "1"


class Net(nn.Module):
    """Actor-critic network for A3C.

    Three output heads share two separate trunks:
      * continuous action head: ``mu``/``sigma`` of a Normal distribution,
      * discrete "attack" head: a Categorical over ``aa_dim`` choices,
      * critic head: scalar state value.

    Args:
        s_dim: state (input) dimension.
        a_dim: continuous action dimension.
        aa_dim: number of discrete attack actions.
    """

    def __init__(self, s_dim, a_dim, aa_dim):
        super(Net, self).__init__()
        self.s_dim = s_dim
        self.a_dim = a_dim
        self.aa_dim = aa_dim
        # Actor trunk and heads.
        self.a1 = nn.Linear(s_dim, 256)
        self.a2 = nn.Linear(256, 256)        # branch for the continuous action
        self.a22 = nn.Linear(256, 256)       # branch for the discrete attack action
        self.aa = nn.Linear(256, aa_dim)     # attack logits
        self.mu = nn.Linear(256, a_dim)      # mean of the continuous action
        self.sigma = nn.Linear(256, a_dim)   # std of the continuous action (exploration)
        # Critic trunk and head.
        self.c1 = nn.Linear(s_dim, 256)
        self.c2 = nn.Linear(256, 256)
        self.v = nn.Linear(256, 1)           # state value
        # NOTE(review): defined but never used in forward(); kept so existing
        # checkpoints / state_dicts stay loadable — confirm before removing.
        self.norm = torch.nn.BatchNorm1d(256)
        # FIX: a22 and aa were previously missing from initialization.
        set_init([self.a1, self.a2, self.a22, self.aa,
                  self.mu, self.sigma, self.c1, self.c2, self.v])
        self.distribution = torch.distributions.Normal          # continuous action dist.
        self.discrete_distribution = torch.distributions.Categorical  # discrete action dist.

    def forward(self, state):
        """Run both trunks and return ``(mu, sigma, attack_probs, values)``."""
        a1 = F.relu(self.a1(state))
        a2 = F.relu(self.a2(a1))
        a22 = F.relu(self.a22(a1))
        # torch.tanh replaces the deprecated F.tanh; mean bounded to (-0.17, 0.17).
        mu = 0.17 * torch.tanh(self.mu(a2))
        # softplus keeps sigma positive; +1e-4 avoids a degenerate zero std.
        sigma = 0.1 * F.softplus(self.sigma(a2)) + 0.0001
        aa = F.softmax(self.aa(a22), dim=1)  # attack-action probabilities

        c1 = F.relu(self.c1(state))
        c2 = F.relu(self.c2(c1))
        values = self.v(c2)
        return mu, sigma, aa, values

    def choose_action(self, s):
        """Sample one continuous action and one discrete attack action for state ``s``.

        Returns:
            ``[continuous_sample, attack_sample]`` as numpy arrays.
        """
        # FIX: was `self.training = False`, which does not propagate to submodules.
        self.eval()
        with torch.no_grad():  # sampling only — no gradients needed
            mu, sigma, aa, _ = self.forward(s)
        # FIX: view(2,) was hard-coded; use a_dim so the net works for any action size.
        m = self.distribution(loc=mu.view(self.a_dim, ), scale=sigma.view(self.a_dim, ))
        attack = self.discrete_distribution(aa)
        return [m.sample().numpy(), attack.sample().numpy()]

    def loss_func(self, s, a, a_, v_t):
        """Compute the actor-critic losses for one batch.

        Args:
            s: batch of states.
            a: continuous actions that were taken.
            a_: discrete attack actions that were taken.
            v_t: bootstrapped value targets.

        Returns:
            ``(total_loss, total_loss_a)`` — losses for the continuous and
            discrete heads; each includes the shared critic (TD) loss.
        """
        self.train()
        mu, sigma, attack, values = self.forward(s)
        td = v_t - values        # TD error
        c_loss = td.pow(2)       # critic loss: squared TD error

        # Continuous head: policy gradient with a small entropy bonus.
        m = self.distribution(loc=mu, scale=sigma)
        log_prob = m.log_prob(a)
        # Closed-form entropy of a Normal: 0.5 + 0.5*log(2*pi) + log(sigma).
        entropy = 0.5 + 0.5 * math.log(2 * math.pi) + torch.log(m.scale)
        exp_v = log_prob * td.detach() + 0.005 * entropy
        a_loss = -exp_v
        total_loss = (a_loss + c_loss).mean()

        # Discrete attack head: plain policy gradient (no entropy bonus).
        m_a = self.discrete_distribution(attack)
        logp = m_a.log_prob(a_)
        exp_v_a = logp * td.detach()
        a_loss_a = -exp_v_a
        total_loss_a = (c_loss + a_loss_a).mean()

        return total_loss, total_loss_a