#!/usr/bin/env python
# -*- coding:utf-8 -*-

# file:sac_net.py
# author:刘浩宇
# datetime: 2023/11/29 21:55
# software: PyCharm

"""
This is function description
"""
import numpy as np
import torch
import torch.nn as nn
from torch.distributions import Normal

device = "cuda" if torch.cuda.is_available() else "cpu"


class PolicyNetContinuous(torch.nn.Module):
    """Tanh-squashed Gaussian policy for SAC with continuous actions.

    ``forward(state)`` returns a per-dimension action in (-1, 1) and the
    per-dimension log-probability corrected for the tanh change of
    variables.  Rescaling to the physical action range
    ``[fireMin, fireMax]`` is left to the caller (see the commented
    transform at the end of ``forward``).

    Args:
        state_dim: size of the input state vector.
        hidden_dim: width of the hidden layers.
        action_dim: number of action dimensions.
        fire_min / fire_max: per-dimension physical action bounds,
            stored as tensors for the optional rescaling.
        water_min / water_max: secondary bounds; stored as-is.
            NOTE(review): unused in this block — presumably consumed by
            callers; confirm before removing.
    """

    def __init__(self, state_dim, hidden_dim, action_dim,
                 fire_min: list, fire_max: list,
                 water_min=None, water_max=None):
        super(PolicyNetContinuous, self).__init__()
        # Resolve the target device locally so the class does not depend
        # on a module-level global.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.fireMin = torch.tensor(fire_min, dtype=torch.float).to(device)
        self.fireMax = torch.tensor(fire_max, dtype=torch.float).to(device)
        # Constant for a manual Gaussian log-density; kept for backward
        # compatibility although forward() no longer uses it.
        self.log_sqrt_2pi = np.log(np.sqrt(2 * np.pi))
        self.waterMin = water_min
        self.waterMax = water_max
        # Shared trunk feeding both the mean and std heads.
        self.layers = nn.Sequential(
            nn.Linear(state_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
        )
        # Mean head: unbounded output.
        self.mu = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, action_dim)
        )
        # Std head: Softplus keeps the output non-negative.
        self.std = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, action_dim),
            nn.Softplus()
        )

    def forward(self, x):
        """Sample an action for state batch ``x``.

        Returns:
            (action, log_prob): both shaped (batch, action_dim); action
            lies in (-1, 1), log_prob is the per-dimension log-density of
            the tanh-transformed Gaussian.
        """
        x = self.layers(x)
        mu = self.mu(x)
        # Small epsilon on top of Softplus keeps std strictly positive.
        std = self.std(x) + 0.001
        dist = Normal(mu, std)
        # rsample() is reparameterized sampling, so gradients flow
        # through mu and std.
        normal_sample = dist.rsample()
        log_prob = dist.log_prob(normal_sample)
        # BUG FIX: the original returned the raw Gaussian sample while
        # still subtracting the tanh Jacobian from log_prob, making the
        # action and its log-probability inconsistent.  Squash the sample
        # so the correction below matches the returned action.
        action = torch.tanh(normal_sample)
        # Change of variables: log-density of the tanh-transformed
        # distribution; 1e-7 guards against log(0).
        log_prob = log_prob - torch.log(1 - action.pow(2) + 1e-7)
        # Optional linear transform to the real action interval:
        # action = (action + 1) * (self.fireMax - self.fireMin) * 0.5 + self.fireMin
        return action, log_prob


class QValueNetContinuous(torch.nn.Module):
    """State-action value network: estimates Q(s, a) as a single scalar.

    The state and action vectors are concatenated along the feature axis
    and passed through a two-hidden-layer MLP with ReLU activations.
    """

    def __init__(self, state_dim, hidden_dim, action_dim):
        super(QValueNetContinuous, self).__init__()
        # Input width is the concatenated (state, action) vector.
        stages = [
            nn.Linear(state_dim + action_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        ]
        self.layer = nn.Sequential(*stages)

    def forward(self, x, a):
        """Return Q-values of shape (batch, 1) for states ``x`` and actions ``a``."""
        joint = torch.cat((x, a), dim=1)
        return self.layer(joint)
