import tensorflow as tf
import tensorflow_probability as tf_p

from src.models.helpers.activation_functions import (
    activation_penalized_tanh,
)


class ValueNetwork(tf.keras.Model):
    def __init__(
            self,
            hidden_layer_units: list,
            activation_hidden: str,
            kernel_initializer_hidden: str
    ):
        """
        Value network: a stack of dense hidden layers followed by a
        single-unit linear output head.

        Args:
            hidden_layer_units: Number of units for each hidden layer.
            activation_hidden: Hidden-layer activation; 'penalized_tanh'
                selects the custom penalized-tanh function, any other
                string is passed straight to Keras.
            kernel_initializer_hidden: Initializer for hidden-layer weights.
        """
        super().__init__()

        # Resolve the custom activation; otherwise keep the Keras identifier.
        activation = (
            activation_penalized_tanh
            if activation_hidden == 'penalized_tanh'
            else activation_hidden
        )

        # One Dense layer per entry in hidden_layer_units.
        self.hidden_layers = [
            tf.keras.layers.Dense(
                units,
                activation=activation,
                kernel_initializer=kernel_initializer_hidden,
                bias_initializer='zeros',  # biases start at zero
            )
            for units in hidden_layer_units
        ]

        # Scalar value head (linear activation).
        self.output_layer = tf.keras.layers.Dense(1, dtype=tf.float32)

    @tf.function
    def call(
            self,
            inputs
    ) -> tf.Tensor:
        """
        Forward pass.

        Args:
            inputs: Input tensor.

        Returns:
            tf.Tensor: Network output (one value per input row).
        """
        features = inputs
        for hidden in self.hidden_layers:
            features = hidden(features)
        return self.output_layer(features)

    def initialize_inputs(
            self,
            inputs
    ) -> None:
        """
        Trace each method once so the model can be saved.
        """
        self(inputs)
        self.call(inputs)


class PolicyNetwork(tf.keras.Model):
    def __init__(
            self,
            hidden_layer_units: list,
            num_actions: int,
            activation_hidden: str,
            kernel_initializer_hidden: str
    ) -> None:
        """
        Policy network: a stack of dense hidden layers followed by a
        linear output head with one unit per action.

        Args:
            hidden_layer_units: Number of units for each hidden layer.
            num_actions: Number of output actions.
            activation_hidden: Hidden-layer activation; 'penalized_tanh'
                selects the custom penalized-tanh function, any other
                string is passed straight to Keras.
            kernel_initializer_hidden: Initializer for hidden-layer weights.
        """
        super().__init__()

        # Resolve the custom activation; otherwise keep the Keras identifier.
        activation = (
            activation_penalized_tanh
            if activation_hidden == 'penalized_tanh'
            else activation_hidden
        )

        # One Dense layer per entry in hidden_layer_units.
        self.hidden_layers = [
            tf.keras.layers.Dense(
                units,
                activation=activation,
                kernel_initializer=kernel_initializer_hidden,
                bias_initializer='zeros',  # biases start at zero
            )
            for units in hidden_layer_units
        ]

        # Linear output head (logits — no softmax applied here).
        self.output_layer = tf.keras.layers.Dense(num_actions, dtype=tf.float32)

    @tf.function
    def call(
            self,
            inputs,
    ) -> tf.Tensor:
        """
        Forward pass.

        Args:
            inputs: Input tensor.

        Returns:
            tf.Tensor: Network output (one logit per action).
        """
        features = inputs
        for hidden in self.hidden_layers:
            features = hidden(features)
        return self.output_layer(features)

    def initialize_inputs(
            self,
            inputs
    ) -> None:
        """
        Trace each method once so the model can be saved.
        """
        self(inputs)
        self.call(inputs)


class PolicyNetworkSoft(tf.keras.Model):
    def __init__(
            self,
            num_actions: int,
            hidden_layer_units: list,
            activation_hidden: str = 'relu',
            kernel_initializer_hidden: str = 'glorot_uniform',
    ) -> None:
        """
        Soft (stochastic) policy network producing a diagonal Gaussian
        over actions: dense hidden layers feed two linear heads, one for
        the means and one for the log standard deviations.

        Args:
            num_actions: Number of output actions.
            hidden_layer_units: Number of units for each hidden layer.
            activation_hidden: Hidden-layer activation (default 'relu');
                'penalized_tanh' selects the custom penalized-tanh function.
            kernel_initializer_hidden: Initializer for hidden-layer weights
                (default 'glorot_uniform').
        """
        super().__init__()

        # Resolve the custom activation; otherwise keep the Keras identifier.
        if activation_hidden == 'penalized_tanh':
            activation_hidden = activation_penalized_tanh

        # Hidden layers: one Dense per entry in hidden_layer_units.
        self.hidden_layers: list = []
        for units in hidden_layer_units:
            self.hidden_layers.append(
                tf.keras.layers.Dense(
                    units=units,
                    kernel_initializer=kernel_initializer_hidden,
                    activation=activation_hidden,
                    bias_initializer='zeros',  # biases start at zero
                )
            )
        # Linear output heads for the Gaussian parameters.
        self.output_layer_means = tf.keras.layers.Dense(units=num_actions, dtype=tf.float32)
        self.output_layer_log_stds = tf.keras.layers.Dense(units=num_actions, dtype=tf.float32)

    @tf.function
    def call(
            self,
            inputs,
            training=None,
            masks=None,
    ) -> tuple[tf.Tensor, tf.Tensor]:
        """
        Forward pass returning the Gaussian parameters.

        Args:
            inputs: Input tensor.
            training: Training flag (unused).
            masks: Masks (unused).

        Returns:
            tuple[tf.Tensor, tf.Tensor]: Means and log standard deviations.
        """
        x = inputs
        for layer in self.hidden_layers:
            x = layer(x)
        means = self.output_layer_means(x)
        log_stds = self.output_layer_log_stds(x)

        # Log stds are conventionally clipped to [-20, 2] for numerical
        # stability (prevents degenerate / exploding standard deviations).
        log_stds = tf.clip_by_value(log_stds, -20, 2)

        return (
            means,
            log_stds
        )

    @tf.function
    def get_action_and_log_prob_density(
            self,
            state,
    ) -> tuple[tf.Tensor, tf.Tensor]:
        """
        Sample an action for the given state and return its log
        probability density under the policy's Gaussian.

        Args:
            state: Current state tensor (a single state or a batch).

        Returns:
            tuple[tf.Tensor, tf.Tensor]: Sampled actions and their
            per-dimension log probability densities.
        """
        # Promote a single unbatched state to a batch of one.
        if state.shape.ndims == 1:
            state = tf.expand_dims(state, axis=0)

        means, log_stds = self.call(state)
        stds = tf.exp(log_stds)
        distributions = tf_p.distributions.Normal(loc=means, scale=stds)
        actions = distributions.sample()
        action_log_prob_densities = distributions.log_prob(actions)

        return (
            actions,
            action_log_prob_densities,
        )

    def initialize_inputs(
            self,
            inputs
    ) -> None:
        """
        Trace each method once so the model can be saved.

        NOTE(review): the original file contained an accidental duplicate
        paste starting inside this method, which replaced its body with a
        stray `import` and redefined all three classes below; the
        duplicate has been removed and the method restored.
        """
        self(inputs)
        self.call(inputs)