from traceback import print_tb
from jax import grad
import jax.numpy as jnp
from jax import jit
import time
import numpy.random as npr
import jax
import jax.numpy as jnp
from jax import jit, grad, lax, random
from jax.example_libraries import optimizers
from jax.example_libraries import stax
from jax.example_libraries.stax import Dense, FanOut, Relu, Softplus, Sigmoid, FanInSum
from jax.nn import sigmoid
from functools import partial
from jax import vmap

from flax import linen as nn
from flax.training import train_state
import flax

class GRU(nn.Module):
    """Single-step GRU: one GRUCell followed by a tanh-squashed Dense readout.

    `__call__` consumes and returns the recurrent carry explicitly, so the
    module can be driven one timestep at a time (e.g. inside `lax.scan`).
    """

    in_dims: int = 64
    out_dims: int = 4
    hidden_dims: int = 64

    @nn.compact
    def __call__(self, state, input):
        # Advance the carry one step, then project the cell output to
        # out_dims and squash into [-1, 1].
        carry, cell_out = nn.GRUCell()(state, input)
        readout = nn.tanh(nn.Dense(self.out_dims)(cell_out))
        return carry, readout

    def initial_state(self, batch_size):
        """Return an all-zero carry of shape (batch_size, hidden_dims)."""
        return nn.GRUCell.initialize_carry(jax.random.PRNGKey(0), (batch_size, ), self.hidden_dims,
                                            init_fn=nn.initializers.zeros)

    def initial_state_rnd(self, batch_size, key):
        """Return a carry drawn uniformly from [-1, 1); `key` is an int PRNG seed."""
        return jax.random.uniform(
            jax.random.PRNGKey(key),
            (batch_size, self.hidden_dims),
            minval=-1,
            maxval=1,
        )

    @staticmethod
    def state_metrics(state):
        # No diagnostics reported for this architecture.
        return {}
    
class GRU_action(nn.Module):
    """GRU step whose readout is computed from the *incoming* state.

    The GRUCell advances the carry as usual, but the Dense/tanh action head
    reads the previous `state` rather than the new cell output.

    NOTE(review): the cell output is computed and discarded here, and the
    readout uses the pre-update `state` — unlike the sibling `GRU` class,
    which feeds the cell output to the readout. Confirm this asymmetry is
    intentional and not a leftover typo.
    """

    in_dims: int = 64
    out_dims: int = 4
    hidden_dims: int = 64

    @nn.compact
    def __call__(self, state, input):
        carry, _cell_out = nn.GRUCell()(state, input)  # cell output unused (see class note)
        action = nn.tanh(nn.Dense(self.out_dims)(state))
        return carry, action

    def initial_state(self, batch_size):
        """Return an all-zero carry of shape (batch_size, hidden_dims)."""
        return nn.GRUCell.initialize_carry(jax.random.PRNGKey(0), (batch_size, ), self.hidden_dims,
                                            init_fn=nn.initializers.zeros)

    def initial_state_rnd(self, batch_size, key):
        """Return a carry drawn uniformly from [-1, 1); `key` is an int PRNG seed."""
        return jax.random.uniform(
            jax.random.PRNGKey(key),
            (batch_size, self.hidden_dims),
            minval=-1,
            maxval=1,
        )

    @staticmethod
    def state_metrics(state):
        # No diagnostics reported for this architecture.
        return {}

class RNN(nn.Module):
    """Vanilla tanh RNN cell with a tanh-squashed Dense readout.

    The carry update is Dense(tanh) over the concatenation [state, input];
    the readout is a second Dense(tanh) over the new state.
    """

    out_dims: int = 4
    hidden_dims: int = 64

    @nn.compact
    def __call__(self, state, input):
        combined = jnp.concatenate((state, input), axis=-1)
        next_state = nn.tanh(nn.Dense(self.hidden_dims)(combined))
        readout = nn.tanh(nn.Dense(self.out_dims)(next_state))
        return next_state, readout

    def initial_state(self, batch_size):
        """Return an all-zero state of shape (batch_size, hidden_dims)."""
        return jnp.zeros((batch_size, self.hidden_dims))

    def initial_state_rnd(self, batch_size, key):
        """Return a state drawn uniformly from [-1, 1); `key` is an int PRNG seed."""
        return jax.random.uniform(
            jax.random.PRNGKey(key),
            (batch_size, self.hidden_dims),
            minval=-1,
            maxval=1,
        )

    @staticmethod
    def state_metrics(state):
        # No diagnostics reported for this architecture.
        return {}

class RNN_analyse(nn.Module):
    """Analysis variant of `RNN` that separates out the reward channel's effect.

    The forward pass erases the reward entry of the input (assumed to live at
    column 4 — TODO confirm against the caller), runs the recurrent update on
    the reward-free input, and additionally returns the `reward_perturbation`
    that the reward entry would have contributed to the pre-activation state.
    The blocks of commented-out lines are leftover unit-ablation experiments.

    NOTE(review): submodules are defined in `setup` while `__call__` is also
    marked `@nn.compact`; Flax generally treats setup and compact as mutually
    exclusive styles — verify this runs on the pinned flax version.
    """

    out_dims: int = 4
    hidden_dims: int = 64

    def setup(self):
        # Layers are defined once here so identical weights are applied to
        # both the intact and the reward-erased inputs in __call__.
        self.Dense_0 = nn.Dense(self.hidden_dims)
        self.Dense_1 = nn.Dense(self.out_dims)

    @nn.compact
    def __call__(self, state, input, reward_sig=None):
        # NOTE(review): `reward_sig` defaults to None but is used inside a
        # jnp.where below, which evaluates BOTH branches — callers must always
        # pass a real array here or the call will fail. Confirm call sites.

        # reward_sig = jnp.where(jnp.abs(reward_sig)<=1, 0, reward_sig)

        # erase reward
        # NOTE(review): only row 0's reward entry is zeroed — this assumes a
        # batched 2-D input with the relevant sample at index 0. Confirm.
        # input0 = jnp.where(input.ndim==1,input.at[4].set(1), input.at[:,4].set(1))
        input0 = input.at[0,4].set(0)
        i10 = jnp.concatenate((state, input0), axis=-1)

        i1 = jnp.concatenate((state, input), axis=-1)
        # new_state = self.Dense_0(i1)
        # new_state = nn.tanh(new_state)

        # Recurrent update driven by the reward-erased input; when the reward
        # flag equals -2 (presumably a special marker value — confirm), the
        # externally supplied reward signature is subtracted pre-tanh.
        new_state = self.Dense_0(i10)
        new_state = jnp.where(input[0,4]==-2, nn.tanh(new_state - 2 * reward_sig), nn.tanh(new_state))

        # new_state_masked = jnp.where(jnp.abs(reward_sig)<=1.0, 0, new_state)
        # new_state = jnp.where(input[0,4]==-2, new_state_masked, new_state)

        out = self.Dense_1(new_state)
        out = nn.tanh(out)

        # Difference between the post-tanh states with and without the reward
        # entry: the reward channel's contribution through the shared layer.
        reward_perturbation = nn.tanh(self.Dense_0(i1)) - nn.tanh(self.Dense_0(i10))

        # Leftover single-unit ablation experiments (kept for reference):
        # new_state = new_state.at[:,2].set(0)
        # new_state = new_state.at[:,4].set(0)
        # new_state = new_state.at[:,5].set(0)
        # new_state = new_state.at[:,9].set(0)
        # new_state = new_state.at[:,23].set(0)
        # new_state = new_state.at[:,31].set(0)

        # new_state = new_state.at[:,3].set(0)
        # new_state = new_state.at[:,6].set(0)
        # new_state = new_state.at[:,12].set(0)
        # new_state = new_state.at[:,13].set(0)
        # new_state = new_state.at[:,14].set(0)
        # new_state = new_state.at[:,15].set(0)
        # new_state = new_state.at[:,16].set(0)
        # new_state = new_state.at[:,20].set(0)
        # new_state = new_state.at[:,21].set(0)
        # new_state = new_state.at[:,28].set(0)
        # new_state = new_state.at[:,29].set(0)
        # new_state = new_state.at[:,30].set(0)

        return new_state, out , reward_perturbation

    def initial_state(self, batch_size):
        # Zero initialization
        state = jnp.zeros((batch_size, self.hidden_dims))
        return state

    def initial_state_rnd(self, batch_size, key):
        # random initialization (Gaussian here, unlike the uniform init used
        # by the other classes in this file)
        state = 1.5 * jax.random.normal(jax.random.PRNGKey(key), (batch_size, self.hidden_dims))
        return state

    @staticmethod
    def state_metrics(state):
        # No diagnostics reported for this architecture.
        return {}

class RNN_th_rs1(nn.Module):
    """Tanh RNN that isolates the reward channel's additive effect.

    The recurrent update is computed from a reward-erased copy of the input;
    the linear difference the reward entry would have made (the "reward
    signature") is then added back onto the post-tanh state and also returned
    to the caller for analysis.

    NOTE(review): `reward_layer` is defined in setup but never called, and the
    class mixes setup-defined submodules with an `@nn.compact` __call__ —
    verify both against the pinned flax version.
    """

    out_dims: int = 4
    hidden_dims: int = 64

    def setup(self):
        self.recurrent_layer = nn.Dense(self.hidden_dims)
        self.output_layer = nn.Dense(self.out_dims)
        self.reward_layer = nn.Dense(self.hidden_dims)  # unused (see class note)

    @nn.compact
    def __call__(self, state, input):
        # Zero the reward entry (index 4) of the input.
        # NOTE(review): jnp.where evaluates both branches eagerly, so the 1-D
        # branch cannot actually trace unless input is 2-D — confirm inputs
        # are always batched.
        erased = jnp.where(input.ndim == 1, input.at[4].set(0), input.at[:, 4].set(0))

        with_reward = jnp.concatenate((state, input), axis=-1)
        without_reward = jnp.concatenate((state, erased), axis=-1)

        # Recurrent step driven by the reward-free input.
        hidden = nn.tanh(self.recurrent_layer(without_reward))

        # Linear pre-activation contribution of the reward entry alone.
        reward_signature = self.recurrent_layer(with_reward) - self.recurrent_layer(without_reward)

        # Re-inject the reward contribution after the nonlinearity.
        hidden = hidden + reward_signature

        out = nn.tanh(self.output_layer(hidden))
        return hidden, out, reward_signature

    def initial_state(self, batch_size):
        """Return an all-zero state of shape (batch_size, hidden_dims)."""
        return jnp.zeros((batch_size, self.hidden_dims))

    def initial_state_rnd(self, batch_size, key):
        """Return a Gaussian-initialized state (std 1.5); `key` is an int PRNG seed."""
        return 1.5 * jax.random.normal(jax.random.PRNGKey(key), (batch_size, self.hidden_dims))

    @staticmethod
    def state_metrics(state):
        # No diagnostics reported for this architecture.
        return {}