import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

from collections import deque
    

INF = 1e5


class OfflineRLPolicy(nn.Module):
    """Decision-Transformer-style policy on top of a pretrained LM (PLM).

    Each timestep is encoded as three tokens (return-to-go, state, action),
    interleaved into the sequence (R_1, s_1, a_1, R_2, s_2, a_2, ...) and fed
    to the PLM as input embeddings.  Actions are predicted from the PLM
    outputs at the state positions.
    """

    def __init__(
            self,
            state_feature_dim,
            plm,
            plm_embed_size,
            max_length=None,
            max_ep_len=100,
            device='cuda' if torch.cuda.is_available() else 'cpu',
            device_out = None,
            residual = False, 
            conv_size = 4,  
            which_layer = -1,  # for early stopping: specify which layer to stop
            action_tanh=True,
            **kwargs
    ):
        """
        Args:
            state_feature_dim: dimensionality of one state feature vector.
            plm: pretrained language-model backbone.  Must accept
                `inputs_embeds`, `attention_mask`, `output_hidden_states`
                and `stop_layer_idx` keyword arguments and return a mapping
                with key 'last_hidden_state'.
            plm_embed_size: embedding width of the PLM.
            max_length: context window (in timesteps) used at evaluation;
                None means no truncation/padding in `get_action`.
            max_ep_len: maximum episode length (sizes the timestep embedding).
            device: device for the evaluation caches and default attention mask.
            device_out: device intended for outputs; defaults to `device`.
            residual: if True, add the layer-normed input embeddings to the
                PLM outputs before the action head.
            conv_size: unused here; kept for interface compatibility.
            which_layer: PLM layer index at which to stop early (-1 = run all).
            action_tanh: if True, squash predicted actions into [-1, 1] with
                a final Tanh.
        """
        super().__init__()

        if device_out is None:
            device_out = device

        self.max_length = max_length

        self.plm = plm
        self.plm_embed_size = plm_embed_size

        # =========== multimodal encoder (start) ===========
        self.state_feature_dim = state_feature_dim
        # +1 so that timestep index == max_ep_len is still a valid row
        self.embed_timestep = nn.Embedding(max_ep_len + 1, plm_embed_size)
        self.embed_return = nn.Linear(1, plm_embed_size)
        self.embed_action = nn.Linear(1, plm_embed_size)
        self.embed_state = nn.Linear(state_feature_dim, plm_embed_size)

        self.embed_ln = nn.LayerNorm(plm_embed_size)
        # =========== multimodal encoder (end) ===========

        # maps PLM hidden states at the state positions to a scalar action
        self.action_head = nn.Sequential(
            *([nn.Linear(plm_embed_size, 1)] + ([nn.Tanh()] if action_tanh else []))
        )

        self.device = device
        self.device_out = device_out

        # rolling caches of per-step embeddings, used only during evaluation
        self.states_dq = deque([torch.zeros((1, 0, plm_embed_size), device=device)], maxlen=max_length)
        self.returns_dq = deque([torch.zeros((1, 0, plm_embed_size), device=device)], maxlen=max_length)
        self.actions_dq = deque([torch.zeros((1, 0, plm_embed_size), device=device)], maxlen=max_length)

        self.residual = residual
        self.which_layer = which_layer
        self.modules_except_plm = nn.ModuleList([  # used to save and load modules except plm
            self.embed_timestep, self.embed_return, self.embed_action, self.embed_ln,
            self.embed_state, self.action_head
        ])

    def forward(self, states, actions, returns, timesteps, attention_mask=None, **kwargs):
        """Predict an action for every timestep (training path).

        Args:
            states: float tensor of shape (batch, seq_len, state_feature_dim).
            actions: float tensor of shape (batch, seq_len, 1).
            returns: float tensor of shape (batch, seq_len, 1) — returns-to-go.
            timesteps: long tensor of shape (batch, seq_len).
            attention_mask: optional long tensor of shape
                (batch, 3 * seq_len); 1 = attend, 0 = ignore.  Defaults to
                attending everywhere.
            **kwargs: ignored; absorbed for caller compatibility.

        Returns:
            Action predictions of shape (batch, seq_len, 1).
        """
        batch_size, seq_length = states.shape[0], states.shape[1]

        # per-modality embeddings, each (batch, seq_len, embed_size)
        action_embeddings = self.embed_action(actions)
        returns_embeddings = self.embed_return(returns)
        time_embeddings = self.embed_timestep(timesteps)

        # time embeddings play the role of positional embeddings
        action_embeddings = action_embeddings + time_embeddings
        returns_embeddings = returns_embeddings + time_embeddings
        states_embeddings = self.embed_state(states) + time_embeddings

        # interleave modalities into (R_1, s_1, a_1, R_2, s_2, a_2, ...)
        stacked_inputs = torch.stack(
            (returns_embeddings, states_embeddings, action_embeddings), dim=1
        ).permute(0, 2, 1, 3).reshape(batch_size, 3 * seq_length, self.plm_embed_size)
        stacked_inputs_ln = self.embed_ln(stacked_inputs)

        # 1 if a position can be attended to, 0 if not
        if attention_mask is None:
            attention_mask = torch.ones(
                (stacked_inputs_ln.shape[0], stacked_inputs_ln.shape[1]),
                dtype=torch.long, device=self.device
            )

        # we feed input embeddings (not word indices as in NLP) to the model
        transformer_outputs = self.plm(
            inputs_embeds=stacked_inputs_ln,
            attention_mask=attention_mask,
            output_hidden_states=True,
            stop_layer_idx=self.which_layer,
        )
        logits = transformer_outputs['last_hidden_state']
        if self.residual:
            logits = logits + stacked_inputs_ln  # residual connection around the PLM

        # un-interleave: dim 1 indexes the modality (0=return, 1=state, 2=action)
        logits = logits.reshape(batch_size, seq_length, 3, self.plm_embed_size).permute(0, 2, 1, 3)
        logits_used = logits[:, 1]  # predict actions from the state positions
        action_pred = self.action_head(logits_used)

        return action_pred

    def get_action(self, states, actions, returns, timesteps, **kwargs):
        """Predict the action for the latest timestep (evaluation path).

        Reshapes the flat histories to batch size 1, left-truncates them to
        `max_length` steps and left-pads with zeros (mask = 0 on padding),
        then runs `forward` and returns the last step's prediction.

        Returns:
            Tensor of shape (1,): the predicted action for the final step.
        """
        states = states.reshape(1, -1, self.state_feature_dim)
        actions = actions.reshape(1, -1, 1)
        returns = returns.reshape(1, -1, 1)
        timesteps = timesteps.reshape(1, -1)

        if self.max_length is not None:
            # keep only the most recent `max_length` steps
            states = states[:, -self.max_length:]
            actions = actions[:, -self.max_length:]
            returns = returns[:, -self.max_length:]
            timesteps = timesteps[:, -self.max_length:]

            # left-pad all tokens to the full sequence length; padded
            # positions get attention mask 0 so the PLM ignores them
            attention_mask = torch.cat([torch.zeros(self.max_length - states.shape[1]), torch.ones(states.shape[1])])
            attention_mask = attention_mask.to(dtype=torch.long, device=states.device).reshape(1, -1)
            states = torch.cat(
                [torch.zeros((states.shape[0], self.max_length - states.shape[1], self.state_feature_dim), device=states.device), states],
                dim=1).to(dtype=torch.float32)
            actions = torch.cat(
                [torch.zeros((actions.shape[0], self.max_length - actions.shape[1], 1),
                             device=actions.device), actions],
                dim=1).to(dtype=torch.float32)
            returns = torch.cat(
                [torch.zeros((returns.shape[0], self.max_length - returns.shape[1], 1), device=returns.device), returns],
                dim=1).to(dtype=torch.float32)
            timesteps = torch.cat(
                [torch.zeros((timesteps.shape[0], self.max_length - timesteps.shape[1]), device=timesteps.device), timesteps],
                dim=1
            ).to(dtype=torch.long)
        else:
            attention_mask = None

        # BUG FIX: the original passed an extra positional `None` (a stale
        # `rewards` argument) so the call did not match forward's signature,
        # and it unpacked forward's single return value into three names.
        action_preds = self.forward(
            states, actions, returns, timesteps, attention_mask=attention_mask, **kwargs)

        # prediction for the most recent timestep
        return action_preds[0, -1]
    

