import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

class MLP(nn.Module):
    """Three-layer fully connected Q-network.

    Maps a state vector of size ``n_states`` to one Q-value per action
    (``n_actions`` outputs, no final activation).
    """

    def __init__(self, n_states, n_actions, hidden_dim=128):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(n_states, hidden_dim)    # input layer
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)  # hidden layer
        self.fc3 = nn.Linear(hidden_dim, n_actions)   # output layer

    def forward(self, x):
        """Forward pass: ReLU after the first two layers, linear output."""
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
    
class ReplayBuffer(object):
    '''
    Fixed-size FIFO replay buffer backed by pre-allocated ndarrays
    to speed up storing and sampling of transitions.
    '''
    def __init__(
            self,
            capacity: int,
            obs_dim: int
        ):
        self.capacity = capacity
        # One pre-allocated array per transition field: (s, a, r, s', terminated, truncated)
        self.obs_buf = np.zeros((capacity, obs_dim), dtype=np.float32)
        self.act_buf = np.zeros((capacity, 1), dtype=np.int64)
        self.reward_buf = np.zeros(capacity, dtype=np.float32)
        self.next_obs_buf = np.zeros((capacity, obs_dim), dtype=np.float32)
        self.ter_buf = np.zeros(capacity, dtype=np.float32)  # terminated flags
        self.tru_buf = np.zeros(capacity, dtype=np.float32)  # truncated flags
        self.ptr = 0   # next write position (wraps around circularly)
        self.num = 0   # number of valid transitions currently stored
        # Fix: was only ever assigned in push(), so reading it before the
        # buffer wrapped raised AttributeError. Initialize it here.
        self.is_full = False

    def push(self, state, action, reward, next_state, terminated, truncated):
        '''
        Store one transition (s, a, r, s', ter, tru), overwriting the
        oldest entry once the buffer is full.
        '''
        self.obs_buf[self.ptr] = state
        self.act_buf[self.ptr] = action
        self.reward_buf[self.ptr] = reward
        self.next_obs_buf[self.ptr] = next_state
        self.ter_buf[self.ptr] = terminated
        self.tru_buf[self.ptr] = truncated
        if self.ptr == self.capacity - 1:
            self.is_full = True
        self.ptr = (self.ptr + 1) % self.capacity
        if self.num < self.capacity:
            self.num += 1

    def sample(self, batch_size: int):
        '''
        Sample ``batch_size`` transitions uniformly at random (with
        replacement). If fewer than ``batch_size`` transitions are stored,
        return all stored transitions in insertion order instead.

        Returns a dict with keys obs/act/reward/next_obs/ter/tru, each a
        float32 tensor. NOTE: actions are also cast to float32 here;
        callers that index with gather() must convert back to long.
        '''
        # Fix: was `self.sum`, an attribute that never exists, which raised
        # AttributeError whenever the buffer held <= batch_size transitions.
        if self.num <= batch_size:
            batch_size = self.num
            idxs = np.arange(batch_size, dtype=int)
        else:
            idxs = np.random.randint(0, high=self.num, size=batch_size)

        batch = dict(obs=self.obs_buf[idxs],
                     act=self.act_buf[idxs],
                     reward=self.reward_buf[idxs],
                     next_obs=self.next_obs_buf[idxs],
                     ter=self.ter_buf[idxs],
                     tru=self.tru_buf[idxs])
        return {k: torch.as_tensor(v, dtype=torch.float32) for k, v in batch.items()}

    def clear(self):
        '''
        Reset the buffer to empty.

        Fix: the original only zeroed the arrays, leaving ptr/num (and
        is_full) stale, so the buffer still sampled "old" slots. Resetting
        the bookkeeping makes the zeroing of the arrays redundant but
        harmless; keep it so cleared memory never leaks into a sample.
        '''
        self.obs_buf[:] = 0
        self.act_buf[:] = 0
        self.reward_buf[:] = 0
        self.next_obs_buf[:] = 0
        self.ter_buf[:] = 0
        self.tru_buf[:] = 0
        self.ptr = 0
        self.num = 0
        self.is_full = False

    def __len__(self):
        '''
        Return the number of transitions currently stored.

        Fix: the original returned len(self.obs_buf), i.e. the fixed
        capacity, which broke any `len(buffer) >= batch_size` warm-up check.
        '''
        return self.num
