import sys
import torch
import torch.nn.functional as F
from torch import nn

import utils,function

class SOAS(nn.Module):
    """Segmented sequence model: a stacked-LSTMCell feature extractor whose
    recurrent state is gated between time steps by a learned scalar mask
    ("indicator") network, followed by a recurrent latent-inference network
    and an action-prediction head.

    Args:
        input_dim: size of a single observation vector.
        hidden_dim: width of the LSTM feature extractor.
        latent_dim: size of the latent code.
        act_dim: size of the predicted action vector.
        latent_measure: "pdf" -> latent net outputs (mu, log_var) of width
            2*latent_dim and a Gaussian sample is drawn during training;
            any other value -> latent net outputs the code directly.
        lstm_layer_num, mask_layer_num, act_layer_num, latent_layer_num:
            depths of the respective sub-networks.
        local_size: number of most-recent top-level features the mask
            network looks at.
    """
    def __init__(self,input_dim,hidden_dim,latent_dim,act_dim,latent_measure="pdf",lstm_layer_num=3,mask_layer_num=3,act_layer_num=3,latent_layer_num=3,local_size=100):
        super(SOAS,self).__init__()
        # Store the model hyper-parameters.
        self.input_dim=input_dim
        self.hidden_dim=hidden_dim
        self.latent_dim=latent_dim
        self.act_dim=act_dim
        self.latent_measure=latent_measure
        self.lstm_layer_num=lstm_layer_num
        # Fix: mask_layer_num was the only hyper-parameter not saved.
        self.mask_layer_num=mask_layer_num
        self.act_layer_num=act_layer_num
        self.latent_layer_num=latent_layer_num
        self.local_size=local_size

        # LSTM feature extractor. IMPORTANT: observations MUST be projected
        # through self.to_lstm before entering the LSTMCell stack.
        self.to_lstm=nn.Linear(input_dim,hidden_dim)
        # NOTE: the cells are kept in a plain Python list on purpose so that
        # state_dict keys stay identical to existing checkpoints; the
        # Sequential wrapper below is what registers their parameters with
        # this Module (it is never called as a Sequential).
        self.lstm_layers=[nn.LSTMCell(self.hidden_dim,self.hidden_dim) for _ in range(self.lstm_layer_num)]
        self.lstm_layers_seq=nn.Sequential(*self.lstm_layers)

        # Indicator (mask) network: consumes the concatenation of the last
        # local_size top-level features and emits one value in (0, 1) that
        # gates how much recurrent state survives into the next time step.
        mask_layers=[nn.Linear(self.local_size*hidden_dim,self.hidden_dim),nn.ReLU()]
        for _ in range(mask_layer_num-2):
            mask_layers.append(nn.Linear(self.hidden_dim,self.hidden_dim))
            mask_layers.append(nn.ReLU())
        mask_layers.append(nn.Linear(self.hidden_dim,1))
        mask_layers.append(nn.Sigmoid())
        self.mask=nn.Sequential(*mask_layers)

        # Latent-inference network: consumes [previous latent, current
        # feature]. In "pdf" mode the last layer emits (mu, log_var),
        # i.e. 2*latent_dim values; otherwise it emits the code itself.
        latent_list=[]
        for _ in range(self.latent_layer_num-1):
            latent_list.append(nn.Linear(self.latent_dim+self.hidden_dim,self.latent_dim+self.hidden_dim))
            latent_list.append(nn.ReLU())
        latent_out_dim=self.latent_dim*2 if self.latent_measure=="pdf" else self.latent_dim
        latent_list.append(nn.Linear(self.latent_dim+self.hidden_dim,latent_out_dim))
        self.latent=nn.Sequential(*latent_list)

        # Action-prediction network: consumes [latent code, current feature].
        action_list=[]
        for _ in range(self.act_layer_num-1):
            action_list.append(nn.Linear(self.latent_dim+self.hidden_dim,self.latent_dim+self.hidden_dim))
            action_list.append(nn.ReLU())
        action_list.append(nn.Linear(self.latent_dim+self.hidden_dim,self.act_dim))
        self.action=nn.Sequential(*action_list)

    def reset_for_test(self,device,epsilon=0.001):
        """Reset the per-step iteration state used by step_forward.

        Args:
            device: torch device the state buffers are created on.
            epsilon: near-zero initial mask, so the very first step starts
                from an (almost) fully blocked recurrent state.
        """
        # Fresh (h, c) pair for every LSTM layer, batch size 1.
        self.iter_hiddens=[utils.get_lstm_initial_state(1,self.hidden_dim,device) for _ in range(self.lstm_layer_num)]
        # Pretend local_size virtual past steps exist whose top features are
        # all zeros; these exist purely to feed the mask network a full window.
        self.top_feats=[torch.zeros((1,self.hidden_dim)).to(device) for _ in range(self.local_size)]
        # The latent code starts at zero.
        self.latent_variable=torch.zeros((1,self.latent_dim)).to(device)
        # Mask value carried over from the (virtual) previous step.
        self.mask_pre=epsilon

    def step_forward(self,states):
        """Run one online inference step (batch size 1).

        reset_for_test must have been called first. Returns the action
        predicted for this time step, shape (1, act_dim).
        """
        # Project the raw observation into the LSTM input space.
        states=self.to_lstm.forward(states)
        step_hiddens=[]
        # Advance every LSTM layer; the previous step's mask gates how much
        # of the recurrent state is carried over.
        for j in range(self.lstm_layer_num):
            hidden_j=self.lstm_layers[j](states,(self.iter_hiddens[j][0]*self.mask_pre,self.iter_hiddens[j][1]*self.mask_pre))
            step_hiddens.append(hidden_j)
            states=hidden_j[0]
        # BUG FIX: persist the new recurrent state. Previously step_hiddens
        # was discarded, so the LSTM state never propagated between calls
        # (self.iter_hiddens stayed at its reset value forever).
        self.iter_hiddens=step_hiddens
        # Slide the local window: drop the oldest top feature, append the
        # top-layer hidden state of the current step.
        self.top_feats.pop(0)
        self.top_feats.append(step_hiddens[-1][0])
        # Mask to be applied at the NEXT step.
        self.mask_pre=self.mask(torch.cat(self.top_feats,dim=1))
        # Update the latent code from [previous latent, current feature].
        input4Latent=torch.cat([self.latent_variable,self.top_feats[-1]],dim=1)
        stepLatent=self.latent(input4Latent)
        # BUG FIX: in "pdf" mode the latent net outputs (mu, log_var) of
        # width 2*latent_dim. Keep only mu — the eval-time code, matching
        # forward()'s non-training branch — so that next step's concat has
        # the expected width. Previously the full output was stored, which
        # made the second call crash when latent_measure == "pdf".
        self.latent_variable=stepLatent[:,:self.latent_dim]
        # Predict this step's action from [latent code, current feature].
        input4Action=torch.cat([self.latent_variable,self.top_feats[-1]],dim=1)
        stepAction=self.action(input4Action)
        return stepAction

    def forward(self,inputs,epsilon=0.001):
        """Teacher-forced pass over a whole trajectory.

        Args:
            inputs: observations, shape (batch, time, input_dim).
            epsilon: mask value assigned to the last virtual padding step,
                i.e. the recurrent state entering the first real step is
                almost fully blocked.

        Returns:
            Tuple (top_feats, masks, latent_codings, latents, actions); each
            is a list whose length equals inputs.size(1), aligned with the
            real time steps (the local_size virtual padding steps are
            stripped from top_feats and masks).
        """
        # Initial (h, c) pairs for every LSTM layer.
        iter_hiddens=[utils.get_lstm_initial_state(inputs.size(0),self.hidden_dim,inputs.device) for _ in range(self.lstm_layer_num)]

        # Masks for local_size virtual past steps: the first local_size-1
        # are 1 and the last one is a very small epsilon, so the first real
        # step starts from an (almost) blank recurrent state.
        masks=[torch.ones((inputs.shape[0],1)).to(inputs.device) for _ in range(self.local_size-1)]
        masks.append(torch.ones((inputs.shape[0],1)).to(inputs.device)*epsilon)

        # Top-level features of the local_size virtual past steps are zeros.
        top_feats=[torch.zeros((inputs.shape[0],self.hidden_dim)).to(inputs.device) for _ in range(self.local_size)]

        # Output accumulators.
        # latent_codings keeps the raw latent-network outputs (mu/log_var in
        # "pdf" mode); latents keeps the codes actually carried forward.
        latent_codings=[]
        latents=[]
        actions=[]

        # Scan the expert trajectory, producing the per-step top features.
        for i in range(inputs.size(1)):
            mask_i_1=masks[-1]
            states=self.to_lstm.forward(inputs[:,i])
            next_hiddens=[]
            for j in range(self.lstm_layer_num):
                hidden_j=self.lstm_layers[j](states,(iter_hiddens[j][0]*mask_i_1,iter_hiddens[j][1]*mask_i_1))
                next_hiddens.append(hidden_j)
                states=hidden_j[0]
            iter_hiddens=next_hiddens
            top_feats.append(iter_hiddens[-1][0])
            # Mask for the next step, computed from the last local_size
            # top features (window ends at the step just produced).
            mask_i_1=self.mask(torch.cat(top_feats[i+1:i+self.local_size+1],dim=1))
            masks.append(mask_i_1)

        # Second scan: infer the latent code at every step.
        last_latent=torch.zeros((inputs.shape[0],self.latent_dim)).to(inputs.device)
        for i in range(inputs.size(1)):
            feat4latent=torch.cat([last_latent,top_feats[i+self.local_size]],dim=1)
            latent=self.latent(feat4latent)
            # Raw output — not yet the final latent code in "pdf" mode.
            latent_codings.append(latent)
            if self.latent_measure=="pdf":
                if self.training:
                    # Reparameterized sample during training.
                    mu,log_var=torch.split(latent,self.latent_dim,dim=1)
                    latent=function.gaussian_sample(mu,log_var)
                else:
                    # Evaluation: use the mean.
                    latent=latent[:,:self.latent_dim]
            latents.append(latent)
            last_latent=latent

        # Third scan: predict the action sequence.
        for i in range(inputs.size(1)):
            latent_i=latents[i]
            feat_i=top_feats[i+self.local_size]
            feat4action=torch.cat([latent_i,feat_i],dim=1)
            action=self.action(feat4action)
            # Add a time dimension so actions can be concatenated later
            # when computing the loss.
            action=action.unsqueeze(1)
            actions.append(action)

        # Strip the virtual padding so every returned list matches the
        # observation/action sequence length.
        return top_feats[self.local_size:],masks[self.local_size:],latent_codings,latents,actions