import torch

class MTNet(torch.nn.Module):
    """MTNet-style time-series forecaster.

    A long history ``X`` (split into ``n`` segments) and a short recent
    window ``Q`` are encoded by a shared Conv + attention-GRU encoder.
    Each long-term segment is weighted by its similarity to the short-term
    encoding, and an autoregressive (highway) linear term over the last
    ``highway_window`` steps is added to the prediction.
    """

    def __init__(self, X, Q, Y, config, scope='MTNet'):
        '''
        X: long-term series (kept for signature compatibility; data is fed
           through ``forward`` instead)
        Q: short-term window taken from the long-term series (unused here)
        Y: target values (unused here)
        config: hyper-parameter namespace; must provide T, W, D, n, K,
                highway_window, en_conv_hidden_size, en_rnn_hidden_sizes.
        '''
        super().__init__()  # was missing — required before registering submodules
        self.config = config
        self.scope = scope

        # The original referenced bare names (`lr`, `input_keep_prob`, ...)
        # that were undefined (NameError). Presumably they live on config —
        # TODO confirm with the caller; defaults below are conservative.
        self.lr = getattr(config, 'lr', 1e-3)
        self.input_keep_prob = float(getattr(config, 'input_keep_prob', 1.0))
        self.output_keep_prob = float(getattr(config, 'output_keep_prob', 1.0))

        # Tc: number of valid convolution positions along the time axis.
        self.Tc = config.T - config.W + 1
        last_rnn_hid_size = self.config.en_rnn_hidden_sizes[-1]

        # All learnable pieces are registered here so they are trained and
        # reused across calls. The original re-created fresh random
        # Conv2d/GRUCell/Linear layers inside every forward pass, so no
        # parameter could ever be learned.
        self.conv = torch.nn.Conv2d(1, config.en_conv_hidden_size,
                                    (config.W, config.D))
        # `output_keep_prob` is a keep-probability; nn.Dropout expects a
        # drop-probability, hence the conversion.
        self.conv_dropout = torch.nn.Dropout(p=1.0 - self.output_keep_prob)

        # Attention parameters (previously plain torch.rand tensors —
        # invisible to the optimizer).
        self.attr_w = torch.nn.Parameter(torch.rand(last_rnn_hid_size, self.Tc))
        self.attr_u = torch.nn.Parameter(torch.rand(self.Tc, self.Tc))
        self.attr_v = torch.nn.Parameter(torch.rand(self.Tc, 1))

        self.rnn = torch.nn.GRUCell(config.en_conv_hidden_size,
                                    last_rnn_hid_size, bias=True)

        # Output heads: long/short-term fusion and the AR highway part.
        self.pred_linear = torch.nn.Linear(
            last_rnn_hid_size * (config.n + 1), config.K)
        self.highway_linear = torch.nn.Linear(
            config.highway_window * config.D, config.K)

    def _mtnet_linear(self, x, input_features, output_features):
        '''<batch_size, input_features> => <batch_size, output_features>

        NOTE(review): kept only for compatibility. It builds a fresh,
        untrained Linear on every call (and the original body returned
        `linear_m(x)`, a NameError). ``forward`` now uses the registered
        ``pred_linear`` / ``highway_linear`` layers instead.
        '''
        linear = torch.nn.Linear(input_features, output_features)
        return linear(x)

    def _mtnet_rnns(self, rnn_input, Tc, input_size, hidden_size, bias=True):
        '''Attention-weighted GRU encoding of each segment.

        rnn_input: <batch_size, n, Tc, en_conv_hidden_size>
        Returns:   <batch_size, n, en_rnn_hidden_sizes[-1]>

        ``input_size``/``hidden_size``/``bias`` are retained for signature
        compatibility; the registered ``self.rnn`` (built in ``__init__``
        with these same sizes) is used instead of a fresh random cell.
        '''
        conv_hid = self.config.en_conv_hidden_size
        rnn_hid = self.config.en_rnn_hidden_sizes[-1]
        h_states = []
        # Iterate over the actual number of segments so the same encoder
        # works for both the long-term (n) and short-term (1) inputs.
        for k in range(rnn_input.size(1)):
            # One segment of the long-term series: <batch, conv_hid, Tc>.
            attr_input = rnn_input[:, k].permute(0, 2, 1)
            # Deterministic zero init (was torch.rand, and it hard-coded
            # config.batch_size instead of the actual batch size).
            h_state = rnn_input.new_zeros(rnn_input.size(0), rnn_hid)

            for t in range(Tc):
                # Linear score from the previous hidden state: <batch, Tc>,
                # broadcast over the conv channels (the original repeated to
                # rnn_hid, which breaks whenever conv_hid != rnn_hid).
                h_part = torch.mm(h_state, self.attr_w)
                h_part = h_part.unsqueeze(-2).repeat(1, conv_hid, 1)
                score = torch.matmul(attr_input, self.attr_u) + h_part  # <batch, conv_hid, Tc>
                score = torch.matmul(score, self.attr_v).squeeze(-1)    # <batch, conv_hid>
                # Softmax over channels -> attention weights.
                a_ks = torch.softmax(score, dim=1)
                # Attention-scaled input at step t, then GRU update.
                x_t = torch.mul(attr_input[:, :, t], a_ks)
                h_state = self.rnn(x_t, h_state)

            h_states.append(h_state.unsqueeze(1))  # <batch, 1, rnn_hid>

        return torch.cat(h_states, dim=1)  # <batch, n, rnn_hid>

    def _mtnet_encoder(self, origin_input_x, n=None, padding=0,
                       activation_func=torch.relu, is_train=True):
        '''Conv + attention-GRU encoder.

        origin_input_x: reshaped to <batch*n, 1, T, D>
        n: number of segments packed in the batch dim (defaults to config.n)
        Returns: <batch, n, en_rnn_hidden_sizes[-1]>
        '''
        if n is None:
            n = self.config.n
        Tc = self.config.T - self.config.W + 1
        # [batch*n, channels=1, height=T, width=D]
        input_x = torch.reshape(origin_input_x,
                                [-1, 1, self.config.T, self.config.D])

        # Convolution extracts cross-variable patterns over W-step windows.
        # (torch.nn.relu does not exist; torch.relu is the default now.)
        h_conv = activation_func(self.conv(input_x))  # <batch*n, conv_hid, Tc, 1>
        h_conv = self.conv_dropout(h_conv)
        # Drop the width-1 axis and move channels last BEFORE regrouping by
        # segment — a raw reshape of <B*n, C, Tc, 1> would scramble axes.
        rnn_input = h_conv.squeeze(-1).permute(0, 2, 1).reshape(
            -1, n, Tc, self.config.en_conv_hidden_size)

        # Attention-based RNN over each segment (was self.mtnet_rnns — typo).
        return self._mtnet_rnns(rnn_input, Tc,
                                self.config.en_conv_hidden_size,
                                self.config.en_rnn_hidden_sizes[-1])

    def forward(self, X, Q, Y):
        '''X: <batch, n, T, D>; Q: <batch, T, D>; Y unused. Returns <batch, K>.'''
        # Encode the long-term series (auto-correlation features).
        # NOTE(review): the paper uses separate parameter sets for c_is and
        # m_is; here one shared encoder produces both — TODO split if needed.
        c_is = self._mtnet_encoder(X, self.config.n)  # <batch, n, rnn_hid>
        # Encode the long-term series for long/short similarity scoring.
        m_is = self._mtnet_encoder(X, self.config.n)  # <batch, n, rnn_hid>
        # Encode the short-term window as a single segment.
        u = self._mtnet_encoder(
            torch.reshape(Q, [-1, 1, self.config.T, self.config.D]), 1)  # <batch, 1, rnn_hid>

        # Inner product of m and u -> similarity of each long-term segment
        # to the short-term window.
        p_is = torch.matmul(m_is, u.permute(0, 2, 1))          # <batch, n, 1>
        p_is = p_is.squeeze(-1).softmax(1).unsqueeze(-1)       # <batch, n, 1>
        # Similarity-weighted long-term encodings.
        o_is = torch.mul(c_is, p_is)                           # <batch, n, rnn_hid>

        # Long/short-term fusion head.
        pred_x = torch.cat([o_is, u], dim=1)                   # <batch, n+1, rnn_hid>
        pred_x = torch.reshape(
            pred_x,
            [-1, self.config.en_rnn_hidden_sizes[-1] * (self.config.n + 1)])
        y_pred = self.pred_linear(pred_x)                      # <batch, K>

        # Autoregressive (highway) part over the most recent steps of Q.
        highway_x = torch.reshape(
            Q[:, -self.config.highway_window:],
            [-1, self.config.highway_window * self.config.D])
        y_pred_l = self.highway_linear(highway_x)              # <batch, K>

        return y_pred + y_pred_l

    def penalty(self):
        """Regularization penalty — placeholder, not implemented yet."""
        pass