import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence, pad_sequence, pack_sequence


class FSL_model(nn.Module):
    """Few/zero-shot intent classifier.

    Pipeline: word embedding -> BiLSTM -> multi-head self-attention ->
    capsule network. Zero-shot prediction projects the seen-class capsule
    outputs onto unseen classes with a class-similarity matrix Q.
    """

    def __init__(self, data: dict, args: dict):
        super(FSL_model, self).__init__()
        self.data = data
        self.args = args
        self.load_args()

        # Word embeddings, initialised from the pre-trained matrix in `data`.
        self.word_embeds = nn.Embedding(self.args['vocab_size'], self.args['word_emb_dim'])
        self.word_embeds.weight.data.copy_(torch.from_numpy(data['embedding']))

        # Placeholder LSTM state; forward() rebuilds it per batch with the
        # actual batch size and device.
        # Shape: (num_directions(=2) * num_layers, batch, hidden_dim).
        self.hidden = (torch.randn(2, self.args['batch_size'], self.args['hidden_dim']),
                       torch.randn(2, self.args['batch_size'], self.args['hidden_dim']))

        # BiLSTM encoder over the embedded tokens.
        self.lstm = nn.LSTM(input_size=self.args['word_emb_dim'],
                            hidden_size=self.args['hidden_dim'],
                            num_layers=1,
                            batch_first=True,
                            bidirectional=True)

        # Multi-head self-attention over the BiLSTM outputs (dim = 2 * hidden).
        self.attention = MHAttention(emb_dim=self.args['hidden_dim'] * 2,
                                     atte_dim=self.args['atte_dim'], head_num=self.args['head_num'])

        # Capsule network: one input capsule per attention head, one output
        # capsule per seen class.
        self.capsnet = CapsNet(in_caps=self.args['head_num'], in_dim=self.args['hidden_dim'] * 2,
                               out_caps=self.args['s_cnum'], out_dim=self.args['out_dim'],
                               dropout_keep=self.args['keep_prob'], r=self.args['routing'])

    def forward(self, X, y):
        """
        Parameters:
            X: tuple (padded_ids, lengths) — padded_ids sized (batch, seq_len),
               lengths holding the true length of each sequence.
            y: labels corresponding to X (unused here; kept for caller compatibility).
        Returns:
            (intent_feature, attention_weights)
        """
        batch_size = X[0].size(0)
        device = X[0].device
        # Fresh random (h0, c0) sized from the ACTUAL batch. The original used
        # args['batch_size'], which crashes on a final, smaller batch, and
        # always allocated on CPU regardless of the input's device.
        self.hidden = (torch.randn(2, batch_size, self.args['hidden_dim'], device=device),
                       torch.randn(2, batch_size, self.args['hidden_dim'], device=device))

        Xembeds = self.word_embeds(X[0])  # (batch, seq_len, word_emb_dim)
        max_leng = X[0].shape[1]          # padded sequence length

        packed_Xemb = pack_padded_sequence(Xembeds, X[1], batch_first=True, enforce_sorted=False)
        packed_lstm_out, self.hidden = self.lstm(packed_Xemb, self.hidden)
        # pad_packed_sequence returns (padded_tensor, lengths); keep the tensor
        # and pad back to max_leng so downstream shapes are stable.
        lstm_out = pad_packed_sequence(packed_lstm_out, batch_first=True, total_length=max_leng)[0]

        atte_out, atte_weight = self.attention(lstm_out)
        intent_feature = self.capsnet(atte_out)
        return intent_feature, atte_weight

    def ZeroShotPrediction(self, X, y):
        """Project seen-class capsule outputs onto unseen classes.

        Q is (u_cnum, s_cnum); caps_out is (batch, s_cnum, out_dim), so the
        broadcast matmul yields (batch, u_cnum, out_dim).
        """
        with torch.no_grad():
            Q = self.inter_class_sim(self.data['sc_vec'], self.data['uc_vec'])
            caps_out = self.forward(X, y)[0]
            new_vec = torch.matmul(Q, caps_out)
            return new_vec

    def inter_class_sim(self, seen_class, unseen_class, sigma: int = 4):
        """
        Gaussian-kernel similarity between unseen and seen classes.

        Parameters:
            seen_class:   numpy array (K, emb_dim)
            unseen_class: numpy array (L, emb_dim)
            sigma: kernel bandwidth.
        Returns:
            Q: (L, K); row l is the softmax over seen classes of -d(l, k).
        """
        sc_vec = torch.from_numpy(seen_class)
        uc_vec = torch.from_numpy(unseen_class)
        # The original Mahalanobis distance used covariance sigma^2 * I, which
        # reduces to squared Euclidean distance scaled by 1/sigma^2 — computed
        # here vectorised instead of with O(L*K) Python loops.
        diff = uc_vec[:, None, :] - sc_vec[None, :, :]     # (L, K, emb_dim)
        dist_matrix = diff.pow(2).sum(dim=-1) / (sigma ** 2)
        # exp(-d_k) / sum_k exp(-d_k) == softmax(-d) (numerically stabler).
        # Cast to the default dtype to match the original torch.zeros() result
        # (from_numpy on float64 data would otherwise yield a double Q).
        return F.softmax(-dist_matrix, dim=1).to(torch.get_default_dtype())

    def load_args(self):
        """Derive data-dependent hyper-parameters and store them in args."""
        self.args['vocab_size'] = self.data['embedding'].shape[0]
        self.args['word_emb_dim'] = self.data['embedding'].shape[1]
        self.args['max_len'] = self.data['max_len']
        self.args['s_cnum'] = np.unique(self.data['y_tr']).shape[0]  # seen classes
        self.args['u_cnum'] = np.unique(self.data['y_te']).shape[0]  # unseen classes
        self.args['out_atoms'] = self.args['s_cnum']

class MHAttention(nn.Module):
    """Multi-head self-attention pooling.

    Each head produces one attention distribution over the sequence, and the
    weighted sum of the inputs under that distribution is one pooled feature
    vector — i.e. every head acts as a separate feature extractor.
    """

    def __init__(self, emb_dim, atte_dim, head_num = 3):
        super(MHAttention, self).__init__()
        # Two-layer scorer (emb_dim -> atte_dim -> head_num), with the softmax
        # taken over dim=1 (the sequence axis) so each head is a probability
        # distribution over time steps.
        self.f1 = nn.Sequential(
            nn.Linear(emb_dim, atte_dim, bias=False),
            nn.Tanh(),
            nn.Linear(atte_dim, head_num, bias=False),
            nn.Softmax(dim=1)
        )

    def forward(self, X):
        """
        Parameters:
            X: (batch, seq, emb_dim)
        Returns:
            pooled: (batch, head_num, emb_dim) — per-head weighted sums of X.
            A:      (batch, head_num, seq)     — the attention weights.
        """
        weights = self.f1(X)          # (batch, seq, head_num)
        A = weights.transpose(1, 2)   # (batch, head_num, seq)
        pooled = torch.matmul(A, X)   # batched matmul == bmm for 3-D inputs
        return pooled, A

class CapsNet(nn.Module):
    """Capsule layer with dynamic routing-by-agreement.

    Maps (batch, in_caps, in_dim) -> (batch, out_caps, out_dim) using `r`
    routing iterations.

    NOTE(review): `dropout_keep` is accepted but never used anywhere in this
    class — confirm whether dropout was meant to be applied.
    """

    def __init__(self, in_caps, in_dim, out_caps, out_dim, dropout_keep = 0.8, r = 3):
        super(CapsNet, self).__init__()
        self.in_caps = in_caps
        self.out_caps = out_caps
        self.routings = r
        # One (out_dim, in_dim) projection per (out_caps, in_caps) pair.
        self.weight = nn.Parameter(0.01 * torch.randn(out_caps, in_caps, out_dim, in_dim))

    def forward(self, X):
        """
        Parameters:
            X: (batch_size, in_caps, in_dim)
        Returns:
            (batch_size, out_caps, out_dim)
        """
        # Prediction vectors u_hat: (batch, out_caps, in_caps, out_dim).
        x_hat = torch.squeeze(torch.matmul(self.weight, X[:, None, :, :, None]), dim=-1)
        # Detached copy so the intermediate routing iterations contribute no
        # gradient; only the final iteration backpropagates into self.weight.
        x_hat_detached = x_hat.detach()
        # Routing logits. Fixed: allocate on X's device (the original
        # hard-coded CPU, which breaks CUDA inputs) and drop the pointless
        # requires_grad=True — b is a throwaway temporary, not a parameter,
        # so tracking it only wasted autograd bookkeeping.
        b = torch.zeros(X.size(0), self.out_caps, self.in_caps, device=X.device)
        for i in range(self.routings):
            # Coupling coefficients: softmax over the output capsules.
            c = F.softmax(b, dim=1)
            if i == self.routings - 1:
                # Last iteration uses the non-detached u_hat so gradients flow.
                outputs = self.squash(torch.sum(c[:, :, :, None] * x_hat, dim=-2, keepdim=True))
            else:
                outputs = self.squash(torch.sum(c[:, :, :, None] * x_hat_detached, dim=-2, keepdim=True))
                # Agreement update: b += <v_j, u_hat_ij>.
                b = torch.sum(outputs * x_hat_detached, dim=-1) + b
        return torch.squeeze(outputs, dim=-2)

    def squash(self, inputs, axis=-1):
        """Shrink vectors to norm < 1 while preserving direction.

        NOTE(review): the denominator uses 0.5 + ||x||^2 rather than the
        canonical 1 + ||x||^2 from the CapsNet paper — confirm intentional.
        """
        norm = torch.norm(inputs, p=2, dim=axis, keepdim=True)
        scale = norm**2 / (0.5 + norm**2) / (norm + 1e-8)
        return scale * inputs