import torch
from torch import nn
import torch.nn.functional as F

class SelfAttention(nn.Module):
    """Additive (tanh) self-attention over the sequence dimension.

    Scores each timestep with ``v^T tanh(U x)`` and normalizes the scores
    across the sequence with a softmax, returning per-timestep weights.

    Args:
        input_dim: feature size of each timestep.
        hidden_dim: size of the intermediate attention projection.
        dropout: dropout probability applied to the input features.
    """

    def __init__(self, input_dim, hidden_dim, dropout):
        super(SelfAttention, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout  # dropout probability (float), applied in forward
        self.attention_U = nn.Linear(input_dim, hidden_dim)
        self.attention_V = nn.Linear(hidden_dim, 1)

    def forward(self, x):
        # x: (seq_len, batch, input_dim)
        # BUGFIX: gate dropout on self.training — F.dropout defaults to
        # training=True, which made eval-mode inference non-deterministic.
        x = F.dropout(x, p=self.dropout, training=self.training)
        score = self.attention_U(x)                   # (seq_len, batch, hidden_dim)
        score = torch.tanh(score)
        # BUGFIX: squeeze only the trailing dim; a bare .squeeze() also
        # dropped a batch dimension of size 1, corrupting the softmax axis
        # and the output shape for batch_size == 1.
        score = self.attention_V(score).squeeze(-1)   # (seq_len, batch)
        score = F.softmax(score, dim=0)               # normalize over seq_len
        # Return (batch, seq_len, 1) so the weights broadcast against a
        # batch-first (batch, seq_len, input_dim) tensor.
        return score.T.unsqueeze(-1)
        
class textRNN(nn.Module):
    """BiLSTM text classifier with additive self-attention pooling.

    Embeds token ids, encodes them with a 2-layer bidirectional LSTM,
    pools the hidden states with attention weights, and classifies the
    pooled vector with a linear layer.

    Args:
        config: object exposing ``pre_word_vec``, ``word_vec_num``,
            ``hidden_num``, ``attn_hidden_num``, ``dropout`` and
            ``class_num`` attributes.
        TEXT: torchtext-style field exposing ``vocab`` (and
            ``vocab.vectors`` when ``config.pre_word_vec`` is set).
    """

    def __init__(self, config, TEXT):
        super(textRNN, self).__init__()
        self.config = config
        if config.pre_word_vec:
            # Frozen pretrained vectors taken from the field's vocab.
            self.embed = nn.Embedding.from_pretrained(TEXT.vocab.vectors, freeze=True)
        else:
            # BUGFIX: the embedding was wrapped in nn.ModuleList, which is
            # not callable, so self.embed(x) raised at run time whenever
            # pre_word_vec was false. Assign the Embedding directly.
            self.embed = nn.Embedding(len(TEXT.vocab), config.word_vec_num)
        self.lstm = nn.LSTM(input_size=config.word_vec_num, hidden_size=config.hidden_num,
                                num_layers=2, bidirectional=True, batch_first=True)
        self.att = SelfAttention(config.hidden_num * 2, config.attn_hidden_num, config.dropout)
        self.fc = nn.Linear(2 * config.hidden_num, config.class_num)

    def forward(self, x):
        # x: (batch, seq_len) token ids -- batch-first assumed because the
        # LSTM is built with batch_first=True; TODO confirm data pipeline.
        x = self.embed(x)                    # (batch, seq_len, word_vec_num)
        x, _ = self.lstm(x)                  # (batch, seq_len, 2*hidden_num)
        # SelfAttention expects (seq_len, batch, dim) input and returns
        # weights shaped (batch, seq_len, 1), which broadcast against x.
        score = self.att(x.permute(1, 0, 2))
        x = torch.sum(score * x, dim=1)      # attention-weighted sum over seq
        return self.fc(x)

    def weight_init(self):
        """Xavier-initialize all weights and zero all biases."""
        with torch.no_grad():
            # Each all_weights entry is [w_ih, w_hh, b_ih, b_hh] for one
            # layer/direction of the LSTM.
            for lstm_w in self.lstm.all_weights:
                nn.init.xavier_normal_(lstm_w[0])
                nn.init.xavier_normal_(lstm_w[1])
                nn.init.constant_(lstm_w[2], 0.0)
                nn.init.constant_(lstm_w[3], 0.0)
            nn.init.xavier_normal_(self.att.attention_U.weight)
            nn.init.xavier_normal_(self.att.attention_V.weight)
            nn.init.xavier_normal_(self.fc.weight)
            nn.init.constant_(self.fc.bias, 0.0)
            nn.init.constant_(self.att.attention_U.bias, 0.0)
            nn.init.constant_(self.att.attention_V.bias, 0.0)