import torch
import torch.nn as nn
import torch.nn.functional as F
from .textRNN import SelfAttention

class MLP(nn.Module):
    """Text classifier: embed token ids, pool tokens with a self-attention
    weighting, then score classes with a stack of ReLU-activated linear layers.

    Args:
        config: hyper-parameter namespace; fields read here:
            pre_word_vec (bool), word_vec_num (int), attn_hidden_num (int),
            dropout (float), hidden_size (list[int]), class_num (int).
        TEXT: torchtext-style field providing ``vocab`` (and, when
            ``config.pre_word_vec`` is set, pretrained ``vocab.vectors``).
    """

    def __init__(self, config, TEXT):
        super(MLP, self).__init__()
        self.config = config
        if config.pre_word_vec:
            # Pretrained word vectors are kept frozen (not fine-tuned).
            self.embed = nn.Embedding.from_pretrained(TEXT.vocab.vectors, freeze=True)
        else:
            self.embed = nn.Embedding(len(TEXT.vocab), config.word_vec_num)
        self.att = SelfAttention(config.word_vec_num, config.attn_hidden_num, config.dropout)
        # hidden_size is the full width schedule (e.g. [300, 128, 64]);
        # each consecutive pair becomes one Linear layer.
        self.hidden = nn.ModuleList([nn.Linear(config.hidden_size[i], config.hidden_size[i + 1])
                                     for i in range(len(config.hidden_size) - 1)])
        self.fc = nn.Linear(config.hidden_size[-1], config.class_num)

    def forward(self, x):
        """Return unnormalized class logits for a batch of token-id tensors.

        NOTE(review): the attention module is fed ``x.permute(1, 0, 2)`` while
        its scores multiply the *un-permuted* ``x`` — this is consistent only
        if SelfAttention expects seq-first input and returns batch-first
        scores broadcastable to (batch, seq, 1); confirm against
        ``textRNN.SelfAttention``.
        """
        x = self.embed(x)
        score = self.att(x.permute(1, 0, 2))
        # Attention-weighted sum over the token dimension
        # (presumably -> (batch, word_vec_num), per the original comment).
        x = torch.sum(score * x, dim=1)
        for fc in self.hidden:
            x = F.relu(fc(x))
        return self.fc(x)

    def weight_init(self):
        """(Re-)initialize all linear layers in place.

        Fix: the hidden layers are followed by ReLU in ``forward``, so pass
        ``nonlinearity='relu'`` to Kaiming init — the default assumes
        leaky_relu and computes a slightly different gain.
        """
        with torch.no_grad():
            for fc in self.hidden:
                nn.init.kaiming_uniform_(fc.weight, nonlinearity='relu')
                nn.init.constant_(fc.bias, 0.0)
            # The output layer has no activation after it; keep the default gain.
            nn.init.kaiming_uniform_(self.fc.weight)
            nn.init.constant_(self.fc.bias, 0.0)