import math

import torch
import torch.nn as nn
import torch.nn.functional as F

class Transformer(nn.Module):
    def __init__(self, class_num=4, embed_dim=200, head_num=5, hidden=64, N=6, pad_size=200, dropout=0.0,
                 device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')):
        """
        Transformer encoder classifier. According to the paper Attention Is All You Need.

        Args:
        - class_num: the number of classes of prediction.
        - embed_dim: the dim of word embedding.
        - head_num: the number of attention heads.
        - hidden: the dim of hidden layer in FFN.
        - N: the number of stacked encoders.
        - pad_size: length of each (padded) sentence.
        - dropout: dropout probability applied inside sub-modules.
        - device: device the positional-encoding table is placed on.
        """
        super(Transformer, self).__init__()
        self.pad_size = pad_size
        self.device = device
        # The class defined below is Positional_Encoding and it takes pad_size
        # (sequence length), not the FFN hidden size.
        self.position_encoder = Positional_Encoding(embed_dim, pad_size, dropout=dropout, device=device)
        self.encoders = nn.ModuleList(
            [Encoder(embed_dim, head_num, hidden, dropout=dropout) for _ in range(N)]
        )
        # Flattened (pad_size * embed_dim) features -> class scores.
        self.fc = nn.Linear(pad_size * embed_dim, class_num)

    def forward(self, x):
        """
        Args:
        - x: (batch, pad_size, embed_dim) a batch of embedded sequences.

        Returns:
        - out: (batch, class_num) raw (unnormalized) class scores.
        """
        out = self.position_encoder(x)
        # nn.ModuleList is not callable; apply each encoder in sequence.
        for encoder in self.encoders:
            out = encoder(out)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out

    def loss(self, y, labels):
        """Cross-entropy loss for raw scores `y` against integer `labels`.

        forward() returns unnormalized logits, so use cross_entropy
        (log_softmax + nll_loss) rather than nll_loss directly.
        """
        return F.cross_entropy(y, labels)
    
class Encoder(nn.Module):
    def __init__(self, embed_dim, head_num, hidden, dropout=0.0):
        """
        One Transformer encoder layer: multi-head self-attention followed by a
        position-wise feed-forward network, each wrapped in a residual
        connection and layer normalization (post-norm, as in the paper).

        Args:
        - embed_dim: the dim of word embedding.
        - head_num: the number of attention heads.
        - hidden: the dim of hidden layer in FFN.
        - dropout: dropout probability passed to the sub-modules.
        """
        # super().__init__() must run before any nn.Module attribute is
        # assigned, otherwise PyTorch raises at the first assignment.
        super(Encoder, self).__init__()
        self.attention = Multi_Head_Attension(embed_dim, head_num, dropout=dropout)
        self.FFN = Positionwise_Feed_Forward(embed_dim, hidden, dropout=dropout)
        self.LN1 = nn.LayerNorm(embed_dim)
        self.LN2 = nn.LayerNorm(embed_dim)

    def forward(self, x):
        """
        Args:
        - x: (batch, pad_size, embed_dim) input sequence.

        Returns:
        - out: (batch, pad_size, embed_dim) encoded sequence.
        """
        out = self.LN1(x + self.attention(x))
        # The FFN must consume the attention output `out`, not the undefined
        # name `X` the original referenced.
        out = self.LN2(out + self.FFN(out))
        return out
    
class Positional_Encoding(nn.Module):
    def __init__(self, embed_dim, pad_size, dropout=0.0,
                 device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')):
        """
        Sinusoidal positional encoding (Attention Is All You Need, sec. 3.5).

        Args:
        - embed_dim: the dim of word embedding.
        - pad_size: length of each sentence.
        - dropout: dropout applied after adding the encoding.
        - device: device the encoding table is placed on.
        """
        super(Positional_Encoding, self).__init__()
        self.dropout = nn.Dropout(dropout)
        # pe has shape (pad_size, embed_dim): pos / 10000^(2i/d) per the paper.
        pe = torch.tensor(
            [[pos / (10000.0 ** (i // 2 * 2.0 / embed_dim)) for i in range(embed_dim)]
             for pos in range(pad_size)],
            dtype=torch.float32,
        )
        # torch.sin/torch.cos, not F.sin/F.cos (the latter do not exist).
        pe[:, 0::2] = torch.sin(pe[:, 0::2])
        pe[:, 1::2] = torch.cos(pe[:, 1::2])
        # A fixed, non-trainable table belongs in a buffer: it moves with
        # .to()/.cuda() and is saved in state_dict, but gets no gradients.
        self.register_buffer('pe', pe.to(device))

    def forward(self, x):
        """
        Args:
        - x: (batch, pad_size, embed_dim) the sequence after embedding.

        Returns:
        - out: (batch, pad_size, embed_dim) the sequence after positional encoding.
        """
        # Broadcasts (pad_size, embed_dim) over the batch dimension.
        out = x + self.pe
        out = self.dropout(out)
        return out

class Scaled_Dot_Product_Attention(nn.Module):
    def __init__(self, embed_dim):
        """
        Scaled dot-product attention: softmax(QK^T / sqrt(d)) V.

        Args:
        - embed_dim: the dimension d used for the 1/sqrt(d) scaling.
        """
        super(Scaled_Dot_Product_Attention, self).__init__()
        self.embed_dim = embed_dim

    def forward(self, Q, K, V):
        """
        Args:
        - Q: (batch_size, len_Q, dim_Q) the query.
        - K: (batch_size, len_K, dim_K) the key.
        - V: (batch_size, len_V, dim_V) the value.

        Returns:
        - out: (batch_size, len_Q, dim_V) attention-weighted values.
        """
        # torch.mm only handles 2-D tensors; matmul batches over the leading
        # dim. F.sqrt does not exist, so scale with math.sqrt.
        attention = torch.matmul(Q, K.transpose(-2, -1)) / math.sqrt(self.embed_dim)
        attention = F.softmax(attention, dim=-1)
        out = torch.matmul(attention, V)
        return out
    
class Multi_Head_Attension(nn.Module):
    # NOTE: class name keeps the original (misspelled) identifier because
    # Encoder refers to it by this name.
    def __init__(self, embed_dim, head_num, dropout=0.0):
        """
        Multi-head self-attention (Attention Is All You Need, sec. 3.2.2).

        Args:
        - embed_dim: the dim of word embedding; must be divisible by head_num.
        - head_num: the number of attention heads.
        - dropout: dropout probability applied to the output projection.
        """
        super(Multi_Head_Attension, self).__init__()
        assert embed_dim % head_num == 0
        self.head_num = head_num
        self.dim_head = embed_dim // head_num
        self.fc_Q = nn.Linear(embed_dim, head_num * self.dim_head)
        self.fc_K = nn.Linear(embed_dim, head_num * self.dim_head)
        self.fc_V = nn.Linear(embed_dim, head_num * self.dim_head)
        self.fc = nn.Linear(head_num * self.dim_head, embed_dim)
        # Each head attends over dim_head-sized vectors, so scale by
        # sqrt(dim_head) (the paper's d_k), not the full embed_dim.
        self.attention = Scaled_Dot_Product_Attention(self.dim_head)
        self.dropout = nn.Dropout(dropout)

    def _split_heads(self, t, batch_size):
        # (B, L, H*D) -> (B*H, L, D): give each head its own batch slot.
        return (t.view(batch_size, -1, self.head_num, self.dim_head)
                 .transpose(1, 2)
                 .reshape(batch_size * self.head_num, -1, self.dim_head))

    def forward(self, x):
        """
        Args:
        - x: (batch, pad_size, embed_dim) input sequence.

        Returns:
        - out: (batch, pad_size, embed_dim) attended sequence.
        """
        batch_size = x.size(0)
        Q = self._split_heads(self.fc_Q(x), batch_size)
        K = self._split_heads(self.fc_K(x), batch_size)
        V = self._split_heads(self.fc_V(x), batch_size)
        attention = self.attention(Q, K, V)
        # (B*H, L, D) -> (B, L, H*D): concatenate the heads back together.
        attention = (attention.view(batch_size, self.head_num, -1, self.dim_head)
                              .transpose(1, 2)
                              .reshape(batch_size, -1, self.head_num * self.dim_head))
        out = self.fc(attention)
        out = self.dropout(out)
        return out
    
class Positionwise_Feed_Forward(nn.Module):
    def __init__(self, embed_dim, hidden, dropout=0.0):
        """
        Position-wise feed-forward network: Linear -> ReLU -> Linear,
        applied independently at every position.

        Args:
        - embed_dim: the dim of word embedding (input and output size).
        - hidden: the dim of the hidden layer.
        - dropout: dropout probability applied to the output.
        """
        # The super() call must name this class, not a misspelling of it.
        super(Positionwise_Feed_Forward, self).__init__()
        self.Linear1 = nn.Linear(embed_dim, hidden)
        # Stored under the same name forward() uses (original assigned to the
        # typo `Lienar2` and then crashed reading `Linear2`).
        self.Linear2 = nn.Linear(hidden, embed_dim)
        # The dropout argument was previously accepted but ignored.
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """
        Args:
        - x: (batch, pad_size, embed_dim) input sequence.

        Returns:
        - out: (batch, pad_size, embed_dim) transformed sequence.
        """
        out = self.Linear1(x)
        out = F.relu(out)
        out = self.Linear2(out)
        out = self.dropout(out)
        return out
    
    
    
    
    
    
    
    
    
    
    
    
    