import torch
import torch.nn as nn
import math
from torch.autograd import Variable

'''Attention Is All You Need'''


class Transformer1d(nn.Module):
    """Encoder-only 1-D Transformer classifier.

    Input:
        X: appears to expect (n_samples, n_length) with d_model == 1 —
           forward() inserts a trailing feature axis of size 1.
           (NOTE(review): the original docstring said
           (n_samples, n_channel, n_length); confirm against callers.)
        Y: (n_samples)

    Output:
        out: (n_samples, n_classes) raw logits (softmax is NOT applied).
    """

    def __init__(self, n_classes, n_length, d_model, nhead, dim_feedforward, dropout, activation, verbose=False):
        super(Transformer1d, self).__init__()

        # Keep every hyper-parameter around for later introspection.
        self.d_model = d_model
        self.nhead = nhead
        self.n_length = n_length
        self.dim_feedforward = dim_feedforward
        self.dropout = dropout
        self.activation = activation
        self.n_classes = n_classes
        self.verbose = verbose

        # Currently unused in forward(); kept so the buffer/state-dict
        # layout stays identical to earlier checkpoints.
        self.position_embedding = Positional_Encoding(dropout=dropout)

        layer = nn.TransformerEncoderLayer(
            d_model=d_model,
            nhead=nhead,
            dim_feedforward=dim_feedforward,
            dropout=dropout,
            activation=activation,
            batch_first=True,
        )
        self.transformer_encoder = nn.TransformerEncoder(layer, num_layers=5)
        self.dense = nn.Linear(n_length, n_classes)
        # Defined but never applied in forward(); raw logits are returned.
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        # (batch, length) -> (batch, length, 1): one scalar feature per token.
        tokens = x.unsqueeze(2)
        encoded = self.transformer_encoder(tokens)
        flat = encoded.squeeze(2)   # back to (batch, length)
        logits = self.dense(flat)   # (batch, n_classes)
        if self.verbose:
            print(tokens.shape, encoded.shape, flat.shape, logits.shape)
        return logits


class Positional_Encoding(nn.Module):
    """Sinusoidal positional encoding ("Attention Is All You Need", §3.5)
    followed by dropout.

    The table ``pe`` of shape (1, max_len, d_model) is precomputed once and
    stored via ``register_buffer`` so it moves with the module across
    devices, is saved in the state dict, and never receives gradients.

    Args:
        d_model: encoding dimension. Odd values are now supported (the
            original raised a shape-mismatch error for odd d_model > 1).
        dropout: dropout probability applied to the summed output.
        max_len: maximum sequence length covered by the table.
    """

    def __init__(self, d_model=1, dropout=0.5, max_len=5000):
        super(Positional_Encoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        # Compute the positional encodings once in log space.
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) *
                             -(math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        # Odd d_model has one fewer cosine column than sine column, so
        # slice the cosine table to fit. (The original assignment broke
        # for odd d_model > 1.)
        pe[:, 1::2] = torch.cos(position * div_term)[:, :pe[:, 1::2].size(1)]
        pe = pe.unsqueeze(0)
        self.register_buffer('pe', pe)

    def forward(self, x):
        """Add positional encodings to ``x``.

        Assumes x is (batch, seq_len) — a trailing feature axis is inserted
        and the encoding broadcasts against it. Returns
        (batch, seq_len, d_model) after dropout.
        """
        # A buffer never requires grad, so no detach/Variable wrapper is
        # needed (torch.autograd.Variable is deprecated and was a no-op here).
        x = x.unsqueeze(2) + self.pe[:, :x.size(1)]
        return self.dropout(x)


class RNN(nn.Module):
    """LSTM classifier over a single-step "sequence".

    forward() expects x of shape (batch, input_size); a length-1 time axis
    is inserted before the LSTM, and the last time step's hidden output is
    projected to class logits.

    Args:
        input_size: features per time step (default 2589, as originally
            hard-coded).
        hidden_size: LSTM hidden units (default 256).
        num_layers: stacked LSTM layers (default 3).
        num_classes: output classes (default 2).
    """

    def __init__(self, input_size=2589, hidden_size=256, num_layers=3, num_classes=2):
        super(RNN, self).__init__()
        # Original author's note: LSTM works much better than nn.RNN() here.
        self.rnn = nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            batch_first=True,   # tensors are (batch, time_step, input_size)
        )
        self.out = nn.Linear(hidden_size, num_classes)  # output layer

    def forward(self, x):
        # x: (batch, input_size) -> (batch, 1, input_size): one time step.
        x = x.unsqueeze(1)
        # None -> zero-initialized hidden and cell states.
        # r_out: (batch, time_step, hidden_size)
        # h_n, h_c: (num_layers, batch, hidden_size)
        r_out, (h_n, h_c) = self.rnn(x, None)
        # Take the last time step's output (equals h_n's top layer here).
        out = self.out(r_out[:, -1, :])
        return out


class TransformerClassifier(nn.Module):
    """Encoder-only transformer classifier.

    Input:
        X: (n_samples, n_length) with n_length == input_dim
    Output:
        out: (n_samples, num_classes) raw logits

    Fixes over the original implementation:
    * It built ``nn.Transformer(..., num_decoder_layers=0)`` and fed a fresh
      ``torch.rand`` target on every forward pass. With zero decoder layers
      the decoder reduces to its final LayerNorm applied to ``tgt``, so the
      encoder output (``memory``) was discarded entirely — the logits were a
      function of random noise and non-deterministic even in eval mode.
    * The target tensor was hard-coded ``.to("cuda")``, crashing on
      CPU-only machines.
    A plain ``nn.TransformerEncoder`` is used instead; the computation stays
    on whatever device the input lives on. Constructor and forward
    signatures are unchanged.
    """

    def __init__(self, input_dim=2589, num_classes=2, num_layers=5):
        super(TransformerClassifier, self).__init__()

        # Each sequence position is a single scalar token (d_model=1).
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=1, nhead=1, batch_first=True)
        self.transformer_encoder = nn.TransformerEncoder(
            encoder_layer, num_layers=num_layers)
        self.fc = nn.Linear(input_dim, num_classes)

    def forward(self, x):
        # (batch, length) -> (batch, length, 1): add the feature axis.
        out = x.unsqueeze(2)
        out1 = self.transformer_encoder(out)
        out2 = out1.squeeze(2)   # (batch, length)
        out3 = self.fc(out2)     # (batch, num_classes)
        return out3
