import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

from .pyraformer import SingleStepEmbedding, EncoderLayer


class Encoder(nn.Module):
    """Embeds an input sequence and runs it through a stack of attention encoder layers."""

    def __init__(self, d_model, input_size, feature_size, n_layer, n_head, d_inner_hid, d_k, d_v, num_tokens, dropout,
                 device):
        super().__init__()

        self.d_model = d_model
        self.num_heads = n_head
        # One post-norm attention layer per level of the stack.
        self.layers = nn.ModuleList(
            EncoderLayer(d_model, d_inner_hid, n_head, d_k, d_v, dropout, normalize_before=False)
            for _ in range(n_layer)
        )
        self.embedding = SingleStepEmbedding(feature_size, d_model, num_tokens, input_size, device)

    def forward(self, sequence, token):
        """Embed `sequence`/`token`, then apply every encoder layer in order."""
        encoded = self.embedding(sequence, token)
        for layer in self.layers:
            # Each layer returns (output, attention); only the output is propagated.
            encoded, _ = layer(encoded)
        return encoded


class Predictor(nn.Module):
    """Bias-free linear projection followed by a sigmoid, mapping features into (0, 1).

    NOTE(review): the original code stored ``nn.Sigmoid()`` in an attribute named
    ``softplus`` — a misleading name, since softplus and sigmoid are different
    functions. The attribute is renamed to ``activation``; the actual function
    (sigmoid) and therefore the module's outputs are unchanged.
    """

    def __init__(self, dim, output_size=1):
        """
        Args:
            dim: input feature dimension.
            output_size: number of output units (default: 1).
        """
        super().__init__()

        self.linear = nn.Linear(dim, output_size, bias=False)
        # Sigmoid squashes the linear output into (0, 1).
        self.activation = nn.Sigmoid()

        nn.init.xavier_normal_(self.linear.weight)

    def forward(self, data):
        """Project `data` and apply the sigmoid; returns values in (0, 1)."""
        out = self.linear(data)
        out = self.activation(out)
        return out


class Transformer(nn.Module):
    """Encoder stack followed by a scalar prediction head applied to the last time step."""

    def __init__(self, d_model, input_size, feature_size, n_layer, n_head, d_inner_hid, d_k, d_v, num_tokens, dropout,
                 device):
        super().__init__()

        self.encoder = Encoder(d_model, input_size, feature_size, n_layer, n_head, d_inner_hid, d_k, d_v, num_tokens,
                               dropout, device)
        # NOTE(review): "preditor" is a typo for "predictor", but the attribute
        # name is part of the state_dict keys, so it is kept as-is for
        # checkpoint and caller compatibility.
        self.preditor = Predictor(d_model, 1)

    def forward(self, data, token=None):
        """Encode `data`, keep only the final time step, and return one prediction per batch item."""
        encoded = self.encoder(data, token)
        last_step = encoded[:, -1, :]  # drop all but the final sequence position
        return self.preditor(last_step)
