import torch.nn as nn
import torch
import torch.nn.functional as F
from torch.nn.utils import weight_norm
import warnings

# NOTE(review): globally silences ALL warnings for any process importing this
# module — consider scoping with warnings.catch_warnings() instead.
warnings.filterwarnings("ignore")


class CNN(nn.Module):
    """Two-layer 2-D convolutional encoder with a linear prediction head.

    Assumes input of shape (stocks, 28, in_p) so that the two convolutions
    collapse the spatial dims to 1x1 and the head sees a flat 32-dim
    feature per stock — TODO confirm against the caller.
    `in_b`, `hidden` and `rnn` are accepted for signature parity with the
    sibling models but unused here.
    """

    def __init__(self, in_p, in_b, hidden, out, rnn):
        super().__init__()
        # (in_channels, out_channels, kernel, stride): 7-row patches, stride 7
        self.cov1 = nn.Conv2d(1, 3, (7, 1), stride=(7, 1))
        # consumes the remaining 4 rows and all in_p columns at once
        self.cov2 = nn.Conv2d(3, 32, (4, in_p), stride=(1, 1))
        self.out = nn.Linear(32, out)

    def forward(self, feat_p):  # final output (stocks,1)
        """Map a (stocks, H, in_p) batch to (stocks, out) predictions."""
        x = F.normalize(feat_p).unsqueeze(1)    # add channel dim -> (N, 1, H, W)
        x = F.relu(self.cov1(x))
        x = F.relu(self.cov2(F.normalize(x)))   # renormalize over channels
        return self.out(torch.squeeze(x))       # drop the 1x1 spatial dims


class GRU(nn.Module):
    """Two-layer GRU encoder; the final time step feeds a linear head.

    `in_b` is accepted for signature parity with the sibling models but
    unused here.
    """

    def __init__(self, in_p, in_b, hidden, out, rnn):
        super().__init__()
        self.rnn = rnn
        self.rnn_p = nn.GRU(input_size=in_p, hidden_size=hidden,
                            num_layers=2, batch_first=True)
        self.fc_p = nn.Linear(in_p, hidden)  # NOTE(review): unused in forward
        self.out = nn.Linear(hidden, out)

    def forward(self, feat_p):  # final output (stocks,1)
        """Map a (stocks, seq, in_p) batch to (stocks, out) predictions."""
        seq_out, _ = self.rnn_p(F.normalize(feat_p))
        last_step = seq_out[:, -1, :]  # keep only the final time step
        return self.out(last_step)


class HAN(nn.Module):
    """Bidirectional-GRU encoder with learned attention pooling.

    Per-time-step scores from the BiGRU output produce softmax attention
    weights (beta); the attention-weighted sum over time feeds the final
    linear layer. Output shape is (stocks, 1).

    NOTE(review): `out` is accepted but the head is hard-coded to 1 output
    feature — confirm whether callers ever pass out != 1.
    """

    def __init__(self, in_p, in_b, hidden, out, rnn):
        super().__init__()
        self.hidden_size = hidden
        self.num_layers = 2
        self.rnn = rnn
        # bidirectional -> downstream features are 2*hidden wide
        self.rnn_p = nn.GRU(input_size=in_p, hidden_size=hidden, num_layers=self.num_layers, batch_first=True, bidirectional=True, dropout=0.2)
        # scores each time step's 2*hidden feature down to a scalar
        self.Wh = nn.Linear(in_features=2 * self.hidden_size, out_features=1, bias=True)
        torch.nn.init.xavier_uniform_(self.Wh.weight)
        self.fc = nn.Linear(in_features=2 * self.hidden_size, out_features=1, bias=True)

    def forward(self, feat_p):  # final output (stocks,1)
        # Fix: allocate h0 on the input's device instead of hard-coded CPU so
        # the module also works when moved to GPU.
        h0 = torch.zeros(self.num_layers * 2, feat_p.size(0), self.hidden_size,
                         device=feat_p.device)
        # NOTE(review): re-randomizing h0 on every forward makes outputs
        # nondeterministic across calls; kept to preserve training behavior.
        nn.init.orthogonal_(h0)
        h, _ = self.rnn_p(F.normalize(feat_p), h0)             # (N, T, 2*hidden)
        o_i = nn.LeakyReLU()(self.Wh(h))                       # (N, T, 1)
        beta_i = nn.Softmax(dim=1)(o_i)                        # attention over time
        V = torch.matmul(beta_i.unsqueeze(3), h.unsqueeze(2))  # (N, T, 1, 2*hidden)
        # Fix: squeeze only the singleton attention dim (was torch.squeeze(V),
        # which also collapsed the batch dim for a single-stock batch and
        # then crashed in self.fc).
        V = torch.sum(V.squeeze(2), 1)  # [batch, 2 * self.hidden_size]
        output = self.fc(V)
        return output


class LSTM(nn.Module):
    """Two-layer LSTM encoder; the final time step feeds a linear head.

    `in_b` is accepted for signature parity with the sibling models but
    unused here.
    """

    def __init__(self, in_p, in_b, hidden, out, rnn):
        super().__init__()
        self.rnn = rnn
        self.rnn_p = nn.LSTM(input_size=in_p, hidden_size=hidden, num_layers=2, batch_first=True)
        self.fc_p = nn.Linear(in_p, hidden)  # NOTE(review): unused in forward
        # Fix: the head input must match the LSTM hidden size (was hard-coded
        # nn.Linear(32, out), which crashed for any hidden != 32); this also
        # makes the block consistent with the GRU model in this file.
        self.out = nn.Linear(hidden, out)

    def forward(self, feat_p):  # final output (stocks,1)
        """Map a (stocks, seq, in_p) batch to (stocks, out) predictions."""
        feat_p, _ = self.rnn_p(F.normalize(feat_p))
        feat_p = feat_p[:, -1, :]  # keep only the final time step
        output = self.out(feat_p)
        return output


class TRANS(nn.Module):
    """Transformer-encoder model over normalized features.

    NOTE(review): fc1[1] hard-codes a sequence length of 29, so forward only
    works for (stocks, 29, in_p) inputs — confirm against the caller.
    `in_b` is accepted for signature parity with the sibling models but
    unused here.
    """

    def __init__(self, in_p, in_b, hidden, out, rnn):
        super().__init__()
        self.rnn = rnn
        self.fc_p = nn.Linear(in_p, hidden)  # project features to d_model
        encoder_layer_p = nn.TransformerEncoderLayer(d_model=hidden, nhead=2)
        self.transfromer_p = nn.TransformerEncoder(encoder_layer_p, num_layers=1)
        # fc1[0]: per-step score (hidden -> 1); fc1[1]: mix the 29 steps back to hidden
        self.fc1 = nn.ModuleList([nn.Linear(hidden, 1), nn.Linear(29, hidden)])
        # Fix: the head input must match fc1[1]'s output width `hidden` (was
        # hard-coded nn.Linear(32, out), which crashed for any hidden != 32).
        self.out = nn.Linear(hidden, out)

    def forward(self, feat_p):  # final output (stocks,1)
        """Map a (stocks, 29, in_p) batch to (stocks, out) predictions."""
        feat_p = self.transfromer_p(torch.tanh(self.fc_p(F.normalize(feat_p))))
        feat_p = self.fc1[1](torch.tanh(torch.squeeze(self.fc1[0](feat_p))))
        output = self.out(feat_p)
        return output


class Chomp1d(nn.Module):
    """Trim trailing padding from a causal Conv1d output.

    Removes the last `chomp_size` time steps of a (N, C, L) tensor so that
    a symmetrically padded convolution behaves causally.
    """

    def __init__(self, chomp_size):
        super(Chomp1d, self).__init__()
        self.chomp_size = chomp_size

    def forward(self, x):
        # Fix: with chomp_size == 0, `x[:, :, :-0]` would return an empty
        # tensor; pass the input through untouched in that case.
        if self.chomp_size == 0:
            return x.contiguous()
        return x[:, :, :-self.chomp_size].contiguous()


class TemporalBlock(nn.Module):
    """One residual block of a Temporal Convolutional Network (TCN).

    Two weight-normalized dilated Conv1d layers, each followed by Chomp1d
    (cuts the trailing `padding` steps to keep the convolution causal),
    ReLU and dropout. The block output is relu(conv_path(x) + residual(x)),
    where the residual path is a 1x1 conv when channel counts differ.
    """

    def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2):
        super(TemporalBlock, self).__init__()
        # symmetric padding is added by Conv1d; Chomp1d below removes the
        # trailing `padding` steps so no future time step leaks in
        self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size, stride=stride, padding=padding, dilation=dilation))
        self.chomp1 = Chomp1d(padding)
        self.relu1 = nn.ReLU()
        self.dropout1 = nn.Dropout(dropout)
        self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, kernel_size, stride=stride, padding=padding, dilation=dilation))
        self.chomp2 = Chomp1d(padding)
        self.relu2 = nn.ReLU()
        self.dropout2 = nn.Dropout(dropout)
        self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.dropout1, self.conv2, self.chomp2, self.relu2, self.dropout2)
        # 1x1 conv aligns channel counts on the skip connection when needed
        self.downsample = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
        self.relu = nn.ReLU()
        self.init_weights()

    def init_weights(self):
        # small-variance normal init for all conv weights
        self.conv1.weight.data.normal_(0, 0.01)
        self.conv2.weight.data.normal_(0, 0.01)
        if self.downsample is not None:
            self.downsample.weight.data.normal_(0, 0.01)

    def forward(self, x):
        """Apply the conv path plus residual to x of shape (N, C, L)."""
        out = self.net(x)
        res = x if self.downsample is None else self.downsample(x)
        return self.relu(out + res)


class TemporalConvNet(nn.Module):
    """Stack of TemporalBlocks with exponentially growing dilation.

    Level i uses dilation 2**i and num_channels[i] output channels; the
    first level consumes num_inputs channels.
    """

    def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):
        super(TemporalConvNet, self).__init__()
        # channel widths per level: input width followed by each level's output
        widths = [num_inputs] + list(num_channels)
        blocks = [
            TemporalBlock(
                widths[level], widths[level + 1], kernel_size, stride=1,
                dilation=2 ** level,
                padding=(kernel_size - 1) * (2 ** level),
                dropout=dropout,
            )
            for level in range(len(num_channels))
        ]
        self.network = nn.Sequential(*blocks)

    def forward(self, x):
        """Apply all temporal blocks to x of shape (N, C, L)."""
        return self.network(x)


class TCN_mod(nn.Module):
    """Linear embedding followed by a TemporalConvNet.

    Input and output are (N, L, C); the TCN itself operates on (N, C, L),
    so forward transposes around it. The decoder projection is currently
    disabled, so the returned features have num_channels[-1] dims.
    """

    def __init__(self, input_size, output_size, num_channels,
                 kernel_size=2, dropout=0.3, emb_dropout=0.1, tied_weights=False):
        super(TCN_mod, self).__init__()
        self.encoder = nn.Linear(input_size, output_size)
        self.tcn = TemporalConvNet(output_size, num_channels, kernel_size, dropout=dropout)
        if tied_weights:
            if num_channels[-1] != input_size:
                raise ValueError('When using the tied flag, nhid must be equal to emsize')
            # Fix: self.decoder was commented out, so tied_weights=True died
            # with AttributeError; create the decoder before sharing weights.
            # NOTE(review): forward() does not apply the decoder — confirm
            # whether tying is still meaningful here.
            self.decoder = nn.Linear(num_channels[-1], output_size)
            self.decoder.weight = self.encoder.weight
            print("Weight tied")
        self.drop = nn.Dropout(emb_dropout)
        self.emb_dropout = emb_dropout
        self.init_weights()

    def init_weights(self):
        # small-variance init for the embedding, matching TemporalBlock's convs
        self.encoder.weight.data.normal_(0, 0.01)

    def forward(self, input):
        """Input ought to have dimension (N, C_in, L_in), where L_in is the seq_len; here the input is (N, L, C)"""
        emb = self.drop(self.encoder(input))
        y = self.tcn(emb.transpose(1, 2)).transpose(1, 2)
        return y.contiguous()


class TCN(nn.Module):
    """TCN-based encoder; the final time step feeds a linear head.

    `in_b`, `hidden` and `rnn` are accepted for signature parity with the
    sibling models; `hidden` and `in_b` are unused here.
    """

    def __init__(self, in_p, in_b, hidden, out, rnn):
        super().__init__()
        self.rnn = rnn
        # embed in_p -> 64, then run TCN channels [64, 32, 32]
        self.encoder_p = TCN_mod(in_p, 64, [64, 32, 32])
        self.out = nn.Linear(32, out)  # 32 == last TCN channel width

    def forward(self, feat_p):  # final output (stocks,1)
        """Map a (stocks, seq, in_p) batch to (stocks, out) predictions."""
        encoded = self.encoder_p(F.normalize(feat_p))
        last_step = encoded[:, -1, :]  # keep only the final time step
        return self.out(last_step)
