import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable  # NOTE(review): unused in this chunk; Variable is deprecated since PyTorch 0.4
import numpy as np  # NOTE(review): unused in this chunk — confirm before removing
import random
from common.configs.tools import seed_num
# Seed both torch and random globally so parameter initialization and any
# random shuffling are reproducible across runs.
torch.manual_seed(seed_num)
random.seed(seed_num)


class BiLSTM(torch.nn.Module):
    """Bidirectional-LSTM text classifier: embedding -> BiLSTM -> 2-layer MLP head.

    Expects ``args`` to provide: embed_num, embed_dim, lstm_hidden_dim,
    lstm_num_layers, word_Embedding, pretrained_weight (when word_Embedding
    is truthy), dropout, class_num.
    """

    def __init__(self, args):
        super(BiLSTM, self).__init__()
        self.args = args
        vocab_size = args.embed_num
        hidden_dim = args.lstm_hidden_dim
        lstm_num_layers = args.lstm_num_layers
        if args.word_Embedding:
            # The embedding dim is dictated by the pretrained matrix, which
            # may differ from args.embed_dim.
            self.vocab_size, self.embed_dim = args.pretrained_weight.shape
            self.embedding = nn.Embedding.from_pretrained(
                args.pretrained_weight)
        else:
            self.embed_dim = args.embed_dim
            self.embedding = nn.Embedding(num_embeddings=vocab_size,
                                          embedding_dim=self.embed_dim,
                                          padding_idx=0,
                                          max_norm=5.0)
        # Fixes vs. original:
        #  * input size tracks the ACTUAL embedding dim (pretrained weights
        #    may not match args.embed_dim);
        #  * args.lstm_num_layers is honored (it was computed but never used);
        #  * inter-layer dropout only applies when num_layers > 1 (PyTorch
        #    warns and ignores it otherwise).
        self.lstm = nn.LSTM(self.embed_dim, hidden_dim,
                            num_layers=lstm_num_layers,
                            batch_first=True,
                            dropout=args.dropout if lstm_num_layers > 1 else 0.0,
                            bidirectional=True)
        # The head consumes BOTH directions' final hidden states
        # (2 * hidden_dim); the original fed only ht[-1] — the backward
        # direction — discarding the forward pass entirely.
        self.linear1 = nn.Linear(hidden_dim * 2, hidden_dim * 2)
        self.linear2 = nn.Linear(hidden_dim * 2, args.class_num)

        self.dropout = nn.Dropout(args.dropout)

    def forward(self, x):
        """Map token ids (batch, seq_len) to class logits (batch, class_num)."""
        x = self.embedding(x).float()
        x = self.dropout(x)
        lstm_out, (ht, ct) = self.lstm(x)
        # ht: (num_layers * 2, batch, hidden); the last two entries are the
        # top layer's final forward and backward states — use both.
        feat = torch.cat((ht[-2], ht[-1]), dim=1)
        logits = self.linear2(F.relu(self.linear1(feat)))
        return logits


class BiGRU(torch.nn.Module):
    """Bidirectional-GRU text classifier: embedding -> BiGRU -> 2-layer MLP head.

    Expects ``args`` to provide: embed_num, embed_dim, lstm_hidden_dim,
    lstm_num_layers, word_Embedding, pretrained_weight (when word_Embedding
    is truthy), dropout, class_num.
    """

    def __init__(self, args):
        super(BiGRU, self).__init__()
        self.args = args
        vocab_size = args.embed_num
        hidden_dim = args.lstm_hidden_dim
        lstm_num_layers = args.lstm_num_layers
        if args.word_Embedding:
            # The embedding dim is dictated by the pretrained matrix, which
            # may differ from args.embed_dim.
            self.vocab_size, self.embed_dim = args.pretrained_weight.shape
            self.embedding = nn.Embedding.from_pretrained(
                args.pretrained_weight)
        else:
            self.embed_dim = args.embed_dim
            self.embedding = nn.Embedding(num_embeddings=vocab_size,
                                          embedding_dim=self.embed_dim,
                                          padding_idx=0,
                                          max_norm=5.0)
        # Fixes vs. original: GRU input size tracks the actual embedding dim,
        # args.lstm_num_layers is honored, and inter-layer dropout is only
        # enabled when meaningful (num_layers > 1).
        self.gru = nn.GRU(self.embed_dim, hidden_dim,
                          num_layers=lstm_num_layers,
                          batch_first=True,
                          dropout=args.dropout if lstm_num_layers > 1 else 0.0,
                          bidirectional=True)
        # Head consumes both directions' final hidden states (2 * hidden_dim).
        self.linear1 = nn.Linear(hidden_dim * 2, hidden_dim * 2)
        self.linear2 = nn.Linear(hidden_dim * 2, args.class_num)

        self.dropout = nn.Dropout(args.dropout)

    def forward(self, x):
        """Map token ids (batch, seq_len) to class logits (batch, class_num)."""
        x = self.embedding(x).float()
        x = self.dropout(x)
        # Bug fix: nn.GRU returns (output, h_n) — there is no cell state.
        # The original `gru_out, (ht, ct) = self.gru(x)` silently split h_n
        # along dim 0, so ht[-1] then indexed the BATCH dimension and the
        # model produced a single (class_num,) vector, not (batch, class_num).
        gru_out, ht = self.gru(x)
        # ht: (num_layers * 2, batch, hidden); concatenate the top layer's
        # final forward and backward states.
        feat = torch.cat((ht[-2], ht[-1]), dim=1)
        logits = self.linear2(F.relu(self.linear1(feat)))
        # (Stray debug print of the nonexistent cell state removed.)
        return logits
