import random

import torch
from torch import nn
from torch.nn import Embedding, LSTM, Linear
import torch.nn.functional as F
from config import MAXLEN
import numpy as np
import torch.nn as nn


def init_weights(model):
    """Orthogonally initialize every weight matrix of *model*.

    Iterates over all named parameters: parameters whose name contains
    ``'bias'`` are deliberately left at PyTorch's default initialization,
    while parameters whose name contains ``'weight'`` are re-initialized
    in place with :func:`torch.nn.init.orthogonal_` (this requires the
    tensor to be at least 2-D, which holds for the Embedding / LSTM /
    Linear weights used in this file).

    :param model: any ``nn.Module`` whose parameters should be initialized
    """
    for name, param in model.named_parameters():
        if 'bias' in name:
            # Keep the default bias init; orthogonal_ would fail on 1-D tensors.
            pass
        elif 'weight' in name:
            nn.init.orthogonal_(param)


class EncoderRNN(nn.Module):
    """Bidirectional LSTM encoder over embedded token sequences.

    Embeds the input token ids, optionally loading (and freezing) a
    pretrained word2vec matrix, then runs a single-layer bidirectional
    LSTM over the packed sequence.
    """

    def __init__(self, input_size, embedding_dim, hidden_dim, word2vec=None, dropout=0.3, device=None):
        """
        :param input_size: vocabulary size (number of embedding rows)
        :param embedding_dim: token embedding dimension
        :param hidden_dim: LSTM hidden size per direction
        :param word2vec: optional pretrained ``(input_size, embedding_dim)``
            numpy matrix; when given, the embedding is frozen
        :param dropout: dropout probability (module created but not applied
            in ``forward`` — kept for interface compatibility)
        :param device: device handle stored for callers' convenience
        """
        super(EncoderRNN, self).__init__()
        self.hidden_dim = hidden_dim
        self.input_size = input_size
        self.embedding_dim = embedding_dim
        self.drop_out = nn.Dropout(dropout)
        self.n_layers = 1
        self.device = device

        self.embedding = Embedding(input_size, embedding_dim)
        if word2vec is not None:
            # Copy pretrained vectors in and freeze them.
            self.embedding.load_state_dict({'weight': torch.from_numpy(word2vec)})
            self.embedding.weight.requires_grad = False

        self.bi_lstm = LSTM(embedding_dim, hidden_dim,
                            num_layers=1, bidirectional=True, batch_first=True)

    def forward(self, inputs, input_lens, hidden=None):
        """Encode a padded batch of token ids.

        :param inputs: ``(batch, max_len)`` long tensor of token ids
        :param input_lens: per-sequence true lengths (need not be sorted)
        :param hidden: optional initial ``(h_0, c_0)`` state
        :return: ``(output, hidden)`` where ``output`` is
            ``(batch, max_len, 2 * hidden_dim)``
        """
        embedded = self.embedding(inputs)
        # enforce_sorted=False lets callers pass batches in any order;
        # sorted input behaves exactly as before.
        packed_input = nn.utils.rnn.pack_padded_sequence(
            embedded, input_lens, batch_first=True, enforce_sorted=False)
        packed_output, hidden = self.bi_lstm(packed_input, hidden)
        output, _ = nn.utils.rnn.pad_packed_sequence(packed_output, batch_first=True)
        return output, hidden


class DecoderRNN(nn.Module):
    """Greedy tag decoder: at each encoder step, feeds the previously
    predicted tag's embedding concatenated with the aligned encoder
    output through an LSTM and emits log-softmax tag scores."""

    def __init__(self, tag_size, embedding_dim, hidden_dim, batch_size=15, dropout=0.3, device=None):
        """
        :param tag_size: number of output tags
        :param embedding_dim: tag embedding dimension
        :param hidden_dim: decoder hidden size; must equal the last
            dimension of the encoder outputs passed to ``forward``
        :param batch_size: nominal batch size (kept for compatibility;
            ``forward`` no longer depends on it — the actual batch size
            is taken from the inputs)
        :param dropout: dropout probability (stored, unused in forward)
        :param device: device handle stored for callers' convenience
        """
        super(DecoderRNN, self).__init__()

        self.tag_size = tag_size
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.batch_size = batch_size
        self.dropout = dropout
        self.n_layers = 1
        self.device = device

        self.embedding = Embedding(self.tag_size, self.embedding_dim)
        self.lstm = LSTM(self.embedding_dim + self.hidden_dim, self.hidden_dim, batch_first=True)
        self.tag_out = Linear(self.hidden_dim, self.tag_size)

    def init_weights(self):
        """Uniformly initialize the tag embedding in [-0.1, 0.1]."""
        self.embedding.weight.data.uniform_(-0.1, 0.1)

    def forward(self, input, context, encoder_outpus, input_lens):
        """Greedily decode one tag per encoder time step.

        :param input: ``(batch, 1)`` long tensor with the initial tag id
        :param context: unused (kept for interface compatibility)
        :param encoder_outpus: ``(batch, length, hidden_dim)`` encoder states
        :param input_lens: unused (kept for interface compatibility)
        :return: ``(batch * length, tag_size)`` log-probabilities
        """
        embedded = self.embedding(input)
        hidden = None
        decode = []
        length = encoder_outpus.size(1)

        for i in range(length):
            # Bug fix: derive the step slice's shape from the data instead
            # of the configured self.batch_size, so partial batches work.
            aligned = encoder_outpus[:, i, :].unsqueeze(1)
            _, hidden = self.lstm(torch.cat((embedded, aligned), 2), hidden)

            # hidden[0] is (1, batch, hidden_dim); project to tag scores.
            score = self.tag_out(hidden[0])
            soft_maxed = F.log_softmax(score.transpose(0, 1), dim=2)
            decode.append(soft_maxed)
            # Feed the argmax tag back in as the next step's input (greedy).
            _, tag_index = torch.max(soft_maxed, 2)
            embedded = self.embedding(tag_index)

        tag_scores = torch.cat(decode, 1)
        tag_scores = tag_scores.view(input.size(0) * length, -1)
        return tag_scores
