import random

import torch
from torch import nn
from torch.nn import Embedding, LSTM, Linear, LSTMCell, Parameter
import torch.nn.functional as F
from config import MAXLEN
import numpy as np
import torch.nn as nn
from decoder import LSTMd


def init_weights(model, hidden_dim):
    """Apply an LSTM-friendly initialization to *model*'s parameters.

    Forget-gate slice of each input bias is set to 1.0 (helps gradient
    flow early in training), hidden biases are zeroed, and recurrent
    weight matrices are made orthogonal. Other parameters are untouched.

    :param model: an nn.Module whose LSTM parameters follow the standard
        ``bias_ih`` / ``bias_hh`` / ``weight_hh`` naming
    :param hidden_dim: hidden size, used to locate the forget-gate slice
    """
    forget_gate = slice(hidden_dim, 2 * hidden_dim)
    for name, param in model.named_parameters():
        if 'bias_ih' in name:
            nn.init.constant_(param[forget_gate], 1.0)
        elif 'bias_hh' in name:
            nn.init.constant_(param, 0.0)
        elif 'weight_hh' in name:
            nn.init.orthogonal_(param)


class EncoderRNN(nn.Module):
    """Bidirectional LSTM encoder over embedded token sequences.

    :param input_size: vocabulary size
    :param embedding_dim: token embedding dimension
    :param hidden_dim: LSTM hidden size per direction
    :param word2vec: optional pretrained embedding matrix (numpy array)
    :param dropout: dropout probability applied to embeddings
    :param device: torch device on which state tensors are created
    """

    def __init__(self, input_size, embedding_dim, hidden_dim, word2vec=None, dropout=0.3, device=None):
        super(EncoderRNN, self).__init__()
        self.hidden_dim = hidden_dim
        self.input_size = input_size
        self.embedding_dim = embedding_dim
        self.drop_out = nn.Dropout(p=dropout)
        self.n_layers = 1
        self.device = device

        self.embedding = Embedding(input_size, embedding_dim, padding_idx=0)
        if word2vec is not None:
            # Load pretrained vectors; weights stay trainable.
            self.embedding.load_state_dict({'weight': torch.from_numpy(word2vec)})

        self.bi_lstm = LSTM(embedding_dim, hidden_dim,
                            num_layers=1, bidirectional=True, batch_first=True)
        # Unused by forward(), but kept so existing checkpoints still load.
        self.f_lstm = LSTM(embedding_dim, hidden_dim, num_layers=1, batch_first=True)
        self.b_lstm = LSTM(embedding_dim, hidden_dim, num_layers=1, batch_first=True)

    def reverse_input(self, inputs):
        """Reverse each sequence along the time axis (dim 1), keeping dtype.

        torch.flip replaces the original per-sample Python loop and the
        torch.tensor(tensor) re-wrap, which copied data and raised a
        UserWarning.
        """
        return inputs.flip(1).to(self.device)

    def init_hidden(self, input):
        """Zero (h0, c0) for the bi-LSTM: shape (2, batch, hidden_dim)."""
        shape = (self.n_layers * 2, input.size(0), self.hidden_dim)
        hidden = torch.zeros(*shape, device=self.device)
        context = torch.zeros(*shape, device=self.device)
        return (hidden, context)

    def forward(self, inputs, hidden=None):
        """Embed, apply dropout, and run the bidirectional LSTM.

        :param inputs: LongTensor (batch, seq_len) of token ids
        :param hidden: optional initial (h0, c0); zeros when None.
            (The original ignored this argument; it is now honored.)
        :return: (output (batch, seq_len, 2*hidden_dim), final (h, c))
        """
        embedded = self.drop_out(self.embedding(inputs))
        if hidden is None:
            hidden = self.init_hidden(inputs)
        output, hidden = self.bi_lstm(embedded, hidden)
        return output, hidden


class EncoderRNN2(nn.Module):
    """Bidirectional encoder built from two unidirectional LSTMs.

    Runs one LSTM over the sequence and another over its reversal, then
    concatenates the (time-aligned) outputs and final states.

    :param input_size: vocabulary size
    :param embedding_dim: token embedding dimension
    :param hidden_dim: hidden size of each direction's LSTM
    :param word2vec: optional pretrained embedding matrix (numpy array)
    :param dropout: dropout probability applied to embeddings
    :param device: torch device on which state tensors are created
    """

    def __init__(self, input_size, embedding_dim, hidden_dim, word2vec=None, dropout=0.5, device=None):
        super(EncoderRNN2, self).__init__()
        self.hidden_dim = hidden_dim
        self.input_size = input_size
        self.embedding_dim = embedding_dim
        self.drop_out = nn.Dropout(p=dropout)
        self.n_layers = 1
        self.device = device

        self.embedding = Embedding(input_size, embedding_dim, padding_idx=0)
        if word2vec is not None:
            self.embedding.load_state_dict({'weight': torch.from_numpy(word2vec)})

        self.f_lstm = LSTM(embedding_dim, hidden_dim, num_layers=1, batch_first=True)
        self.b_lstm = LSTM(embedding_dim, hidden_dim, num_layers=1, batch_first=True)

    def reverse_input(self, inputs):
        """Reverse each sequence along the time axis (dim 1), keeping dtype.

        torch.flip replaces the original per-sample Python loop and the
        torch.tensor(tensor) re-wrap (extra copy + UserWarning).
        """
        return inputs.flip(1).to(self.device)

    def init_hidden(self, input):
        """Zero (h0, c0) for one direction: shape (1, batch, hidden_dim)."""
        shape = (self.n_layers, input.size(0), self.hidden_dim)
        hidden = torch.zeros(*shape, device=self.device)
        context = torch.zeros(*shape, device=self.device)
        return (hidden, context)

    def forward(self, inputs, hidden=None):
        """Run forward and backward LSTMs and concatenate their results.

        :param inputs: LongTensor (batch, seq_len) of token ids
        :param hidden: optional initial (h0, c0) shared by both directions;
            zeros when None. (The original ignored this argument.)
        :return: (output (batch, seq_len, 2*hidden_dim),
                  (h (1, batch, 2*hidden_dim), c (1, batch, 2*hidden_dim)))
        """
        embedded = self.drop_out(self.embedding(inputs))
        inputs_r = self.reverse_input(inputs)
        embedded_r = self.drop_out(self.embedding(inputs_r))

        if hidden is None:
            hidden = self.init_hidden(inputs)
        o_f, h_f = self.f_lstm(embedded, hidden)
        o_b, h_b = self.b_lstm(embedded_r, hidden)

        # Flip the backward outputs back so both directions align per step.
        output = torch.cat((o_f, self.reverse_input(o_b)), 2)
        hidden = (torch.cat((h_f[0], h_b[0]), 2), torch.cat((h_f[1], h_b[1]), 2))
        return output, hidden


class DecoderRNN(nn.Module):
    """Tag decoder: one LSTM step per encoder time step.

    At each step the aligned encoder output is concatenated with the
    previous score vector (a learned SOS vector at t=0) and fed to the
    LSTM; tag log-probabilities are emitted per step.

    :param tag_size: number of output tag classes
    :param dist_size: number of distance classes (unused here; kept for
        interface parity with DecoderRNN2)
    :param hidden_dim: decoder hidden size; must equal the last dim of
        the encoder outputs fed to forward()
    :param device: torch device on which state tensors are created
    """

    def __init__(self, tag_size, dist_size, hidden_dim, device=None):
        super(DecoderRNN, self).__init__()
        self.tag_size = tag_size
        self.hidden_dim = hidden_dim
        self.dist_size = dist_size
        self.n_layers = 1
        self.device = device
        # Input is [aligned encoder output ; previous score vector].
        self.lstm = LSTM(self.hidden_dim + self.hidden_dim, self.hidden_dim, batch_first=True)
        self.tag_out = Linear(self.hidden_dim, self.hidden_dim)
        self.tag_pre = Linear(self.hidden_dim, self.tag_size)
        # Learned start-of-sequence vector used as the t=0 score input.
        self.SOS = Parameter(torch.Tensor(self.hidden_dim))
        self.init_weights()

    def init_weights(self):
        """Zero the SOS start vector."""
        self.SOS.data.fill_(0)

    def init_hidden(self, input):
        """Zero (h0, c0) of shape (1, batch, hidden_dim)."""
        shape = (self.n_layers, input.size(0), self.hidden_dim)
        return (torch.zeros(*shape, device=self.device),
                torch.zeros(*shape, device=self.device))

    def init_hidden2(self, enc_hidden):
        """Merge a 2-direction encoder state into a single-layer state.

        A state that is already single-layer is returned unchanged.
        """
        if enc_hidden[0].size(0) == 1:
            return enc_hidden
        hidden = torch.cat((enc_hidden[0][0], enc_hidden[0][1]), 1).unsqueeze(0)
        context = torch.cat((enc_hidden[1][0], enc_hidden[1][1]), 1).unsqueeze(0)
        return (hidden, context)

    def get_t0(self, enc_hidden):
        """Merge directions then concatenate (h, c): (batch, 1, 4*enc_dim)."""
        hidden = torch.cat((enc_hidden[0][0], enc_hidden[0][1]), 1).unsqueeze(0)
        context = torch.cat((enc_hidden[1][0], enc_hidden[1][1]), 1).unsqueeze(0)
        return torch.cat((hidden, context), 2).transpose(0, 1)

    def forward(self, encoder_outpus, target=None, enc_hidden=None, teacher_forcing=True):
        """Decode tag log-probabilities step by step.

        :param encoder_outpus: (batch, length, hidden_dim) encoder outputs
        :param target: unused; kept for interface compatibility
        :param enc_hidden: encoder final state, merged via init_hidden2
        :param teacher_forcing: unused; kept for interface compatibility
        :return: (tag_scores (batch*length, tag_size) log-probabilities,
                  decode_dist — always an empty list in this variant)
        """
        batch_size, length, _ = encoder_outpus.size()
        decode = []
        decode_dist = []
        hidden = self.init_hidden2(enc_hidden)
        # expand() broadcasts SOS per batch element without the copy that
        # torch.cat([SOS] * batch_size) performed.
        score_T = self.SOS.view(1, 1, -1).expand(batch_size, 1, -1)

        for i in range(length):
            aligned = encoder_outpus[:, i, :].view(batch_size, 1, -1)
            _, hidden = self.lstm(torch.cat((aligned, score_T), 2), hidden)

            # Next step's score input comes from the new hidden state.
            score_T = torch.tanh(self.tag_out(hidden[0]).transpose(0, 1))
            score = self.tag_pre(score_T)
            soft_maxed = F.log_softmax(score, dim=2)
            decode.append(soft_maxed)

        tag_scores = torch.cat(decode, 1)
        tag_scores = tag_scores.view(batch_size * length, -1)
        return tag_scores, decode_dist

# Decoder variant that additionally predicts the relative distance
class DecoderRNN2(nn.Module):
    """Tag decoder that also predicts a relative-distance class per step.

    Same stepping scheme as DecoderRNN, plus a second linear head
    (pre_dist) producing distance log-probabilities.

    :param tag_size: number of output tag classes
    :param dist_size: number of relative-distance classes
    :param hidden_dim: decoder hidden size; must equal the last dim of
        the encoder outputs fed to forward()
    :param device: torch device on which state tensors are created
    """

    def __init__(self, tag_size, dist_size, hidden_dim, device=None):
        super(DecoderRNN2, self).__init__()
        self.tag_size = tag_size
        self.hidden_dim = hidden_dim
        self.dist_size = dist_size
        self.n_layers = 1
        self.device = device
        # Input is [aligned encoder output ; previous score vector].
        self.lstm = LSTM(self.hidden_dim + self.hidden_dim, self.hidden_dim, batch_first=True)
        self.tag_out = Linear(self.hidden_dim, self.hidden_dim)
        self.tag_pre = Linear(self.hidden_dim, self.tag_size)
        self.pre_dist = Linear(self.hidden_dim, self.dist_size)
        # Learned start-of-sequence vector used as the t=0 score input.
        self.SOS = Parameter(torch.Tensor(self.hidden_dim))
        self.init_weights()

    def init_weights(self):
        """Zero the SOS start vector."""
        self.SOS.data.fill_(0)

    def init_hidden(self, input):
        """Zero (h0, c0) of shape (1, batch, hidden_dim)."""
        shape = (self.n_layers, input.size(0), self.hidden_dim)
        return (torch.zeros(*shape, device=self.device),
                torch.zeros(*shape, device=self.device))

    def init_hidden2(self, enc_hidden):
        """Merge a 2-direction encoder state into a single-layer state.

        A state that is already single-layer is returned unchanged.
        """
        if enc_hidden[0].size(0) == 1:
            return enc_hidden
        hidden = torch.cat((enc_hidden[0][0], enc_hidden[0][1]), 1).unsqueeze(0)
        context = torch.cat((enc_hidden[1][0], enc_hidden[1][1]), 1).unsqueeze(0)
        return (hidden, context)

    def get_t0(self, enc_hidden):
        """Merge directions then concatenate (h, c): (batch, 1, 4*enc_dim)."""
        hidden = torch.cat((enc_hidden[0][0], enc_hidden[0][1]), 1).unsqueeze(0)
        context = torch.cat((enc_hidden[1][0], enc_hidden[1][1]), 1).unsqueeze(0)
        return torch.cat((hidden, context), 2).transpose(0, 1)

    def forward(self, encoder_outpus, target=None, enc_hidden=None, teacher_forcing=True):
        """Decode tag and distance log-probabilities step by step.

        :param encoder_outpus: (batch, length, hidden_dim) encoder outputs
        :param target: unused; kept for interface compatibility
        :param enc_hidden: encoder final state, merged via init_hidden2
        :param teacher_forcing: unused; kept for interface compatibility
        :return: (tag_scores (batch*length, tag_size),
                  dist_scores (batch*length, dist_size)) — both log-probs
        """
        batch_size, length, _ = encoder_outpus.size()
        decode = []
        decode_dist = []
        hidden = self.init_hidden2(enc_hidden)
        # expand() broadcasts SOS per batch element without the copy that
        # torch.cat([SOS] * batch_size) performed.
        score_T = self.SOS.view(1, 1, -1).expand(batch_size, 1, -1)

        for i in range(length):
            aligned = encoder_outpus[:, i, :].view(batch_size, 1, -1)
            _, hidden = self.lstm(torch.cat((aligned, score_T), 2), hidden)

            # Next step's score input comes from the new hidden state.
            score_T = torch.tanh(self.tag_out(hidden[0]).transpose(0, 1))
            score = self.tag_pre(score_T)

            score_dist = self.pre_dist(score_T)
            soft_maxed = F.log_softmax(score, dim=2)
            dist_soft_maxed = F.log_softmax(score_dist, dim=2)
            decode.append(soft_maxed)
            decode_dist.append(dist_soft_maxed)

        tag_scores = torch.cat(decode, 1)
        tag_scores = tag_scores.view(batch_size * length, -1)
        dist_scores = torch.cat(decode_dist, 1)
        dist_scores = dist_scores.view(batch_size * length, -1)
        return tag_scores, dist_scores


class E2EModel_cus(nn.Module):
    """End-to-end tagger: EncoderRNN feeding the custom DecoderRNN."""

    def __init__(self, input_size, embedding_dim, enc_dim, w2v,
                 tag_size, dec_dim, dist_size, enc_drop=0.5, device=None):
        super(E2EModel_cus, self).__init__()
        self.encoder = EncoderRNN(input_size, embedding_dim, enc_dim, w2v, enc_drop, device)
        self.decoder = DecoderRNN(tag_size, dist_size, dec_dim, device)

    def forward(self, inputs, target=None, teacher_forcing=True):
        """Encode token ids, then decode per-step tag scores."""
        encoded, state = self.encoder(inputs)
        return self.decoder(encoded, target, state, teacher_forcing)


class E2EModel(nn.Module):
    """End-to-end tagger: EncoderRNN feeding the external LSTMd decoder."""

    def __init__(self, input_size, embedding_dim, enc_dim, w2v,
                 tag_size, dec_dim, enc_drop=0.5, device=None):
        super(E2EModel, self).__init__()
        self.encoder = EncoderRNN(input_size, embedding_dim, enc_dim, w2v, enc_drop, device)
        # The encoder is bidirectional, so the decoder's input is 2*enc_dim wide.
        self.decoder = LSTMd(enc_dim * 2, dec_dim, device)
        self.tag_pre = Linear(dec_dim, tag_size)

    def forward(self, inputs, target=None, teacher_forcing=True):
        """Return per-token tag log-probabilities, flattened to (batch*length, tag_size)."""
        batch, length = inputs.size()
        encoded, _ = self.encoder(inputs)
        _, _, features = self.decoder(encoded)
        logits = self.tag_pre(features)
        log_probs = F.log_softmax(logits, dim=2)
        return log_probs.view(batch * length, -1)


class PairLoss(nn.Module):
    """Penalty encouraging paired positions' score vectors to cancel out.

    For every pair (a, b) listed for sample i, adds
    ``|mean(inputs[i][a] + inputs[i][b])|`` to the loss.
    """

    def __init__(self, device):
        super(PairLoss, self).__init__()
        # NOTE(review): crit is constructed but never used by forward();
        # kept so any external code touching .crit keeps working.
        self.crit = nn.CosineEmbeddingLoss()
        self.device = device

    def forward(self, inputs, pair_index):
        """Accumulate the pairwise cancellation penalty.

        :param inputs: per-sample, per-position score tensors,
            indexable as inputs[i][j]
        :param pair_index: per-sample iterable of (idx_a, idx_b) pairs
        :return: scalar penalty (int 0 when there are no pairs, matching
            the original behavior)
        """
        pair_loss = 0
        # Removed the unused `tar` tensor the original allocated on every
        # call (leftover from an abandoned CosineEmbeddingLoss path).
        for i, sample_pairs in enumerate(pair_index):
            for a, b in sample_pairs:
                combined = inputs[i][a] + inputs[i][b]
                pair_loss += combined.mean().abs()

        return pair_loss



