import torch
import torch.nn as nn
import torch.optim as optim


class MASH_RNN(nn.Module):
    """Multi-depth Attentive Semantic Hierarchical RNN (MASH-RNN).

    Encodes a document with bidirectional GRUs at the word, sentence and
    paragraph levels, pools each level with additive attention, and returns
    the concatenation of the three level representations.
    """

    def __init__(self, word_size, word_dim, hidden_dim, importance_vector_dim):
        """
        Args:
            word_size: vocabulary size for the embedding table.
            word_dim: dimensionality of the word embeddings.
            hidden_dim: per-direction GRU hidden size (GRU outputs are
                2 * hidden_dim because all GRUs are bidirectional).
            importance_vector_dim: size of the attention projection space.
        """
        super(MASH_RNN, self).__init__()
        self.hidden_dim = hidden_dim
        self.embedding = nn.Embedding(word_size, word_dim)
        self.word_rnn = nn.GRU(word_dim, hidden_dim, bidirectional=True)
        self.sentence_rnn = nn.GRU(hidden_dim * 2, hidden_dim, bidirectional=True)
        self.paragraph_rnn = nn.GRU(hidden_dim * 2, hidden_dim, bidirectional=True)
        self.linear_w = nn.Linear(hidden_dim * 2, importance_vector_dim)
        self.linear_s = nn.Linear(hidden_dim * 2, importance_vector_dim)
        self.linear_p = nn.Linear(hidden_dim * 2, importance_vector_dim)
        # BUG FIX: the attention context vectors must be nn.Parameter so they
        # are registered with the module (picked up by .parameters() for the
        # optimizer, moved by .to(device), saved in state_dict). The original
        # plain torch.rand tensors were frozen, untracked constants.
        self.uw = nn.Parameter(torch.rand(importance_vector_dim))
        self.us = nn.Parameter(torch.rand(importance_vector_dim))
        self.up = nn.Parameter(torch.rand(importance_vector_dim))

    def compute_dw(self, words):
        # Word-level representation: embed, run the word GRU, attention-pool.
        embeds = self.embedding(words)
        word_rnn_out = self.gru_layer(embeds, level='word')
        dw = self.alpha_sum(word_rnn_out, level='word')
        return dw

    def compute_ds_dp(self, sentences, s_dist, p_dist):
        """Compute sentence-level and paragraph-level document vectors.

        Args:
            sentences: batch sentence tensor (assumed
                (total_sentences, words_per_sentence) — TODO confirm with caller).
            s_dist: number of sentences per document; len(s_dist) is the batch
                size and sum(s_dist) must equal sentences.size(0).
            p_dist: per document, the number of sentences in each paragraph;
                sum(p_dist[i]) must equal s_dist[i].

        Returns:
            (batch_ds, batch_dp), each of shape (len(s_dist), hidden_dim * 2).
        """
        embeds = self.embedding(sentences)
        sentence_rnn_out = self.gru_layer(embeds, level='word')
        # Slice the flat sentence sequence back into per-document chunks.
        docs_sentence_rnn_out = torch.split(sentence_rnn_out, s_dist)
        batch_ds = torch.zeros(size=(len(s_dist), self.hidden_dim * 2))
        batch_dp = torch.zeros(size=(len(s_dist), self.hidden_dim * 2))
        # Sentence-level representation
        for doc_step, doc_sentence_rnn_out in enumerate(docs_sentence_rnn_out):
            sentence_seq = self.alpha_sum(doc_sentence_rnn_out, level='word').unsqueeze(1)
            doc_s_rnn_out = self.gru_layer(sentence_seq, level='sentence')
            batch_ds[doc_step] = self.alpha_sum(doc_s_rnn_out, level='sentence')
            p_sentences = torch.split(doc_s_rnn_out, p_dist[doc_step])
            # Paragraph-level representation
            batch_p_ds = torch.zeros(size=(len(p_sentences), self.hidden_dim * 2))
            for p_step, p_sentence in enumerate(p_sentences):
                p_sent_seq = self.alpha_sum(p_sentence, level='word').unsqueeze(1)
                p_s_rnn_out = self.gru_layer(p_sent_seq, level='sentence')
                batch_p_ds[p_step] = self.alpha_sum(p_s_rnn_out, level='sentence')
            p_rnn_out = self.gru_layer(batch_p_ds.unsqueeze(1), level='paragraph')
            batch_dp[doc_step] = self.alpha_sum(p_rnn_out, level='paragraph')

        return batch_ds, batch_dp

    def compute_dp(self, paragraphs):
        """Paragraph-level representation for pre-segmented input.

        BUG FIX: the original body called self.gru_attn_sum, which was not
        defined anywhere in the class and raised AttributeError on first
        call; the helper is now defined below (gru_layer + alpha_sum).
        """
        embeds = self.embedding(paragraphs)
        batch_ds = torch.zeros(size=(embeds.size(0), self.hidden_dim * 2))
        for paragraph_step, embed in enumerate(embeds):
            dw = self.gru_attn_sum(embed, level='word').unsqueeze(0)
            batch_ds[paragraph_step] = self.gru_attn_sum(dw, level='sentence')
        dp = self.gru_attn_sum(batch_ds.unsqueeze(0), level='paragraph')
        return dp

    def gru_attn_sum(self, input, level):
        # Run the level-specific GRU and attention-pool its outputs.
        return self.alpha_sum(self.gru_layer(input, level), level)

    def gru_layer(self, input, level):
        # Dispatch to the bidirectional GRU for the given level.
        if level == 'word':
            rnn_out, rnn_hidden = self.word_rnn(input)
        elif level == 'sentence':
            rnn_out, rnn_hidden = self.sentence_rnn(input)
        else:
            rnn_out, rnn_hidden = self.paragraph_rnn(input)
        return rnn_out

    def alpha_sum(self, rnn_out, level):
        # Additive attention pooling: project, score against the level's
        # context vector, softmax, weighted sum. ('sum' renamed to 'pooled'
        # so the builtin is not shadowed.)
        if level == 'word':
            alpha = torch.softmax(torch.matmul(torch.tanh(self.linear_w(rnn_out)), self.uw), dim=-1)
            pooled = torch.sum(alpha.unsqueeze(2) * rnn_out, dim=1)
        elif level == 'sentence':
            alpha = torch.softmax(torch.matmul(torch.tanh(self.linear_s(rnn_out)), self.us), dim=-1)
            pooled = torch.sum(alpha.unsqueeze(2) * rnn_out, dim=0)
        else:
            alpha = torch.softmax(torch.matmul(torch.tanh(self.linear_p(rnn_out)), self.up), dim=-1)
            pooled = torch.sum(alpha.unsqueeze(2) * rnn_out, dim=0)
        return pooled

    def forward(self, word_tensor, sentence_tensor, s_dist, p_dist):
        """Return the concatenated (dp, ds, dw) representation,
        shape (batch, hidden_dim * 2 * 3)."""
        batch_dw = self.compute_dw(word_tensor)
        batch_ds, batch_dp = self.compute_ds_dp(sentence_tensor, s_dist, p_dist)
        batch_d = torch.cat(tensors=(batch_dp, batch_ds, batch_dw), dim=-1)
        return batch_d


class SMASH_RNN(nn.Module):
    """Siamese MASH-RNN for document-pair scoring.

    Encodes the source ('s') and candidate ('c') documents with two separate
    MASH_RNN encoders, concatenates the two representations, and scores the
    pair through two dense layers with a sigmoid output.
    """

    def __init__(self, word_size, word_dim, hidden_dim, importance_vector_dim, fd_dim, ff_dim):
        super(SMASH_RNN, self).__init__()
        self.mash_rnns = MASH_RNN(word_size, word_dim, hidden_dim, importance_vector_dim)
        self.mash_rnnc = MASH_RNN(word_size, word_dim, hidden_dim, importance_vector_dim)
        # Each encoder emits hidden_dim*2 features at 3 levels; 2 documents.
        self.fd = nn.Linear(hidden_dim * 2 * 3 * 2, fd_dim)
        self.ff = nn.Linear(fd_dim, ff_dim)

    def forward(self, doc_dict):
        """doc_dict maps 's'/'c' to (word_tensor, sentence_tensor, s_dist, p_dist)."""
        ds = self.mash_rnns(doc_dict['s'][0], doc_dict['s'][1], doc_dict['s'][2], doc_dict['s'][3])
        # BUG FIX: the candidate document must go through its own encoder
        # (mash_rnnc). The original ran both documents through mash_rnns,
        # leaving mash_rnnc constructed but unused and untrained.
        dc = self.mash_rnnc(doc_dict['c'][0], doc_dict['c'][1], doc_dict['c'][2], doc_dict['c'][3])
        xf = torch.cat(tensors=(ds, dc), dim=-1)
        uf = torch.relu(self.fd(xf))
        y_ = torch.sigmoid(self.ff(uf))
        return y_


# mash_rnn = MASH_RNN(100, 5, 10, 10)
# word_tensor = torch.randint(low=1, high=99, size=(2, 100))
# sentence_tensor = torch.randint(low=1, high=50, size=(20, 10))
# sentences_dist = [8, 12]
# paragraph_dist = [[3, 5], [5, 7]]
# mash_rnn(word_tensor, sentence_tensor, sentences_dist, paragraph_dist)
if __name__ == '__main__':
    # Smoke-test training loop on random dummy data.
    word_size = 100
    word_dim = 10
    hidden_dim = 20
    importance_vector_dim = 10
    fd_dim = 10
    ff_dim = 2
    learning_rate = 10e-3  # NOTE(review): 10e-3 == 0.01 — confirm 1e-3 was not intended
    smash_rnn = SMASH_RNN(word_size, word_dim, hidden_dim, importance_vector_dim, fd_dim, ff_dim)
    criterion = nn.BCELoss()
    optimization = optim.Adam(smash_rnn.parameters(), lr=learning_rate)
    word_tensor = torch.randint(low=1, high=99, size=(2, 100))
    sentence_tensor = torch.randint(low=1, high=50, size=(20, 10))
    sentences_dist = [8, 12]
    paragraph_dist = [[3, 5], [5, 7]]
    print('word tensor',word_tensor)
    print('sentence_tensor',sentence_tensor)
    # Source and candidate documents share the same dummy inputs here.
    doc_dict = {'s': (word_tensor, sentence_tensor, sentences_dist, paragraph_dist),
                'c': (word_tensor, sentence_tensor, sentences_dist, paragraph_dist)}
    gold_y = torch.tensor([[1, 0], [0, 1]], dtype=torch.float)
    for i in range(10):
        # BUG FIX: gradients must be cleared every step; without zero_grad()
        # PyTorch accumulates them across iterations and each optimizer step
        # applies the sum of all previous gradients.
        optimization.zero_grad()
        y_ = smash_rnn(doc_dict)
        loss = criterion(y_, gold_y)
        print(loss.item())
        loss.backward()
        optimization.step()
