import torch
from torch import nn


class WordEncoder(nn.Module):
    """Word-level encoder: a bidirectional GRU over token embeddings followed by
    additive attention that pools the per-step hidden states into one document vector.
    """

    def __init__(self, vocab_size, embed_dim, hidden_size, num_layers, attention_size):
        super().__init__()
        self.embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=embed_dim)
        self.gru = nn.GRU(input_size=embed_dim, hidden_size=hidden_size, num_layers=num_layers, bidirectional=True,
                          batch_first=True)
        self.linear = nn.Linear(hidden_size * 2, attention_size)
        # uw is the learned context vector measuring word importance (trained with the model).
        self.uw = nn.Parameter(torch.randn(attention_size), requires_grad=True)

    def forward(self, inputs):
        """inputs: [batch, seq_len] token ids -> [batch, hidden_size * 2] document vector."""
        embedded = self.embedding(inputs)
        output, h_n = self.gru(embedded)
        w_background = self.attention(output, self.uw)
        return w_background

    def attention(self, h, u):
        """Additive attention pooling over the time dimension.

        h: [batch, seq_len, hidden_size * 2] GRU outputs; u: [attention_size] context vector.
        Returns [batch, hidden_size * 2] — the attention-weighted sum of the time steps.
        """
        # Softmax is taken over the last (time-step) dimension of the [batch, seq_len] scores.
        alpha = torch.softmax(torch.matmul(torch.tanh(self.linear(h)), u), dim=-1)
        # unsqueeze(2) lets the weights broadcast over the feature dim; summing over dim=1
        # removes the time dimension. (Renamed from `sum`, which shadowed the builtin.)
        context = torch.sum(alpha.unsqueeze(2) * h, dim=1)
        return context


class SentenceEncoder(nn.Module):
    """Sentence-level encoder with two modes sharing one GRU:

    - level='word': embeds token ids and encodes one sentence's words.
    - level='sentence': encodes a sequence of precomputed sentence vectors.

    NOTE(review): the sentence-level path feeds hidden_size*2-dimensional vectors into a
    GRU declared with input_size=embed_dim, so it only works when
    embed_dim == hidden_size * 2 — confirm against the caller's configuration.
    """

    def __init__(self, vocab_size, embed_dim, hidden_size, num_layers, attention_size):
        super().__init__()
        self.embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=embed_dim)
        self.gru = nn.GRU(input_size=embed_dim, hidden_size=hidden_size, num_layers=num_layers, bidirectional=True,
                          batch_first=True)
        self.w_linear = nn.Linear(hidden_size * 2, attention_size)
        self.s_linear = nn.Linear(hidden_size * 2, attention_size)
        self.uw = nn.Parameter(torch.randn(attention_size))  # context vector for word importance
        self.us = nn.Parameter(torch.randn(attention_size))  # context vector for sentence importance

    def forward(self, inputs, level):
        """inputs: token ids [batch, seq_len] when level='word', or vectors
        [batch, n_sentences, hidden_size * 2] when level='sentence'.
        Returns [batch, hidden_size * 2]. Raises ValueError for any other level.
        """
        if level == 'word':
            embedded = self.embedding(inputs)
            output, h_n = self.gru(embedded)
            w_background = self.attention(output, self.uw, level)
            return w_background
        elif level == 'sentence':
            output, h_n = self.gru(inputs)
            s_background = self.attention(output, self.us, level)
            return s_background
        else:
            raise ValueError('level参数异常')

    def attention(self, h, u, level):
        """Additive attention pooling over dim=1 using the projection matching `level`."""
        # Pick the level-specific projection instead of duplicating the formula per branch.
        proj = self.w_linear if level == 'word' else self.s_linear
        alpha = torch.softmax(torch.matmul(torch.tanh(proj(h)), u), dim=-1)
        # Renamed from `sum`, which shadowed the builtin.
        context = torch.sum(alpha.unsqueeze(2) * h, dim=1)
        return context


class ParagraphEncoder(nn.Module):
    """Paragraph-level encoder with three modes sharing one GRU:

    - level='word': embeds token ids and encodes one sentence's words.
    - level='sentence': encodes a sequence of sentence vectors into a paragraph vector.
    - level='paragraph': encodes a sequence of paragraph vectors into a document vector.

    NOTE(review): the sentence/paragraph paths feed hidden_size*2-dimensional vectors into
    a GRU declared with input_size=embed_dim, so they only work when
    embed_dim == hidden_size * 2 — confirm against the caller's configuration.
    """

    def __init__(self, vocab_size, embed_dim, hidden_size, num_layers, attention_size):
        super().__init__()
        self.embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=embed_dim)
        self.gru = nn.GRU(input_size=embed_dim, hidden_size=hidden_size, num_layers=num_layers, bidirectional=True,
                          batch_first=True)
        self.w_linear = nn.Linear(hidden_size * 2, attention_size)
        self.s_linear = nn.Linear(hidden_size * 2, attention_size)
        self.p_linear = nn.Linear(hidden_size * 2, attention_size)
        # The context vectors are randomly initialised and learned during training.
        self.uw = nn.Parameter(torch.randn(attention_size))  # word importance
        self.us = nn.Parameter(torch.randn(attention_size))  # sentence importance
        self.up = nn.Parameter(torch.randn(attention_size))  # paragraph importance

    def forward(self, inputs, level):
        """inputs: token ids [batch, seq_len] when level='word'; otherwise vectors
        [batch, steps, hidden_size * 2]. Returns [batch, hidden_size * 2].
        Raises ValueError for an unknown level.
        """
        if level == 'word':
            embedded = self.embedding(inputs)
            output, h_n = self.gru(embedded)
            w_background = self.attention(output, self.uw, level)
            return w_background
        elif level == 'sentence':
            output, h_n = self.gru(inputs)
            s_background = self.attention(output, self.us, level)
            return s_background
        elif level == 'paragraph':
            output, h_n = self.gru(inputs)
            p_background = self.attention(output, self.up, level)
            return p_background
        else:
            raise ValueError('level参数异常')

    def attention(self, h, u, level):
        """Additive attention pooling over dim=1 using the projection matching `level`."""
        # Pick the level-specific projection instead of duplicating the formula per branch.
        if level == 'word':
            proj = self.w_linear
        elif level == 'sentence':
            proj = self.s_linear
        else:
            proj = self.p_linear
        alpha = torch.softmax(torch.matmul(torch.tanh(proj(h)), u), dim=-1)
        # Renamed from `sum`, which shadowed the builtin.
        context = torch.sum(alpha.unsqueeze(2) * h, dim=1)
        return context


class SMASH(nn.Module):
    """Siamese multi-depth attention-based hierarchical model: encodes a source and a
    target document at word, sentence and paragraph level, concatenates the six
    document vectors and scores the pair with a two-layer feed-forward head.
    """

    def __init__(self, vocab_size, embed_dim, hidden_dim, num_layers, attention_size, fd_out, ff_out):
        super().__init__()
        self.word_encoder = WordEncoder(vocab_size, embed_dim, hidden_dim, num_layers, attention_size=attention_size)
        self.sentence_encoder = SentenceEncoder(vocab_size, embed_dim, hidden_dim, num_layers,
                                                attention_size=attention_size)
        self.paragraph_encoder = ParagraphEncoder(vocab_size, embed_dim, hidden_dim, num_layers,
                                                  attention_size=attention_size)
        # fd's input: (hidden_dim * 2) per level, 3 levels, 2 documents.
        # fd_out is arbitrary; ff_out is 2 because the final task is binary classification.
        self.fd = nn.Linear(hidden_dim * 2 * 2 * 3, fd_out)
        self.ff = nn.Linear(fd_out, ff_out)

    def forward(self, source_w, target_w, source_s, target_s, source_p, target_p, batch):
        """Score a (source, target) document pair.

        source_w/target_w: [batch, seq_len] token ids (whole document as one word sequence).
        source_s/target_s: [batch, n_sentences, sent_len] token ids.
        source_p/target_p: [batch, n_paragraphs, n_sentences, sent_len] token ids.
        `batch` is unused but kept for interface compatibility: all sizes are now derived
        from the inputs instead of being hard-coded (previously fixed to batch=32,
        10 sentences, 3 paragraphs, 256 = hidden*2, which broke every other
        configuration and forced CPU-only zero buffers).
        Returns [batch, ff_out] sigmoid scores.
        """
        # word-level document representation
        source_w_doc = self.word_encoder(source_w)
        target_w_doc = self.word_encoder(target_w)

        # sentence-level document representation
        source_s_doc = self._encode_sentence_level(source_s)
        target_s_doc = self._encode_sentence_level(target_s)

        # paragraph-level document representation
        source_p_doc = self._encode_paragraph_level(source_p)
        target_p_doc = self._encode_paragraph_level(target_p)

        doc_s = torch.cat(tensors=(source_w_doc, source_s_doc, source_p_doc), dim=-1)
        doc_t = torch.cat(tensors=(target_w_doc, target_s_doc, target_p_doc), dim=-1)
        xf = torch.cat(tensors=(doc_s, doc_t), dim=-1)
        uf = torch.relu(self.fd(xf))
        y = torch.sigmoid(self.ff(uf))
        return y

    def _encode_sentence_level(self, docs):
        """Encode each sentence's words, then attend over the sentence vectors.

        docs: [batch, n_sentences, sent_len] token ids -> [batch, hidden_dim * 2].
        """
        doc_vecs = []
        for doc in docs:  # doc: [n_sentences, sent_len]
            sent_vecs = torch.stack([
                self.sentence_encoder(sent.unsqueeze(0), 'word').squeeze(0) for sent in doc
            ])  # [n_sentences, hidden_dim * 2]
            doc_vecs.append(sent_vecs)
        # torch.stack keeps device/dtype of the encoder outputs (no pre-allocated buffers).
        sentence_level = torch.stack(doc_vecs)  # [batch, n_sentences, hidden_dim * 2]
        return self.sentence_encoder(sentence_level, 'sentence')

    def _encode_paragraph_level(self, docs):
        """Encode words -> sentences -> paragraphs, then attend over paragraph vectors.

        docs: [batch, n_paragraphs, n_sentences, sent_len] token ids -> [batch, hidden_dim * 2].
        """
        doc_vecs = []
        for doc in docs:  # doc: [n_paragraphs, n_sentences, sent_len]
            para_vecs = []
            for para in doc:  # para: [n_sentences, sent_len]
                sent_vecs = torch.stack([
                    self.paragraph_encoder(sent.unsqueeze(0), 'word').squeeze(0) for sent in para
                ])  # [n_sentences, hidden_dim * 2]; squeeze(0) was previously missing here
                para_vecs.append(
                    self.paragraph_encoder(sent_vecs.unsqueeze(0), 'sentence').squeeze(0))
            doc_vecs.append(torch.stack(para_vecs))  # [n_paragraphs, hidden_dim * 2]
        paragraph_level = torch.stack(doc_vecs)  # [batch, n_paragraphs, hidden_dim * 2]
        return self.paragraph_encoder(paragraph_level, 'paragraph')
