from typing import Union, List, Optional, Any

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim.lr_scheduler as lrs
from pytorch_lightning import LightningModule

from pytorch_lightning.utilities.types import STEP_OUTPUT


class DeepLstm(LightningModule):
    """Two-layer bidirectional LSTM reader.

    Embeds context and question, runs them (concatenated along the time
    axis) through two stacked BiLSTMs, and classifies over entities using
    the final time-step features of both LSTM layers.
    """

    def __init__(self, config):
        super(DeepLstm, self).__init__()
        self.config = config

        # 1. Word Embedding Layer
        self.word_emb = nn.Embedding(config.vocab_size, config.word_dim)

        # 2. Contextual Embedding Layer: two stacked bidirectional LSTMs.
        # NOTE(review): `dropout` on a single-layer nn.LSTM has no effect
        # (PyTorch only applies it between stacked layers and warns).
        self.modeling_LSTM1 = nn.LSTM(
            input_size=config.word_dim,
            hidden_size=config.hidden_size,
            bidirectional=True,
            batch_first=True,
            dropout=config.dropout,
        )
        self.modeling_LSTM2 = nn.LSTM(
            input_size=config.hidden_size * 2,
            hidden_size=config.hidden_size,
            bidirectional=True,
            batch_first=True,
            dropout=config.dropout,
        )

        # 3. Output Layer: scores entities from the concatenation of the
        # last time-step of both LSTM layers -> in_features = hidden * 4.
        self.classifier = nn.Linear(config.hidden_size * 4, config.n_entities)

        self.dropout = nn.Dropout(p=config.dropout)

    def forward(self, batch):
        """Return entity logits of shape (batch, n_entities).

        :param batch: object exposing ``context`` and ``question`` LongTensors
                      of token ids (batch-first) — assumed; confirm with caller.
        """
        # 1. Word Embedding Layer: embed both inputs, then join them along
        # the sequence (time) dimension so one LSTM pass reads them both.
        ctx_emb = self.word_emb(batch.context)
        qry_emb = self.word_emb(batch.question)
        seq = torch.cat((ctx_emb, qry_emb), dim=1)

        # 2. Modeling Layer
        out1, state1 = self.modeling_LSTM1(seq)
        # last time-step of the first layer: (batch, hidden * 2)
        feat_first = out1[:, -1, :]
        # the second LSTM is seeded with the first layer's final (h, c)
        out2, _ = self.modeling_LSTM2(out1, state1)
        feat_last = out2[:, -1, :]

        # 3. Output Layer: (batch, hidden * 4) -> (batch, n_entities)
        features = torch.cat((feat_first, feat_last), dim=-1)
        return self.classifier(features)


class AttentiveReader(LightningModule):
    """Attentive Reader: scores passage tokens against a global query
    vector, builds an attention-weighted passage representation, and
    classifies over entities.

    BUG FIX: the attention weight matrices were previously created with
    ``torch.randn`` inside ``forward()``, which (a) re-sampled them on
    every call, (b) excluded them from the optimizer so they were never
    trained, and (c) allocated them on the CPU regardless of the model's
    device.  They are now registered as ``nn.Parameter`` in ``__init__``.
    """

    def __init__(self, conifg):
        # NOTE(review): the parameter is spelled "conifg" in the original;
        # the name is kept so keyword callers keep working, and aliased
        # locally to avoid propagating the typo.
        super(AttentiveReader, self).__init__()
        config = conifg
        self.args = config

        # 1. Word Embedding Layer
        self.word_emb = nn.Embedding(config.vocab_size, config.word_dim)

        # 2. Contextual Embedding Layer (one BiLSTM each for doc and query).
        # NOTE(review): `dropout` on a single-layer nn.LSTM is a no-op.
        self.modeling_doc_LSTM = nn.LSTM(input_size=config.word_dim,
                                         hidden_size=config.hidden_size,
                                         bidirectional=True,
                                         batch_first=True,
                                         dropout=config.dropout)

        self.modeling_query_LSTM = nn.LSTM(input_size=config.word_dim,
                                           hidden_size=config.hidden_size,
                                           bidirectional=True,
                                           batch_first=True,
                                           dropout=config.dropout)

        # Attention parameters (learnable, moved here from forward()).
        # W_y scores passage states, W_u scores the query vector; W_rg/W_ug
        # project the attended passage and the query into hidden_size.
        self.W_y = nn.Parameter(torch.randn(config.hidden_size * 2, 1))
        self.W_u = nn.Parameter(torch.randn(config.hidden_size * 2, 1))
        self.W_rg = nn.Parameter(torch.randn(config.hidden_size * 2, config.hidden_size))
        self.W_ug = nn.Parameter(torch.randn(config.hidden_size * 2, config.hidden_size))

        # 3. Output Layer: g has size hidden_size (see forward()).
        self.classifier = nn.Linear(config.hidden_size, config.n_entities)

        self.dropout = nn.Dropout(p=config.dropout)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, batch):
        """Return entity logits of shape (batch, n_entities).

        :param batch: object exposing ``context`` and ``question`` LongTensors
                      of token ids (batch-first) — assumed; confirm with caller.
        """
        # 1. Word Embedding Layer
        passage = self.word_emb(batch.context)
        question = self.word_emb(batch.question)

        # 2. Modeling Layer.
        # passage_hidden (b, len, hidden*2): every time step is kept because
        # each one is attended against the query below.
        passage_hidden, _ = self.modeling_doc_LSTM(passage)

        question_hidden, _ = self.modeling_query_LSTM(question)
        # q_vector (b, hidden*2): the last time step serves as a global
        # representation of the whole query.
        q_vector = question_hidden[:, -1, :]

        # 3. Attention. matmul broadcasts over the batch dimension, so the
        # original reshape round-trip is unnecessary.
        add_1 = torch.matmul(passage_hidden, self.W_y)       # (b, len, 1)
        add_2 = torch.matmul(q_vector, self.W_u).unsqueeze(1)  # (b, 1, 1)
        # m_t (b, len, 1); torch.tanh replaces deprecated F.tanh
        m_t = torch.tanh(add_1 + add_2)
        # s_t (b, len, 1): attention scores over passage positions (dim=1)
        s_t = self.softmax(m_t)
        # r (b, hidden*2): attention-weighted passage representation
        r = torch.sum(s_t * passage_hidden, dim=1)

        # g (b, hidden): fuse the attended passage (r) with the raw LSTM
        # query representation (q_vector).
        g = torch.tanh(torch.matmul(r, self.W_rg) + torch.matmul(q_vector, self.W_ug))

        # 4. Output Layer
        return self.classifier(g)


class ImpatientReader(LightningModule):
    """Impatient Reader: re-attends over the passage once per query token,
    accumulating the attended representation ``r`` recurrently, then
    classifies over entities.

    BUG FIXES:
    * All attention weight matrices were created with ``torch.randn``
      inside ``forward()`` — re-sampled every call, never trained, and
      stuck on the CPU.  They are now ``nn.Parameter``s in ``__init__``.
    * The recurrent attention state ``r`` was also re-randomized on every
      forward pass, making even inference non-deterministic.  It is now a
      learnable zero-initialized parameter expanded over the batch.
    """

    def __init__(self, config):
        super(ImpatientReader, self).__init__()
        self.config = config

        # 1. Word Embedding Layer
        self.word_emb = nn.Embedding(config.vocab_size, config.word_dim)

        # 2. Contextual Embedding Layer (one BiLSTM each for doc and query).
        # NOTE(review): `dropout` on a single-layer nn.LSTM is a no-op.
        self.modeling_doc_LSTM = nn.LSTM(input_size=config.word_dim,
                                         hidden_size=config.hidden_size,
                                         bidirectional=True,
                                         batch_first=True,
                                         dropout=config.dropout)

        self.modeling_query_LSTM = nn.LSTM(input_size=config.word_dim,
                                           hidden_size=config.hidden_size,
                                           bidirectional=True,
                                           batch_first=True,
                                           dropout=config.dropout)

        # Attention parameters (learnable, moved here from forward()).
        hidden2 = config.hidden_size * 2
        self.W_dm = nn.Parameter(torch.randn(hidden2, 1))
        self.W_rm = nn.Parameter(torch.randn(hidden2, 1))
        self.W_qm = nn.Parameter(torch.randn(hidden2, 1))
        self.W_rr = nn.Parameter(torch.randn(hidden2, hidden2))
        self.W_rg = nn.Parameter(torch.randn(hidden2, config.hidden_size))
        self.W_ug = nn.Parameter(torch.randn(hidden2, config.hidden_size))
        # Initial recurrent attention state, broadcast over the batch.
        self.r0 = nn.Parameter(torch.zeros(1, hidden2))

        # 3. Output Layer: g has size hidden_size (see forward()).
        self.classifier = nn.Linear(config.hidden_size, config.n_entities)

        self.dropout = nn.Dropout(p=config.dropout)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, batch):
        """Return entity logits of shape (batch, n_entities).

        :param batch: object exposing ``context`` and ``question`` LongTensors
                      of token ids (batch-first) — assumed; confirm with caller.
        """
        # 1. Word Embedding Layer
        passage = self.word_emb(batch.context)
        question = self.word_emb(batch.question)

        # 2. Modeling Layer, passage_hidden (b, len, hidden*2)
        passage_hidden, _ = self.modeling_doc_LSTM(passage)
        question_hidden, _ = self.modeling_query_LSTM(question)
        # u (b, hidden*2): last query time step as the global query vector
        u = question_hidden[:, -1, :]

        batch_size, q_len = question_hidden.size(0), question_hidden.size(1)

        # 3. Attention: score the passage once per query token and fold the
        # per-token attention results into the recurrent state r.
        # The passage term is independent of the loop variable — hoisted.
        add_1 = torch.matmul(passage_hidden, self.W_dm)  # (b, len, 1)
        r = self.r0.expand(batch_size, -1)               # (b, hidden*2)
        for i in range(q_len):
            # add_2 (b, 1, 1): contribution of the i-th query token
            add_2 = torch.matmul(question_hidden[:, i, :], self.W_qm).unsqueeze(1)
            # m_t (b, len, 1); torch.tanh replaces deprecated F.tanh
            m_t = torch.tanh(add_1 + torch.matmul(r, self.W_rm).unsqueeze(1) + add_2)
            # s_t (b, len, 1): attention scores over passage positions
            s_t = self.softmax(m_t)
            # r (b, hidden*2): attended passage plus gated previous state
            r = torch.sum(s_t * passage_hidden, dim=1) + torch.tanh(torch.matmul(r, self.W_rr))

        # g (b, hidden): fuse the accumulated attention state with the
        # global query vector.
        g = torch.tanh(torch.matmul(r, self.W_rg) + torch.matmul(u, self.W_ug))

        # 4. Output Layer
        return self.classifier(g)


