#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time    : 2021/9/14 12:40 AM
# @Author  : khan_long
# @Email   : longkehan15@qq.com
# @File    : model.py
# @Software: PyCharm
import torch
import torch.nn as nn
from torch.nn import Parameter, Linear
import torch.nn.functional as F
from allennlp.modules import FeedForward
from torchtext.vocab import GloVe

NUM_SECTIONS = 5
WITH_ELMO = False

GLOVE_DIMENSION = 100
ELMo_DIMENSION = 1024

LSTM_HIDDEN_SIZE = 50
LSTM_NUM_LAYERS = 2
LSTM_DROP_OUT = 0.3

FW_NUM_LAYERS = 2
FW_NUM_NODES = 20


def new_parameter(*size):
    out = Parameter(torch.FloatTensor(*size))
    torch.nn.init.xavier_normal_(out)
    return out


class Attention(nn.Module):
    """ Simple multiplicative attention"""

    def __init__(self, attention_size):
        super(Attention, self).__init__()
        self.attention = new_parameter(attention_size, 1)

    def forward(self, x_in, reduction_dim=-2, return_attn_distribution=False):
        """
        return_attn_distribution: if True it will also return the original attention distribution

        this reduces the one before last dimension in x_in to a weighted sum of the last dimension
        e.g., x_in.shape == [64, 30, 100] -> output.shape == [64, 100]
        Usage: You have a sentence of shape [batch, sent_len, embedding_dim] and you want to
            represent sentence to a single vector using attention [batch, embedding_dim]

        Here we use it to aggregate the lexicon-aware representation of the sentence
        In two steps we convert [batch, sent_len, num_words_in_category, num_categories] into [batch, num_categories]
        """
        # calculate attn weights
        attn_score = torch.matmul(x_in, self.attention).squeeze()
        # add one dimension at the end and get a distribution out of scores
        attn_distrib = F.softmax(attn_score.squeeze(), dim=-1).unsqueeze(-1)
        scored_x = x_in * attn_distrib
        weighted_sum = torch.sum(scored_x, dim=reduction_dim)
        if return_attn_distribution:
            return attn_distrib.reshape(x_in.shape[0], -1), weighted_sum
        else:
            return weighted_sum


class BiLSTMAttention(nn.Module):
    def __init__(self, hidden_size=LSTM_HIDDEN_SIZE, num_layers=LSTM_NUM_LAYERS, num_directions=2,
                 lstm_dropout=LSTM_DROP_OUT, with_elmo=WITH_ELMO, num_sections=NUM_SECTIONS):
        super(BiLSTMAttention, self).__init__()

        self.num_sections = num_sections
        self.with_elmo = with_elmo
        self.num_directions = num_directions
        if with_elmo:
            self.embedding_size = GLOVE_DIMENSION + ELMo_DIMENSION
        else:
            self.embedding_size = GLOVE_DIMENSION

        self.lstm_hidden_size = hidden_size
        self.lstm_num_layers = num_layers
        self.lstm_dropout = lstm_dropout

        self.lstm = nn.LSTM(input_size=self.embedding_size, hidden_size=self.lstm_hidden_size,
                            num_layers=self.lstm_num_layers, bidirectional=(self.num_directions == 2),
                            dropout=self.lstm_dropout)

        self.lstm_output_dim = self.num_directions * self.lstm_hidden_size

        self.attention_seq2seq = Attention(self.lstm_output_dim)

        self.feedforward_s = FeedForward(input_dim=self.lstm_output_dim, num_layers=FW_NUM_LAYERS,
                                         hidden_dims=[FW_NUM_NODES, self.num_sections], activations=['relu', 'linear'],
                                         dropout=[0.2, 0.0])

        self.feedforward_w = FeedForward(input_dim=self.lstm_output_dim, num_layers=FW_NUM_LAYERS,
                                         hidden_dims=[FW_NUM_NODES, 2], activations=['relu', 'linear'],
                                         dropout=[0.2, 0.0])

    def forward(self, x):

        # words to embeddings
        # glove

        if self.with_elmo:
            pass
