import torch
import torch.nn as nn
import pickle
import numpy as np
import math

from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

class MeanAggregator(nn.Module):
    """Pool a padded sequence by summing time steps and dividing by the
    per-example mask total.

    NOTE(review): this normalises by sum(mask); elsewhere in this file the
    mask convention is 1 = padding / 0 = valid, so double-check the wiring
    before swapping this in for the attention aggregator.
    """

    def __init__(self):
        super(MeanAggregator, self).__init__()

    def forward(self, seq_embed, mask):
        # (batch, length, dim) -> (batch, dim): collapse the time axis.
        summed = seq_embed.sum(dim=1)
        # Per-example normaliser, broadcast across the feature axis.
        denom = mask.sum(dim=-1).view(seq_embed.size(0), 1)
        return summed / denom


class DotProductAttention(nn.Module):
    """Attention pooling over a sequence.

    A two-layer MLP scores each time step with a scalar; the softmax of the
    scores weights a sum over the sequence, collapsing (batch, length, d)
    into (batch, 1, d).
    """

    def __init__(self, d_model):
        super(DotProductAttention, self).__init__()
        # Scores each position: d_model -> d_model -> 1 (no bias on the top).
        self.project = nn.Sequential(
            nn.Linear(d_model, d_model),
            nn.Tanh(),
            nn.Linear(d_model, 1, bias=False)
        )
        self.softmax = nn.Softmax(dim=2)

    def forward(self, seq_embed, mask=None):
        """Pool `seq_embed` into one vector per example.

        Args:
            seq_embed: (batch, max_length, d_model) sequence embeddings.
            mask: optional tensor broadcastable to (batch, 1, max_length);
                convention in this file is 1 = padding, 0 = valid. Padding
                positions get -inf score and thus zero attention weight.

        Returns:
            (batch, 1, d_model) attention-weighted sum of `seq_embed`.
        """
        batch_size = seq_embed.shape[0]
        max_length = seq_embed.shape[1]
        score = self.project(seq_embed).view(batch_size, 1, max_length)
        if mask is not None:
            # Fix: mask on the score's own device instead of the previous
            # hard-coded .cuda() tensors, so CPU inputs also work, and
            # avoid allocating two fresh tensors per call.
            score = score.masked_fill(mask == 1, float('-inf'))
        scaled_score = self.softmax(score)
        # NOTE(review): a row whose positions are ALL masked softmaxes over
        # only -inf and yields NaN weights; callers must guarantee at least
        # one valid position per example.
        return torch.bmm(scaled_score, seq_embed)


class TweetEncoder(nn.Module):
    """Encode a single tweet into one vector.

    Pipeline: pretrained (frozen) word embeddings -> dropout -> packed
    bidirectional GRU -> dropout -> attention pooling. Token id 0 is treated
    as padding throughout.
    """

    def __init__(self, d_model, dataset):
        super(TweetEncoder, self).__init__()
        self.dataset = dataset
        self.d_model = d_model
        self.dropout = nn.Dropout(0.2)
        self.rnn = nn.GRU(input_size=d_model,
                          hidden_size=d_model,
                          batch_first=True,
                          bidirectional=True)

        word_embed_mtx = self.load_word_embed(self.dataset)
        # Fix: from_pretrained is a classmethod; the old code constructed a
        # throwaway nn.Embedding only to discard it. Embeddings stay frozen
        # (from_pretrained's default), same as before.
        self.word_embed = nn.Embedding.from_pretrained(word_embed_mtx)

        # Bi-GRU output is 2 * d_model wide.
        self.aggregate = DotProductAttention(d_model * 2)

    def load_word_embed(self, dataset):
        """Load the dataset's word-embedding matrix.

        Row 0 is reserved (all zeros) for the padding token; rows 1.. hold
        the pretrained vectors in dict-iteration order.
        """
        # NOTE(review): pickle on a local file — safe only if the file is
        # trusted; never point this at untrusted data.
        # Fix: close the file handle (the old bare open() leaked it).
        with open('../dataset/{}/word_emb_dict_nostop.pkl'.format(dataset), 'rb') as f:
            word_embed_dict = pickle.load(f)
        word_embed = torch.zeros(len(word_embed_dict) + 1, self.d_model)
        word_embed[1:, ] = torch.from_numpy(np.array(list(word_embed_dict.values()))).float()
        return word_embed

    def forward(self, tweets):
        """Encode a batch of token-id sequences.

        Args:
            tweets: (batch, max_length) LongTensor of token ids, 0 = padding.
                Each row must contain at least one non-zero token
                (pack_padded_sequence rejects zero lengths).

        Returns:
            (batch, 1, 2 * d_model) pooled tweet encodings.
        """
        batch_size = tweets.shape[0]
        max_length = tweets.shape[1]

        input_embed = self.word_embed(tweets)
        input_embed = self.dropout(input_embed)

        # Real length of each tweet = count of non-padding tokens
        # (lengths must live on CPU for pack_padded_sequence).
        tweets_length = torch.sum((tweets != 0), dim=-1).cpu()
        input_embed = pack_padded_sequence(input_embed, tweets_length, batch_first=True, enforce_sorted=False)
        out_embed, _ = self.rnn(input_embed)
        # Fix: pad back to the input's max_length. Without total_length the
        # unpacked tensor is only as long as the batch's longest sequence,
        # which mismatches the (batch, 1, max_length) mask whenever the
        # batch is padded to a larger global maximum.
        out_embed, _ = pad_packed_sequence(out_embed, batch_first=True, total_length=max_length)

        # Mask convention: 1 = padding position, 0 = valid token.
        # Fix: allocate on the input's device instead of hard-coded .cuda().
        mask = torch.ones(batch_size, 1, max_length, device=tweets.device)
        for i, length in enumerate(tweets_length):
            mask[i, :, :length] = 0

        out_embed = self.dropout(out_embed)
        out = self.aggregate(out_embed, mask)
        return out


class TweetsEncoder(nn.Module):
    """Encode a user's whole tweet collection into one vector.

    Each tweet is encoded by TweetEncoder (2 * d_model per tweet); a second
    bidirectional GRU runs over each user's tweet sequence (4 * d_model
    output), which is attention-pooled into a single user embedding.
    """

    def __init__(self, d_model):
        super(TweetsEncoder, self).__init__()
        self.base_encoder = TweetEncoder(d_model, 'cmu')
        self.dropout = nn.Dropout(0.2)
        # Tweet encodings are 2*d_model; the bi-GRU doubles that again.
        self.aggregate = DotProductAttention(d_model * 4)
        self.rnn = nn.GRU(input_size=d_model * 2,
                          hidden_size=d_model * 2,
                          batch_first=True,
                          bidirectional=True)

    def forward(self, tweets, tweets_num):
        """Encode one batch of users.

        Args:
            tweets: (total_tweets, max_tokens) token ids for all users'
                tweets, concatenated in user order.
            tweets_num: per-user tweet counts; must sum to total_tweets and
                each count must be >= 1.

        Returns:
            (batch, 4 * d_model) user embeddings.

        Raises:
            ValueError: if sum(tweets_num) != tweets.shape[0].
        """
        tweets_encoding = self.base_encoder(tweets)
        tweets_encoding = self.dropout(tweets_encoding)

        batch_size = len(tweets_num)
        max_length = max(tweets_num)
        embed_size = tweets_encoding.shape[-1]
        device = tweets_encoding.device
        # Fix: allocate on the encodings' device instead of hard-coded .cuda().
        batch_tweets_encoding = torch.zeros(batch_size, max_length, embed_size, device=device)

        # Scatter the flat (total_tweets, embed) block into a padded
        # (batch, max_tweets, embed) tensor, one row-range per user.
        tweet_idx = 0
        for user_idx, num in enumerate(tweets_num):
            user_tweets_encoding = tweets_encoding[tweet_idx: tweet_idx + num].view(num, embed_size)
            batch_tweets_encoding[user_idx, :num, :] = user_tweets_encoding
            tweet_idx += num

        # Fix: validate explicitly instead of `assert`, which is stripped
        # when Python runs with -O.
        if tweet_idx != tweets.shape[0]:
            raise ValueError(
                'sum(tweets_num)={} does not match tweets.shape[0]={}'.format(tweet_idx, tweets.shape[0]))

        batch_tweets_encoding = pack_padded_sequence(
            batch_tweets_encoding, torch.LongTensor(tweets_num), batch_first=True, enforce_sorted=False)
        user_embed, _ = self.rnn(batch_tweets_encoding)
        # total_length keeps the unpacked tensor aligned with the
        # (batch, 1, max_length) mask below.
        user_embed, _ = pad_packed_sequence(user_embed, batch_first=True, total_length=max_length)

        # Mask convention: 1 = padding slot, 0 = real tweet.
        # Fix: allocate on the working device instead of hard-coded .cuda().
        mask = torch.ones(batch_size, 1, max_length, device=device)
        for i, length in enumerate(tweets_num):
            mask[i, :, :length] = 0

        user_embed = self.dropout(user_embed)
        out = self.aggregate(user_embed, mask)
        out = out.view(batch_size, out.shape[-1])

        return out
