from typing import List, Dict

import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.data as tud
from torch import nn

C = 3  # context window: number of context words on each side of the center word
K = 15  # number of negative samples drawn per positive context word

# NOTE(review): original author only ran this on CPU ("I don't know if this
# works, because I use cpu as the processor") -- the CUDA path is unverified.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')


class WordEmbeddingDataset(tud.Dataset):
    def __init__(self,
                 text: List[str],
                 word2index: Dict[str, int],
                 index2word: List[str],
                 word_frequencies: np.ndarray,
                 word_counts: np.ndarray,
                 context_size: int = 3,
                 negative_samples: int = 15):
        """
        Skip-gram dataset: each item is (center word, context words, negatives).

        :param text: corpus text to create the dataset. It has to be separated
                     (already tokenized into a list of words)
        :param word2index: words to indexes; must contain an '<UNK>' entry,
                           used for out-of-vocabulary words
        :param index2word: indexes to words
        :param word_frequencies: per-word sampling weights for negative sampling
        :param word_counts: number of words
        :param context_size: context words taken on EACH side of the center
                             word (default mirrors the module constant C)
        :param negative_samples: negatives drawn per positive context word
                                 (default mirrors the module constant K)
        """
        super(WordEmbeddingDataset, self).__init__()
        # Out-of-vocabulary words fall back to the '<UNK>' index.
        self.text_encoded = [word2index.get(word, word2index['<UNK>']) for word in text]
        self.text_encoded = torch.tensor(self.text_encoded).long()
        self.word2index = word2index
        self.index2word = index2word
        self.word_frequencies = torch.tensor(word_frequencies)
        self.word_counts = torch.tensor(word_counts)
        self.context_size = context_size
        self.negative_samples = negative_samples

    def __len__(self):
        """
        size of the WordEmbeddingDataset (one item per token position)
        :return: size
        """
        return len(self.text_encoded)

    def __getitem__(self, index: int) -> (torch.Tensor, torch.Tensor, torch.Tensor):
        """
        get the center word, positive (context) words and negative words
        for the token at `index`
        :param index: index of center word
        :return: (center word scalar, context-word tensor of length
                  2 * context_size, negative-word tensor of length
                  negative_samples * 2 * context_size)
        """
        center_word = self.text_encoded[index]
        window = self.context_size
        # BUG FIX: the right window previously ended at index + C - 1, which
        # yields only C - 2 words on the right (asymmetric window); a
        # symmetric window needs range(index + 1, index + C + 1).
        positive_indices = (list(range(index - window, index)) +
                            list(range(index + 1, index + window + 1)))
        # Wrap around the corpus boundaries instead of going out of range.
        positive_indices = [i % len(self.text_encoded) for i in positive_indices]
        positive_words = self.text_encoded[positive_indices]

        # NOTE(review): negatives can occasionally collide with positive
        # words; standard word2vec implementations tolerate this.
        negative_words = torch.multinomial(self.word_frequencies,
                                           self.negative_samples * positive_words.shape[0],
                                           True)
        return center_word, positive_words, negative_words


class WordEmbeddingModel(nn.Module):
    """
    Skip-gram word2vec with negative sampling, using a single shared
    embedding table for center, context and negative words.
    """

    def __init__(self, vocab_size: int, embedding_size: int):
        """
        :param vocab_size: number of words in the vocabulary
        :param embedding_size: dimensionality of each word vector
        """
        super(WordEmbeddingModel, self).__init__()
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size

        # Device placement is left to the caller (model.to(device));
        # hard-coding a global device here prevented multi-device use and
        # was unverified on CUDA per the author's own note.
        self.in_embed = nn.Embedding(self.vocab_size, self.embedding_size)

    def forward(self, input_labels, pos_labels, neg_labels):
        """
        Negative-sampling loss per example.

        :param input_labels: center word indices, shape (B,)
        :param pos_labels: context word indices, shape (B, P)
        :param neg_labels: negative sample indices, shape (B, N)
        :return: per-example loss, shape (B,) (caller takes the mean)
        """
        input_embedding = self.in_embed(input_labels)  # (B, E)
        pos_embedding = self.in_embed(pos_labels)      # (B, P, E)
        neg_embedding = self.in_embed(neg_labels)      # (B, N, E)

        input_embedding = input_embedding.unsqueeze(2)  # (B, E, 1)

        # Dot products of context / negative vectors with the center vector.
        pos_dot = torch.bmm(pos_embedding, input_embedding).squeeze(2)   # (B, P)
        neg_dot = torch.bmm(neg_embedding, -input_embedding).squeeze(2)  # (B, N)

        log_pos = F.logsigmoid(pos_dot).sum(1)
        log_neg = F.logsigmoid(neg_dot).sum(1)

        loss = log_pos + log_neg

        return -loss

    def input_embedding(self):
        """
        :return: the learned embedding matrix as a numpy array,
                 shape (vocab_size, embedding_size)
        """
        # BUG FIX: use detach() (not in-place detach_(), which mutates the
        # parameter's autograd state) and move to CPU first so this also
        # works when the model lives on a GPU.
        return self.in_embed.weight.detach().cpu().numpy()
