from collections import Counter
import numpy as np
import torch
from networkx.algorithms.distance_measures import center
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.nn.functional as F

learning_rate = 1e-3  # Adam learning rate used by train()
max_epochs = 2  # number of passes over the corpus
K = 2  # negative samples drawn per positive context word
C = 2  # context window half-width: C words on each side of the center word
root = "data/text8.train.txt"  # path to the training corpus (single-line text8 split)
VOCAB_SIZE = 30000  # keep this many most-frequent words; the rest map to UNK
UNK = "UNK"  # placeholder token for out-of-vocabulary words

def split_word(text):
    """Tokenise *text* by splitting on single space characters."""
    return text.split(" ")

def read_data(path):
    """Read the corpus at *path* and return its whitespace-separated tokens.

    The original implementation read only the first line and split on single
    spaces, which silently dropped any text after the first newline and kept
    empty-string tokens (text8 starts with a leading space).  Reading the
    whole file and using str.split() fixes both.

    Args:
        path: filesystem path to a plain-text corpus.

    Returns:
        List of token strings.
    """
    with open(path, "r", encoding="utf-8") as f:
        text = f.read()
    return text.split()

class SkipGramDataset(Dataset):
    """Skip-gram dataset with negative sampling.

    Each item is a triple (center_word, positive context words, negative
    samples), all as index tensors.
    """

    def __init__(self, text_list, word2idx, word_freq, window=None, num_neg=None):
        """
        Args:
            text_list: corpus as a list of word strings.
            word2idx: word -> vocabulary index mapping; must contain UNK.
            word_freq: per-index sampling weights for negative sampling,
                aligned with the indices in word2idx.
            window: context half-width; defaults to the module constant C.
            num_neg: negative samples per positive word; defaults to K.
        """
        super(SkipGramDataset, self).__init__()
        # Resolve the defaults lazily so they track the module-level C / K.
        self.window = C if window is None else window
        self.num_neg = K if num_neg is None else num_neg
        text_index = [word2idx.get(word, word2idx[UNK]) for word in text_list]
        self.text_index = torch.tensor(text_index)
        self.word2idx = word2idx
        self.word_freq = torch.tensor(word_freq)

    def __getitem__(self, index):
        n = self.text_index.size(0)
        center_word = self.text_index[index]
        # Wrap context indices modulo the corpus length: the original code
        # raised IndexError for positions within `window` of the end and
        # silently relied on Python's negative indexing near the start.
        pos_index = [i % n for i in range(index - self.window, index)]
        pos_index += [i % n for i in range(index + 1, index + self.window + 1)]
        pos_words = self.text_index[pos_index]
        # Sample WITH replacement, as in standard word2vec negative sampling;
        # replacement=False fails whenever word_freq has fewer non-zero
        # entries than the number of samples requested.
        neg_words = torch.multinomial(
            self.word_freq, self.window * 2 * self.num_neg, replacement=True
        )
        return center_word, pos_words, neg_words

    def __len__(self):
        return self.text_index.size(0)

class SkipGramModel(nn.Module):
    """Skip-gram word2vec model trained with the negative-sampling loss."""

    def __init__(self, vocab_size, embedding_dim):
        # Missing in the original: nn.Module must be initialised before
        # sub-modules are assigned, otherwise attribute registration fails.
        super(SkipGramModel, self).__init__()
        self.input_embedding = nn.Embedding(vocab_size, embedding_dim)
        self.output_embedding = nn.Embedding(vocab_size, embedding_dim)

    def forward(self, center_word, pos_words, neg_words):
        """Return the mean negative-sampling loss over the batch.

        Args:
            center_word: [B] indices of center words.
            pos_words:   [B, 2C] indices of context (positive) words.
            neg_words:   [B, 2CK] indices of negative samples.
        """
        center_emb = self.input_embedding(center_word)  # [B, d]
        pos_emb = self.output_embedding(pos_words)      # [B, 2C, d]
        neg_emb = self.output_embedding(neg_words)      # [B, 2CK, d]

        # Dot product of each context embedding with its center embedding.
        # squeeze(-1), not squeeze(): a bare squeeze() would also drop the
        # batch dimension when B == 1.
        pos_logits = torch.bmm(pos_emb, center_emb.unsqueeze(2)).squeeze(-1)  # [B, 2C]
        neg_logits = torch.bmm(neg_emb, center_emb.unsqueeze(2)).squeeze(-1)  # [B, 2CK]

        # log sigma(u_o . v_c) for positives, log sigma(-u_n . v_c) for negatives.
        # Original bug: the negative term was stored in a misspelled name
        # ("neg_logitis"), so the RAW logits were summed into the loss.
        pos_logprob = F.logsigmoid(pos_logits)   # [B, 2C]
        neg_logprob = F.logsigmoid(-neg_logits)  # [B, 2CK]

        pos_loss = torch.sum(pos_logprob, dim=1)  # [B]
        neg_loss = torch.sum(neg_logprob, dim=1)  # [B]

        return -(pos_loss.mean() + neg_loss.mean())

def train(dataloader, model, lr=None, epochs=None):
    """Run the skip-gram training loop with Adam.

    Args:
        dataloader: iterable yielding (center_word, pos_words, neg_words)
            batches.
        model: module whose forward(center, pos, neg) returns a scalar loss.
        lr: learning rate; defaults to the module-level learning_rate.
        epochs: number of passes; defaults to the module-level max_epochs.
    """
    # None-sentinel defaults keep the original zero-argument call working
    # while making the hyper-parameters overridable.
    lr = learning_rate if lr is None else lr
    epochs = max_epochs if epochs is None else epochs
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    for epoch in range(epochs):
        for step, (center_word, pos_words, neg_words) in enumerate(dataloader):
            loss = model(center_word, pos_words, neg_words)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Periodic progress logging so long runs are observable.
            if step % 1000 == 0:
                print(f"epoch {epoch} step {step} loss {loss.item():.4f}")

if __name__ == '__main__':
    text_list = read_data(root)

    # Keep the VOCAB_SIZE most frequent words; everything else maps to UNK.
    text_dict = dict(Counter(text_list).most_common(VOCAB_SIZE))

    # Every occurrence of a dropped word counts toward the single UNK token.
    unk_count = len(text_list) - sum(text_dict.values())
    text_dict[UNK] = unk_count

    # idx2word: index -> word; word2idx: word -> index.
    idx2word = list(text_dict.keys())
    word2idx = {word: idx for idx, word in enumerate(idx2word)}

    # Negative-sampling weights aligned with vocabulary indices, raised to
    # the 3/4 power as in the word2vec paper.  The original code kept the
    # raw word -> count dict, which SkipGramDataset cannot convert to a
    # tensor via torch.tensor().
    word_freq = np.array([text_dict[w] for w in idx2word], dtype=np.float64) ** 0.75
    word_freq = word_freq / word_freq.sum()

    print("finish")




