import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
# import torchtext.data as data
from collections import defaultdict
import torch.optim as optim
import random
# import torch.nn.functional as F

# Fix RNG seeds so data shuffling and parameter init are reproducible across runs.
random.seed(1234)
torch.manual_seed(1234)

# Dimensionality of each word-embedding vector.
EMB_SIZE = 64
# functions to read the corpus
# word to index; tag to index
# defaultdicts that auto-assign the next free index to any unseen word/tag the
# first time it is looked up, so reading the corpus grows the vocab as a side effect.
w2i = defaultdict(lambda: len(w2i))
t2i = defaultdict(lambda: len(t2i))

# Reserve index 0 for unknown words.
# NOTE(review): UNK is never substituted at lookup time in this file, so truly
# unseen dev words silently get fresh indices instead — confirm this is intended.
UNK = w2i["<unk>"]
train_path = '../data/classes/train.txt'
dev_path = '../data/classes/test.txt'


def read_data(filename):
    """Yield (wrapped word-id list, tag id) pairs from a '|||'-delimited file.

    Each line is expected to look like ``tag ||| word word word``.
    The word-id list is wrapped in an outer list to form a batch of size 1,
    matching what ``CBOW.forward`` expects.  Looking up a line's words/tag
    registers any unseen entries in the module-level ``w2i``/``t2i``
    defaultdicts as a side effect.
    """
    with open(filename) as fd:
        for line in fd:
            tag, words = line.lower().strip().split('|||')
            # strip() the two fragments and use split() (any whitespace) so the
            # spaces around the '|||' separator do not create an empty-string
            # token in w2i or a trailing space in the t2i key.
            word_ids = [w2i[w] for w in words.split()]
            yield ([word_ids], t2i[tag.strip()])


class CBOW(nn.Module):
    """Continuous bag-of-words classifier: sum word embeddings, apply a linear layer.

    ``forward`` returns raw (unnormalized) per-tag scores; pair the model with
    ``nn.CrossEntropyLoss``, which applies log-softmax internally — no softmax
    layer is kept on the module itself.
    """

    def __init__(self, vocab_size=None, num_tags=None, emb_size=None):
        """Build the model.

        Args:
            vocab_size: number of word types; defaults to ``len(w2i)``.
            num_tags: number of output classes; defaults to ``len(t2i)``.
            emb_size: embedding dimension; defaults to module-level ``EMB_SIZE``.
        """
        super(CBOW, self).__init__()
        vocab_size = len(w2i) if vocab_size is None else vocab_size
        num_tags = len(t2i) if num_tags is None else num_tags
        emb_size = EMB_SIZE if emb_size is None else emb_size
        # sparse=True: gradients touch only the embedding rows seen in a batch.
        self.embed = nn.Embedding(vocab_size, emb_size, sparse=True)
        self.linear = nn.Linear(emb_size, num_tags)

    def forward(self, words):
        """Score a batch of sentences.

        Args:
            words: nested list of word ids, shape (batch, seq_len) —
                inner lists must all have the same length.

        Returns:
            Tensor of shape (batch, num_tags) with raw tag scores.
        """
        ids = torch.LongTensor(words)           # (batch, seq_len)
        summed = self.embed(ids).sum(dim=1)     # (batch, emb_size): bag of words
        return self.linear(summed)


if __name__ == '__main__':

    # Read both splits up front so w2i/t2i are fully populated before the
    # model's embedding/output sizes are fixed in CBOW.__init__.
    train_data = list(read_data(train_path))
    dev_data = list(read_data(dev_path))
    bow = CBOW()
    opt = optim.SGD(bow.parameters(), lr=1e-4)
    loss_func = nn.CrossEntropyLoss()
    length = len(train_data)

    print(bow)
    for epoch in range(1000):
        bow.train()
        total_loss = 0.0
        random.shuffle(train_data)

        for words, tag_id in train_data:
            tag = torch.LongTensor([tag_id])
            out = bow(words)
            opt.zero_grad()
            loss = loss_func(out, tag)
            loss.backward()
            opt.step()
            # .item() extracts the Python float; accumulating loss.data would
            # needlessly keep tensors alive across the epoch.
            total_loss += loss.item()

        print(total_loss / length)

        # Evaluation: switch to eval mode and skip graph construction.
        bow.eval()
        correct = 0.0
        with torch.no_grad():
            for words, tag_id in dev_data:
                scores = bow(words).numpy()
                if np.argmax(scores) == tag_id:
                    correct += 1

        print(correct / len(dev_data))
