from collections import Counter, defaultdict
import csv
import os
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
# import torch.utils.data as data
# import tarfile
import time
import random
import math


# N_GRAM is the length of the conditioning context: each word is predicted
# from the previous N_GRAM words, i.e. the sentence probability factors as
# p(wi|wi_1, wi_2)p(wi_1|wi_2, wi_3)...
N_GRAM = 2
EMBED_SIZE = 256  # dimensionality of each word-embedding vector

# Word -> id map that assigns a fresh id on first lookup, so reading the
# training data grows the vocabulary as a side effect (see read_data).
# It is later rebound (in __main__) to default to UNK once the vocab is fixed.
w2i = defaultdict(lambda: len(w2i))
S = w2i['<s>']      # sentence-boundary / padding symbol (id 0)
UNK = w2i['<unk>']  # unknown-word symbol (id 1)


def read_data(filename, vocab=None):
    """Yield each line of *filename* as a list of word ids.

    Args:
        filename: path to a whitespace-tokenized text file, one sentence
            per line.
        vocab: mapping from word to id; defaults to the module-level
            ``w2i`` table (whose defaultdict behaviour means reading the
            training data also builds the vocabulary).

    Yields:
        list[int]: word ids for one sentence.
    """
    vocab = w2i if vocab is None else vocab
    # 'with' guarantees the file handle is closed even if the caller
    # abandons the generator early (the original leaked the handle).
    with open(filename) as f:
        for line in f:
            yield [vocab[w] for w in line.strip().split(' ')]


class LogLinLM(nn.Module):
    """Feed-forward n-gram language model.

    Embeds the context words, concatenates their embeddings, and scores
    every vocabulary word through a one-hidden-layer MLP.
    """

    def __init__(self, n_words=None, embed_size=None, n_gram=None, hidden_size=100):
        """Build the model.

        Args:
            n_words: vocabulary size; defaults to the module-level N_WORDS
                (set in __main__ after the vocabulary is read).
            embed_size: embedding dimensionality; defaults to EMBED_SIZE.
            n_gram: number of context words; defaults to N_GRAM.
            hidden_size: width of the hidden layer (100, as before).
        """
        super(LogLinLM, self).__init__()
        n_words = N_WORDS if n_words is None else n_words
        embed_size = EMBED_SIZE if embed_size is None else embed_size
        n_gram = N_GRAM if n_gram is None else n_gram
        # sparse=True: only the embedding rows actually used in a forward
        # pass receive gradients (compatible with the Adagrad optimizer
        # used in __main__).
        self.embed1 = nn.Embedding(n_words, embed_size, sparse=True)
        self.linear1 = nn.Linear(embed_size * n_gram, hidden_size)
        self.linear2 = nn.Linear(hidden_size, n_words)

    def forward(self, x):
        """Score the next word given the context word ids *x*.

        Args:
            x: sequence of n_gram word ids (a plain Python list of ints,
               as used by the callers in this file).

        Returns:
            A (1, n_words) tensor of unnormalized next-word scores.
        """
        # Concatenate the context embeddings into a single feature row.
        emb = self.embed1(Variable(torch.LongTensor(x))).view((1, -1))
        hidden = F.relu(self.linear1(emb))
        return self.linear2(hidden)


def generate(model, max_len=100):
    """Sample a sentence from *model* and return it as a list of word ids.

    Starts from an all-<s> history (as the original stub did) and
    repeatedly samples the next word from the model's softmax
    distribution, sliding the context window, until <s> is sampled
    (treated as end-of-sentence) or *max_len* words have been produced.

    The original body computed the first distribution and discarded it;
    this completes the obviously-intended sampling loop.
    """
    hist = [S] * N_GRAM
    sent = []
    while len(sent) < max_len:
        logits = model(hist)
        # Softmax over the vocabulary dimension -> a proper distribution.
        probs = F.softmax(logits, dim=1).data.view(-1)
        next_word = int(torch.multinomial(probs, 1)[0])
        if next_word == S:  # model predicted the sentence boundary
            break
        sent.append(next_word)
        hist = hist[1:] + [next_word]  # slide the n-gram context window
    return sent


if __name__ == '__main__':
    # Reading the training data populates w2i with the full vocabulary
    # (w2i assigns fresh ids on first lookup).
    train_data = list(read_data('../data/ptb/train.txt'))
    # Freeze the vocabulary: from here on, unseen words map to <unk>
    # instead of being given new ids.
    w2i = defaultdict(lambda: UNK, w2i)

    dev_data = list(read_data('../data/ptb/valid.txt'))
    i2w = {v: k for k, v in w2i.items()}

    N_WORDS = len(w2i)

    log_lin_lm = LogLinLM()
    # Adagrad supports the sparse gradients produced by the embedding layer.
    opt = optim.Adagrad(log_lin_lm.parameters(), lr=1e-3)
    loss_func = nn.CrossEntropyLoss()
    # Quick smoke test that a forward pass works before training starts.
    print(log_lin_lm([1, 2]))

    # todo add batch
    for epoch in range(100):
        start = time.time()
        random.shuffle(train_data)
        total_loss = 0
        # Reset per epoch: the original accumulated n_words across epochs
        # while total_loss reset, making train_loss/word and ppl wrong
        # from epoch 2 onward.
        n_words = 0
        for sent_id, sent in enumerate(train_data):
            loss = 0
            opt.zero_grad()
            # Pad the history with <s> so the first N_GRAM words of the
            # sentence are predicted too, and predict a closing <s> after
            # the last word: p(w0|<s>,<s>) ... p(<s>|w_{n-2}, w_{n-1}).
            # (The original sliced sent[1:]/sent[2:], hard-coding N_GRAM=2
            # and never conditioning on the start padding.)
            hist = [S] * N_GRAM + sent
            for i, target in enumerate(sent + [S]):
                out = log_lin_lm(hist[i:i + N_GRAM])
                tag = Variable(torch.LongTensor([target]))
                loss += loss_func(out, tag)
                n_words += 1
            loss.backward()
            opt.step()

            if (sent_id + 1) % 5000 == 0:
                print('--finished %r sentences, loss: %.4f, time: %.2f' % (sent_id + 1, loss.data[0], time.time() - start))
            total_loss += loss.data[0]
        print('Epoch: %d, train_loss/word: %.4f, ppl: %.2f, time: %.2f' % (epoch, total_loss/n_words, math.exp(total_loss / n_words), time.time() - start))
