import numpy as np

import torch as T
from torch.autograd import Variable

def genX(path, id, cnt):
    """Build a (cnt x cnt) character co-occurrence matrix from a text file.

    Each line is stripped of ALL whitespace and treated as one character
    sequence.  For every character, every character within a symmetric
    window of 10 positions on each side (including itself) is counted.

    Args:
        path: text file to read.
        id: mapping from character to integer index; characters missing
            from the mapping or mapping to an index >= cnt are ignored.
        cnt: vocabulary size / matrix dimension.

    Returns:
        numpy float64 array of shape (cnt, cnt) with co-occurrence counts.
    """
    X = np.zeros((cnt, cnt), dtype=np.float64)
    with open(path, "r") as f:
        # Stream the file line-by-line instead of readlines() — avoids
        # materializing the whole file in memory.
        for row in f:
            r = ''.join(row.split())  # drop every whitespace character
            for i, ch1 in enumerate(r):
                # Unknown characters default to cnt so the < cnt filter
                # skips them (the original raised KeyError on them).
                c1 = id.get(ch1, cnt)
                if c1 >= cnt:
                    continue
                # Slicing clamps the upper bound automatically; only the
                # lower bound needs max().
                for ch2 in r[max(0, i - 10): i + 11]:
                    c2 = id.get(ch2, cnt)
                    if c2 < cnt:
                        X[c1, c2] += 1
    return X

class Glove():
    """GloVe-style embedding model (Pennington et al., 2014).

    Learns a center table ``w``, a context table ``w_`` and per-token
    biases ``b``/``b_`` by weighted least squares on log co-occurrence
    counts, optimized full-batch with Adagrad.
    """

    def __init__(self, dictSize, embDim):
        self.dictSize = dictSize
        self.embDim = embDim

        # Weighting function f(x) = min((x / xmax) ** alpha, 1).
        self.xmax = 200
        self.alpha = 0.75
        self.lr = 0.1

        # torch.autograd.Variable is deprecated (a no-op since PyTorch 0.4):
        # create leaf tensors with requires_grad directly.  randn gives the
        # same N(0, 1) initialization the original rand + normal_() pair
        # ended up with, without the discarded uniform draw.
        self.w = T.randn(dictSize, embDim, requires_grad=True)
        self.w_ = T.randn(dictSize, embDim, requires_grad=True)
        self.b = T.randn(dictSize, 1, requires_grad=True)
        self.b_ = T.randn(1, dictSize, requires_grad=True)

        self.optim = T.optim.Adagrad([self.w, self.w_, self.b, self.b_], lr=self.lr)

    def load(self, path):
        """Restore parameters from a checkpoint file; return the saved epoch.

        NOTE(review): the optimizer's accumulated Adagrad state is
        intentionally NOT restored (see the original commented-out line);
        a fresh optimizer is built around the loaded tensors instead.
        """
        state = T.load(path)
        self.w = state['net']['w']
        self.w_ = state['net']['w_']
        self.b = state['net']['b']
        self.b_ = state['net']['b_']
        self.optim = T.optim.Adagrad([self.w, self.w_, self.b, self.b_], lr=self.lr)
        return state["epoch"]

    def save(self, path, epoch):
        """Checkpoint parameters, optimizer state and epoch to ``path % epoch``."""
        state = {
            'net': {
                'w': self.w,
                'w_': self.w_,
                'b': self.b,
                'b_': self.b_
            },
            'optimizer': self.optim.state_dict(),
            'epoch': epoch
        }
        T.save(state, path % epoch)

    def trainStep(self, X):
        """Run one full-batch Adagrad step on co-occurrence matrix ``X``.

        Args:
            X: (dictSize, dictSize) array/tensor of co-occurrence counts.

        Returns:
            The scalar loss tensor (still attached to the graph).
        """
        # Match the parameters' dtype: X typically arrives as float64 numpy
        # (see genX); the original mixed float64 with float32 parameters
        # and silently promoted the whole forward pass to float64.
        X = T.as_tensor(X, dtype=self.w.dtype)
        self.optim.zero_grad()

        # forward: weighted least squares on log counts.
        f = T.clamp((X / self.xmax) ** self.alpha, 0, 1)
        y = T.log1p(X)  # log(1 + X): defined at X == 0, accurate for small X
        # Biases broadcast to (dictSize, dictSize) without explicit expand().
        v = self.w @ self.w_.t() + self.b + self.b_
        d = v - y

        loss = (f * d * d).mean()

        # backward
        loss.backward()
        self.optim.step()

        return loss

    def eval(self, word):
        """Return the combined embedding (w + w_) for one word index.

        Out-of-range indices (>= dictSize) fall back to index 0.
        """
        if word >= self.dictSize:
            word = 0
        return self.w[word, :] + self.w_[word, :]

    def batchEval(self, words):
        """Embed a rectangular nested list of word indices.

        Args:
            words: list of equal-length lists of word indices.

        Returns:
            Tensor of shape (len(words), len(words[0]), embDim).
        """
        # Vectorized fancy indexing replaces the original per-element
        # Python double loop; out-of-range indices clamp to 0 exactly as
        # eval() does.
        idx = T.tensor([[i if i < self.dictSize else 0 for i in row]
                        for row in words], dtype=T.long)
        return self.w[idx] + self.w_[idx]
