import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
import random
'''
1. Split the data into train/val/test sets: 80% train, 10% val, 10% test.
'''
@torch.no_grad()
def calc_full_loss(C, W, b, W2, b2, Y, X):
    """Evaluate the cross-entropy loss of the model over a full dataset.

    Runs the forward pass without gradient tracking and normalizes the
    hidden pre-activations with the *running* batch-norm statistics
    (module-level ``bngain``/``bnbias``/``bnmean_running``/``bnstd_running``),
    so the result does not depend on batch composition.

    Args:
        C: character embedding table, shape (vocab, emb_dim).
        W, b: first linear layer (input is the flattened context embedding).
        W2, b2: output linear layer.
        Y: target character indices, shape (N,).
        X: context indices, shape (N, block_size).

    Returns:
        Scalar cross-entropy loss tensor.
    """
    emb = C[X]  # (N, block_size, emb_dim)
    # Infer the flattened width from the embedding instead of hardcoding 30,
    # and drop the dead batch-statistics computation the original carried.
    hpre = emb.view(emb.shape[0], -1) @ W + b
    # Batch-normalize with the running statistics accumulated during training.
    hpre = bngain * (hpre - bnmean_running) / bnstd_running + bnbias
    h = torch.tanh(hpre)
    logits = h @ W2 + b2
    return F.cross_entropy(logits, Y)
    

def build_dataset(words):
    """Build (X, Y) context/target tensors from a list of words.

    For each word (with '.' appended as the end-of-word token), every
    character becomes a target whose input is the preceding ``block_size``
    character indices, zero-padded at the start of the word. Uses the
    module-level ``block_size`` and ``stoi`` mapping.

    Returns:
        X: LongTensor of shape (N, block_size) with context indices.
        Y: LongTensor of shape (N,) with target indices.
    """
    contexts, targets = [], []
    for word in words:
        window = [0] * block_size
        for ch in word + '.':
            token = stoi[ch]
            contexts.append(window)
            targets.append(token)
            window = window[1:] + [token]  # slide the context window

    X = torch.tensor(contexts)
    Y = torch.tensor(targets)
    print(X.shape, Y.shape)
    return X, Y

def draw_embeddings(C, stoi):
    """Scatter-plot the first two embedding dimensions of every character.

    Fix: the function received ``stoi`` but labeled the points through the
    module-level ``itos``; derive the index -> char mapping from the
    parameter so the function actually uses its argument.

    Args:
        C: embedding table, shape (vocab, emb_dim) with emb_dim >= 2.
        stoi: mapping from character to embedding-row index.
    """
    itos_local = {i: s for s, i in stoi.items()}
    plt.figure(figsize=(8, 8))
    plt.scatter(C[:, 0].data, C[:, 1].data, s=200)
    for i in range(C.shape[0]):
        plt.text(C[i, 0].item(), C[i, 1].item(), itos_local[i],
                 ha="center", va="center", color="white")
    plt.grid("minor")

def inference(C, W, b, W2, b2, g, block_size):
    """Sample one name from the trained model, character by character.

    Starts from an all-padding context, runs the forward pass using the
    running batch-norm statistics (module-level ``bngain``/``bnbias``/
    ``bnmean_running``/``bnstd_running``), samples the next character from
    the softmax distribution, and stops once the end token (index 0) is
    drawn. Returns the sampled string (end token included as '.').
    """
    sampled = []
    window = [0] * block_size
    while True:
        emb = C[torch.tensor([window])]
        pre = emb.view(-1, 30) @ W + b
        # Normalize with running statistics, as at evaluation time.
        pre = bngain * (pre - bnmean_running) / bnstd_running + bnbias
        hidden = torch.tanh(pre)
        logits = hidden @ W2 + b2
        probs = F.softmax(logits, dim=1)
        nxt = torch.multinomial(probs, num_samples=1, generator=g).item()
        window = window[1:] + [nxt]
        sampled.append(nxt)
        if nxt == 0:
            break
    return ''.join(itos[i] for i in sampled)

if __name__ == '__main__':
    # --- data loading and vocabulary -------------------------------------
    # Fix: close the file deterministically instead of leaking the handle.
    with open('names.txt', 'r') as f:
        words = f.read().splitlines()
    print(len(words))
    chars = sorted(set(''.join(words)))
    chars = ['.'] + chars                      # index 0 is the '.' boundary token
    stoi = {s: i for i, s in enumerate(chars)}
    itos = {i: s for s, i in stoi.items()}     # clearer inversion of stoi
    block_size = 3                             # context length in characters
    batch_size = 32

    # 80/10/10 train/val/test split over shuffled words.
    random.seed(42)
    random.shuffle(words)
    n = len(words)
    n1 = int(0.8 * n)
    n2 = int(0.9 * n)
    X, Y = build_dataset(words[:n1])
    Xval, Yval = build_dataset(words[n1:n2])
    Xtest, Ytest = build_dataset(words[n2:])

    # --- parameters ------------------------------------------------------
    g = torch.Generator().manual_seed(2147483647)
    C = torch.randn((len(stoi), 10), generator=g)   # char embeddings, dim 10
    W = torch.randn((30, 200), generator=g)         # 30 = block_size * emb_dim
    b = torch.randn(200, generator=g)
    W2 = torch.randn((200, len(stoi)), generator=g)
    b2 = torch.randn(len(stoi), generator=g)
    bngain = torch.ones((1, 200))                   # batch-norm scale
    bnbias = torch.zeros((1, 200))                  # batch-norm shift
    bnmean_running = torch.zeros((1, 200))          # EMA of batch means
    bnstd_running = torch.ones((1, 200))            # EMA of batch stds
    params = [C, W, b, W2, b2, bngain, bnbias]
    for p in params:
        p.requires_grad = True

    # --- training loop ---------------------------------------------------
    lossi = []
    stepi = []
    for i in range(200000):
        # Sample a minibatch of contexts/targets.
        idx = torch.randint(0, X.shape[0], (batch_size,))
        embedding = C[X[idx]]
        embcat = embedding.view(-1, 30)
        # linear -> batch norm -> tanh -> linear
        hpre = embcat @ W + b
        mean = hpre.mean(0, keepdim=True)
        stddeviation = hpre.std(0, keepdim=True)
        # Batch-normalize to keep the pre-activations roughly Gaussian.
        hpre = bngain * (hpre - mean) / stddeviation + bnbias
        with torch.no_grad():
            # EMA of batch statistics, used at evaluation/inference time.
            bnmean_running = 0.999 * bnmean_running + 0.001 * mean
            bnstd_running = 0.999 * bnstd_running + 0.001 * stddeviation
        h = torch.tanh(hpre)
        logits2 = h @ W2 + b2
        loss = F.cross_entropy(logits2, Y[idx])

        # Simple step-decayed learning rate.
        lr = 0.1 if i < 100000 else 0.01
        if i % 10000 == 0:
            # Fix: log periodically instead of printing 200k lines, which
            # also dominated the runtime of the original loop.
            print(f"step {i}: loss {loss.item():.4f}")
        for p in params:
            p.grad = None
        loss.backward()
        for p in params:
            p.data += -lr * p.grad
        lossi.append(loss.log10().item())
        stepi.append(i)

    plt.plot(stepi, lossi)
    #draw_embeddings(C, stoi)
    #plt.show()

    # --- evaluation using running batch-norm statistics ------------------
    train_loss = calc_full_loss(C, W, b, W2, b2, Y, X)
    val_loss = calc_full_loss(C, W, b, W2, b2, Yval, Xval)
    print("train loss:", train_loss.item())
    print("val  loss:", val_loss.item())

    #for _ in range(10):
    #    out = inference(C, W, b, W2, b2, g, block_size)
    #    print(out)