import torch
import matplotlib.pyplot as plt
import torch.nn.functional as F 
def show_image(n, itos):
    """Render a bigram-count (or probability) matrix as an annotated heatmap.

    n: 2-D torch tensor of bigram statistics; n[i, j] is the count/probability
       of character j following character i.
    itos: index -> character mapping covering every row/column index of `n`.

    Grid size follows n.shape (was hard-coded to 27x27), so any vocabulary works.
    Side effect only: opens a matplotlib window; returns None.
    """
    rows, cols = n.shape[0], n.shape[1]
    plt.figure(figsize=(16, 16))
    plt.imshow(n, cmap='Blues')
    for i in range(rows):
        for j in range(cols):
            chstr = itos[i] + itos[j]
            # bigram label on top, its statistic underneath, in each cell
            plt.text(j, i, chstr, ha='center', va='bottom', color='gray')
            plt.text(j, i, n[i, j].item(), ha='center', va='top', color='gray')
    plt.axis('off')
    plt.show()
    
def bigram_loss(words, N, stoi, itos):
    """Return the average negative log-likelihood of all bigrams in `words`.

    words: iterable of strings to score. A single string also works: each of
        its characters is then scored as a one-letter word (see the probe
        string used by bigram_modelV1).
    N: row-normalized probability matrix; N[i, j] = P(next char j | char i).
    stoi: char -> index mapping; '.' marks the start/end of a word.
    itos: unused; kept so the signature stays compatible with existing callers.

    Returns a 0-dim torch tensor. Empty input returns 0.0 instead of raising
    ZeroDivisionError (was `-log_likelihood/n` with n == 0).
    """
    log_likelihood = 0.0
    count = 0
    for w in words:
        chs = ['.'] + list(w) + ['.']
        for ch1, ch2 in zip(chs, chs[1:]):
            count += 1
            log_likelihood += torch.log(N[stoi[ch1], stoi[ch2]])
    if count == 0:
        return torch.tensor(0.0)
    return -log_likelihood / count

def bigram_modelV1(words):
    """Count-based character bigram model.

    Builds an add-one-smoothed bigram probability matrix from `words`, prints
    the training loss and the loss on the probe string 'gangafdjq' (each probe
    character scored as a one-letter word), then samples 10 names.

    Returns the list of 10 sampled names. (Previously the samples were built
    and silently discarded — the print was commented out and `result` was
    reset each iteration, leaving the sampling loop with no effect.)
    NOTE: the probe string requires its letters to appear in `words`.
    """
    chars = ['.'] + sorted(set(''.join(words)))  # '.' = word start/end marker
    stoi = {s: i for i, s in enumerate(chars)}
    itos = {i: s for s, i in stoi.items()}
    vocab = len(stoi)  # was hard-coded 27; wrong whenever words use != 26 letters
    counts = torch.zeros((vocab, vocab), dtype=torch.int32)
    for w in words:
        chs = ['.'] + list(w) + ['.']
        for ch1, ch2 in zip(chs, chs[1:]):
            counts[stoi[ch1], stoi[ch2]] += 1
    N = (counts + 1).float()  # add-one (Laplace) smoothing: no zero-probability bigram
    N = N / N.sum(dim=1, keepdim=True)  # each row becomes P(next | current)
    print(bigram_loss(words, N, stoi, itos))
    print(bigram_loss("gangafdjq", N, stoi, itos))
    samples = []
    g = torch.Generator().manual_seed(2147483647)  # reproducible sampling
    for _ in range(10):
        idx = 0  # start at the '.' boundary token
        name = ''
        while True:
            idx = torch.multinomial(N[idx], num_samples=1, replacement=True, generator=g).item()
            if idx == 0:  # sampled '.' -> word finished
                break
            name += itos[idx]
        print(name)
        samples.append(name)
    return samples

def bigram_modelV2(words):
    """Neural character bigram model: one linear layer trained by gradient descent.

    Equivalent in spirit to bigram_modelV1, but the bigram probabilities are
    learned as softmax(one_hot(x) @ W) with 50 steps of full-batch gradient
    descent (lr=50, L2 regularization 0.01). Prints the loss each step, then
    samples and prints 5 names.

    Returns the list of 5 sampled names.

    Fixes vs the original: removed the unused count tensor; matrix sizes follow
    the derived vocabulary instead of a hard-coded 27; the loop-invariant
    one-hot encoding is hoisted out of the training loop; randomness is seeded
    with the same generator as V1 so runs are reproducible; sampled names no
    longer carry a trailing '.' (the end token was appended before the break).
    """
    chars = ['.'] + sorted(set(''.join(words)))  # '.' = word start/end marker
    stoi = {s: i for i, s in enumerate(chars)}
    itos = {i: s for s, i in stoi.items()}
    vocab = len(stoi)  # was hard-coded 27

    # Build the training set: every adjacent character pair, with '.' boundaries.
    xs, ys = [], []
    for w in words:
        chs = ['.'] + list(w) + ['.']
        for ch1, ch2 in zip(chs, chs[1:]):
            xs.append(stoi[ch1])
            ys.append(stoi[ch2])
    xs = torch.tensor(xs)
    ys = torch.tensor(ys)

    g = torch.Generator().manual_seed(2147483647)  # reproducible, matching V1
    W = torch.randn((vocab, vocab), generator=g, requires_grad=True)
    xenc = F.one_hot(xs, num_classes=vocab).float()  # invariant: hoisted out of the loop
    for _ in range(50):
        # forward: softmax over the rows of the logit matrix
        logits = xenc @ W
        counts = logits.exp()
        probs = counts / counts.sum(dim=1, keepdim=True)
        # NLL of the observed next-chars + L2 weight regularization
        loss = -probs[torch.arange(xs.shape[0]), ys].log().mean() + 0.01 * (W**2).mean()
        print(loss.item())
        # backward pass
        W.grad = None
        loss.backward()
        # update (full-batch gradient descent, lr = 50)
        W.data += -50 * W.grad

    # sample/inference: walk the learned transition distribution until '.'
    samples = []
    for _ in range(5):
        out = []
        ix = 0
        while True:
            xenc_ix = F.one_hot(torch.tensor([ix]), num_classes=vocab).float()
            logits = xenc_ix @ W
            counts = logits.exp()
            p = counts / counts.sum(dim=1, keepdim=True)
            ix = torch.multinomial(p, num_samples=1, replacement=True, generator=g).item()
            if ix == 0:  # end token: stop without emitting it
                break
            out.append(itos[ix])
        name = ''.join(out)
        print(name)
        samples.append(name)
    return samples
if __name__ == '__main__':
    # One training word per line; 'with' closes the handle (was leaked by a bare open()).
    with open('names.txt') as f:
        words = f.read().splitlines()
    bigram_modelV2(words)