# Inference-only web app: loads a character-level GPT checkpoint trained
# from scratch and serves it through a Gradio text interface.
import torch
import torch.nn as nn
from torch.nn import functional as F
import gradio as gr

# Hyperparameters; these must match the configuration used to train model.pt.
batch_size = 64   # training-time setting; unused during inference
block_size = 64   # maximum context length the model can attend over
n_embd = 128      # embedding dimension
n_head = 4        # attention heads per transformer block
n_layer = 4       # number of transformer blocks
dropout = 0.2     # inactive once model.eval() is called
device = 'cpu'

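# Derived quantity: each attention head works in a head_size = n_embd // n_head
# = 32-dimensional subspace (computed in Block below).
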
class Head(nn.Module):
    """One head of causal self-attention."""

    def __init__(self, head_size):
        super().__init__()
        self.key = nn.Linear(n_embd, head_size, bias=False)
        self.query = nn.Linear(n_embd, head_size, bias=False)
        self.value = nn.Linear(n_embd, head_size, bias=False)
        # Lower-triangular mask: each position may attend only to itself
        # and earlier positions.
        self.register_buffer('tril', torch.tril(torch.ones(block_size, block_size)))
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        B, T, C = x.shape
        k = self.key(x)
        q = self.query(x)
        # Scaled dot-product attention. Note this scales by the full embedding
        # width C rather than the per-head size; left unchanged so inference
        # matches however the checkpoint was trained.
        wei = q @ k.transpose(-2, -1) * C**-0.5
        wei = wei.masked_fill(self.tril[:T, :T] == 0, float('-inf'))
        wei = F.softmax(wei, dim=-1)
        wei = self.dropout(wei)
        v = self.value(x)
        out = wei @ v
        return out

class MultiHeadAttention(nn.Module):
    """Several attention heads in parallel, concatenated and projected back."""

    def __init__(self, num_heads, head_size):
        super().__init__()
        self.heads = nn.ModuleList([Head(head_size) for _ in range(num_heads)])
        self.proj = nn.Linear(n_embd, n_embd)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # Concatenate per-head outputs along the channel dimension.
        out = torch.cat([h(x) for h in self.heads], dim=-1)
        out = self.proj(out)
        return self.dropout(out)

class FeedForward(nn.Module):
    """Position-wise MLP: expand to 4x width, ReLU, project back down."""

    def __init__(self, n_embd):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(n_embd, 4 * n_embd),
            nn.ReLU(),
            nn.Linear(4 * n_embd, n_embd),
            nn.Dropout(dropout),
        )

    def forward(self, x):
        return self.net(x)

class Block(nn.Module):
    """Transformer block: self-attention then an MLP, each wrapped in a
    residual connection with pre-layer-norm."""

    def __init__(self, n_embd, n_head):
        super().__init__()
        head_size = n_embd // n_head
        self.sa = MultiHeadAttention(n_head, head_size)
        self.ffwd = FeedForward(n_embd)
        self.ln1 = nn.LayerNorm(n_embd)
        self.ln2 = nn.LayerNorm(n_embd)

    def forward(self, x):
        x = x + self.sa(self.ln1(x))
        x = x + self.ffwd(self.ln2(x))
        return x

class GPTLanguageModel(nn.Module):
    """Decoder-only transformer language model over a character vocabulary."""

    def __init__(self, vocab_size):
        super().__init__()
        self.token_embedding_table = nn.Embedding(vocab_size, n_embd)
        self.position_embedding_table = nn.Embedding(block_size, n_embd)
        self.blocks = nn.Sequential(*[Block(n_embd, n_head=n_head) for _ in range(n_layer)])
        self.ln_f = nn.LayerNorm(n_embd)
        self.lm_head = nn.Linear(n_embd, vocab_size)

    def forward(self, idx, targets=None):
        B, T = idx.shape
        tok_emb = self.token_embedding_table(idx)  # (B, T, n_embd)
        pos_emb = self.position_embedding_table(torch.arange(T, device=device))  # (T, n_embd)
        x = tok_emb + pos_emb  # positions broadcast across the batch
        x = self.blocks(x)
        x = self.ln_f(x)
        logits = self.lm_head(x)  # (B, T, vocab_size)
        # This script only does inference, so no loss is computed; None is
        # returned in the loss slot regardless of targets.
        return logits, None

    @torch.no_grad()  # generation never needs gradients
    def generate(self, idx, max_new_tokens):
        for _ in range(max_new_tokens):
            # Crop the running sequence to the last block_size tokens.
            idx_cond = idx[:, -block_size:]
            logits, _ = self(idx_cond)
            # Keep only the logits for the final position.
            logits = logits[:, -1, :]
            probs = F.softmax(logits, dim=-1)
            # Sample one next token and append it to the sequence.
            idx_next = torch.multinomial(probs, num_samples=1)
            idx = torch.cat((idx, idx_next), dim=1)
        return idx

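# Shape sanity check (illustrative sketch only, not executed by the app;
# vocab_size=65 is a placeholder):
#   m = GPTLanguageModel(vocab_size=65)
#   logits, _ = m(torch.zeros((1, 8), dtype=torch.long))
#   logits.shape  ->  torch.Size([1, 8, 65])
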
print("Loading model and vocabulary...")

# Rebuild the character vocabulary from the training corpus so that the
# stoi/itos mappings line up with the embeddings in the checkpoint.
with open('input.txt', 'r', encoding='utf-8') as f:
    text = f.read()
chars = sorted(list(set(text)))
vocab_size = len(chars)
stoi = { ch: i for i, ch in enumerate(chars) }
itos = { i: ch for i, ch in enumerate(chars) }
encode = lambda s: [stoi[c] for c in s]           # string -> list of token ids
decode = lambda l: ''.join([itos[i] for i in l])  # list of token ids -> string

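# Note: encode() raises KeyError for any character absent from input.txt;
# generate_text() below catches this and reports it to the user.
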
model = GPTLanguageModel(vocab_size)
model.load_state_dict(torch.load('model.pt', map_location=device))
model.to(device)
model.eval()  # disable dropout for inference

def generate_text(start_text):
    if not start_text:
        return "Please type something to start!"
    try:
        # Encode the prompt into a (1, T) tensor of token ids.
        context = torch.tensor([encode(start_text)], dtype=torch.long, device=device)
        output_ids = model.generate(context, max_new_tokens=200)
        # Decode the full sequence (prompt + continuation) back into text.
        full_response = decode(output_ids[0].tolist())
        return full_response
    except KeyError:
        return "Error: the prompt contains a character the model has never seen before."

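# Example: generate_text("Nano") returns "Nano" followed by 200 generated
# characters (the vocabulary is character-level, so one token = one character).
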
print("Launching Web App...")
|
|
|
interface = gr.Interface(
|
|
|
fn=generate_text,
|
|
|
inputs=gr.Textbox(lines=2, placeholder="Type a starting word (e.g. 'Nano')..."),
|
|
|
outputs="text",
|
|
|
title="My Private AI",
|
|
|
description="An AI model trained from scratch on my own data."
|
|
|
)
|
|
|
|
|
|
interface.launch() |
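# launch() serves on localhost by default; passing share=True creates a
# temporary public Gradio link: interface.launch(share=True)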