Chintan-Shah committed on
Commit 6ec474a · verified · 1 Parent(s): 563a311

Create app.py

Files changed (1)
  1. app.py +106 -0
app.py ADDED
@@ -0,0 +1,106 @@
+ import tiktoken
+ import os
+ import torch
+ from torch.nn import functional as F
+
+ from model import GPTConfig, GPT
+ import gradio as gr
+
+ device = 'cpu'
+ if torch.cuda.is_available():
+     device = 'cuda'
+ elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
+     device = "mps"
+ print(f"using device: {device}")
+
+ modelpath = '.'
+
+ # default number of tokens to generate (the UI slider overrides this)
+ max_length = 500
+
+ enc = tiktoken.get_encoding('gpt2')
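+ # enc is the GPT-2 byte-pair-encoding tokenizer from tiktoken:
+ # enc.encode("JULIET\n") returns a list of integer token ids,
+ # and enc.decode maps a list of ids back to text.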
+
+ # load the trained checkpoint and rebuild the model from its saved config
+ ckpt_path = os.path.join(modelpath, 'GPT2ShakespeareModel.pt')
+ print(ckpt_path)
+ checkpoint = torch.load(ckpt_path, map_location=device)
+ gptconf = GPTConfig(**checkpoint['model_args'])
+ model = GPT(gptconf)
+ state_dict = checkpoint['model']
+ unwanted_prefix = '_orig_mod.'
+ for k, v in list(state_dict.items()):
+     if k.startswith(unwanted_prefix):
+         state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k)
+ model.load_state_dict(state_dict)
+ model.eval()  # inference only: disable dropout
+
+ model.to(device)
+ model = torch.compile(model)
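+ # note: a checkpoint saved from a torch.compile()-wrapped model stores its
+ # parameters under an '_orig_mod.' prefix, which is why the keys are renamed
+ # above before load_state_dict.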
+
+ def generateText(inputText="JULIET\n", num_tokens=max_length):
+     start_tokens = enc.encode(inputText)
+     # print(start_tokens, len(start_tokens))
+     start_tokens = torch.tensor(start_tokens)
+     x = start_tokens.view(1, len(start_tokens))
+     # print(x, x.shape)
+     x = x.to(device)
+
+     # sample until the sequence (prompt included) reaches num_tokens tokens
+     while x.size(1) < num_tokens:
+         # forward the model to get the logits
+         with torch.no_grad():
+             logits = model(x)[0] # (B, T, vocab_size)
+         # take the logits at the last position
+         logits = logits[:, -1, :] # (B, vocab_size)
+         # get the probabilities
+         probs = F.softmax(logits, dim=-1)
+         # do top-k sampling of 50 (huggingface pipeline default)
+         # topk_probs here becomes (1, 50), topk_indices is (1, 50)
+         topk_probs, topk_indices = torch.topk(probs, 50, dim=-1)
+         # select a token from the top-k probabilities
+         # note: multinomial does not demand the input to sum to 1
+         ix = torch.multinomial(topk_probs, 1) # (B, 1)
+         # gather the corresponding indices
+         xcol = torch.gather(topk_indices, -1, ix) # (B, 1)
+         # append to the sequence
+         x = torch.cat((x, xcol), dim=1)
+         # print(x.size(1))
+
+     # decode and return the generated text
+     tokens = x[0, :num_tokens].tolist()
+     decoded = enc.decode(tokens)
+     return decoded
+
+
+ # def generateOutput(inputText="JULIET\n", num_tokens = 500):
+ #     context = torch.zeros((1, 1), dtype=torch.long, device=device)
+ #     return(decode(model.generate(context, max_new_tokens=num_tokens)[0].tolist()))
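+ # Optional local smoke test (hypothetical invocation, not part of the Space UI):
+ # print(generateText("ROMEO:\n", num_tokens=200))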
+
+ title = "GPT from scratch, using the GPT-2 (BPE) tokenizer, generating text based on training"
+ description = "A GPT model trained from scratch on Shakespeare; enter some starting text and the model continues it."
+ examples = [["ROMEO:\nWith love's light wings did I o'er-perch these walls;\nFor stony limits cannot hold love out,\nAnd what love can do that dares love attempt;\nTherefore thy kinsmen are no let to me.\n", 500],
+             ["ROMEO:\n", 500],
+             ["JULIET:\n", 500],
+             ["CAPULET:\nWhy, how now, kinsman! wherefore storm you so?\n", 500],
+             ["KING RICHARD II:\nAy, hand from hand, my love, and heart from heart.\nAnd", 500],
+             ["KING RICHARD II:\n", 500],
+             ["CAPULET:\n", 500],
+             ["QUEEN:\nBanish us both and send the king with me.\nAnd", 500],
+             ["QUEEN:\n", 500],
+             ["CORIOLANUS:\n", 500],
+             ["MENENIUS:\n", 500]
+            ]
+
+ demo = gr.Interface(
+     generateText,
+     inputs = [
+         gr.Textbox(label="Starting text"),
+         # note: very long requests may exceed the model's block size, depending on model_args
+         gr.Slider(100, 2000, value = 500, step=100, label="Number of tokens that you want in your output"),
+     ],
+     outputs = [
+         gr.Text(),
+     ],
+     title = title,
+     description = description,
+     examples = examples,
+     cache_examples=False
+ )
+ demo.launch()
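+ # On Hugging Face Spaces, launch() needs no arguments; when running locally,
+ # demo.launch(share=True) is one way to get a shareable public link.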