ibrahimmkhalid committed
Commit 029c3bf · 1 Parent(s): 462d56c

make streamlit app

Files changed (2):
  1. README.md +2 -2
  2. app.py +186 -2
README.md CHANGED
@@ -1,6 +1,6 @@
 ---
-title: Testing
-emoji: 🏆
+title: LLM From Scratch
+emoji: 🧠
 colorFrom: green
 colorTo: red
 sdk: streamlit
app.py CHANGED
@@ -1,4 +1,188 @@
 import streamlit as st
+import torch
+import torch.nn as nn
+from torch.nn import functional as F
+import pickle
+import os
 
-x = st.slider('Select a value')
-st.write(x, 'squared is', x * x)
+st.title('LLM from scratch Demo')
+
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+block_size = 128
+batch_size = 32
+max_iters = 4000
+learning_rate = 3e-4
+eval_every = 500
+n_embd = 384
+n_head = 8
+n_layer = 8
+dropout = 0.2
+
+
+class Head(nn.Module):
+    """ one head of self-attention """
+
+    def __init__(self, head_size):
+        super().__init__()
+        self.key = nn.Linear(n_embd, head_size, bias=False)
+        self.query = nn.Linear(n_embd, head_size, bias=False)
+        self.value = nn.Linear(n_embd, head_size, bias=False)
+        self.register_buffer('tril', torch.tril(torch.ones(block_size, block_size)))
+
+        self.dropout = nn.Dropout(dropout)
+
+    def forward(self, x):
+        # input of size (batch, time-step, channels)
+        # output of size (batch, time-step, head size)
+        B,T,C = x.shape
+        k = self.key(x) # (B,T,hs)
+        q = self.query(x) # (B,T,hs)
+        # compute attention scores ("affinities")
+        wei = q @ k.transpose(-2,-1) * k.shape[-1]**-0.5 # (B, T, hs) @ (B, hs, T) -> (B, T, T)
+        wei = wei.masked_fill(self.tril[:T, :T] == 0, float('-inf')) # (B, T, T)
+        wei = F.softmax(wei, dim=-1) # (B, T, T)
+        wei = self.dropout(wei)
+        # perform the weighted aggregation of the values
+        v = self.value(x) # (B,T,hs)
+        out = wei @ v # (B, T, T) @ (B, T, hs) -> (B, T, hs)
+        return out
+
+class MultiHeadAttention(nn.Module):
+    """ multiple heads of self-attention in parallel """
+
+    def __init__(self, num_heads, head_size):
+        super().__init__()
+        self.heads = nn.ModuleList([Head(head_size) for _ in range(num_heads)])
+        self.proj = nn.Linear(head_size * num_heads, n_embd)
+        self.dropout = nn.Dropout(dropout)
+
+    def forward(self, x):
+        out = torch.cat([h(x) for h in self.heads], dim=-1) # (B, T, F) -> (B, T, [h1, h1, h1, h1, h2, h2, h2, h2, h3, h3, h3, h3])
+        out = self.dropout(self.proj(out))
+        return out
+
+class FeedFoward(nn.Module):
+    """ a simple linear layer followed by a non-linearity """
+
+    def __init__(self, n_embd):
+        super().__init__()
+        self.net = nn.Sequential(
+            nn.Linear(n_embd, 4 * n_embd),
+            nn.ReLU(),
+            nn.Linear(4 * n_embd, n_embd),
+            nn.Dropout(dropout),
+        )
+
+    def forward(self, x):
+        return self.net(x)
+
+class Block(nn.Module):
+    """ Transformer block: communication followed by computation """
+
+    def __init__(self, n_embd, n_head):
+        # n_embd: embedding dimension, n_head: the number of heads we'd like
+        super().__init__()
+        head_size = n_embd // n_head
+        self.sa = MultiHeadAttention(n_head, head_size)
+        self.ffwd = FeedFoward(n_embd)
+        self.ln1 = nn.LayerNorm(n_embd)
+        self.ln2 = nn.LayerNorm(n_embd)
+
+    def forward(self, x):
+        y = self.sa(x)
+        x = self.ln1(x + y)
+        y = self.ffwd(x)
+        x = self.ln2(x + y)
+        return x
+
+class GPTLanguageModel(nn.Module):
+    def __init__(self, vocab_size):
+        super().__init__()
+        self.token_embedding_table = nn.Embedding(vocab_size, n_embd)
+        self.position_embedding_table = nn.Embedding(block_size, n_embd)
+        self.blocks = nn.Sequential(*[Block(n_embd, n_head=n_head) for _ in range(n_layer)])
+        self.ln_f = nn.LayerNorm(n_embd) # final layer norm
+        self.lm_head = nn.Linear(n_embd, vocab_size)
+
+
+        self.apply(self._init_weights)
+
+    def _init_weights(self, module):
+        if isinstance(module, nn.Linear):
+            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+            if module.bias is not None:
+                torch.nn.init.zeros_(module.bias)
+        elif isinstance(module, nn.Embedding):
+            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+
+    def forward(self, index, targets=None):
+        B, T = index.shape
+
+
+        # idx and targets are both (B,T) tensor of integers
+        tok_emb = self.token_embedding_table(index) # (B,T,C)
+        pos_emb = self.position_embedding_table(torch.arange(T, device=device)) # (T,C)
+        x = tok_emb + pos_emb # (B,T,C)
+        x = self.blocks(x) # (B,T,C)
+        x = self.ln_f(x) # (B,T,C)
+        logits = self.lm_head(x) # (B,T,vocab_size)
+
+        if targets is None:
+            loss = None
+        else:
+            B, T, C = logits.shape
+            logits = logits.view(B*T, C) # reshape to what torch.cross_entropy expects
+            targets = targets.view(B*T)
+            loss = F.cross_entropy(logits, targets)
+        return logits, loss
+
+    def generate(self, index, max_new_tokens):
+        # index is (B, T) array of indices in the current context
+        for _ in range(max_new_tokens):
+            # crop idx to the last block_size tokens
+            index_cond = index[:, -block_size:]
+            # get the predictions
+            logits, loss = self.forward(index_cond)
+            # focus only on the last time step
+            logits = logits[:, -1, :] # becomes (B, C)
+            # apply softmax to get probabilities
+            probs = F.softmax(logits, dim=-1) # (B, C)
+            # sample from the distribution
+            index_next = torch.multinomial(probs, num_samples=1) # (B, 1)
+            # append sampled index to the running sequence
+            index = torch.cat((index, index_next), dim=1) # (B, T+1)
+        return index
+
+if not os.path.exists("./openwebtext/vocab.txt"):
+    raise Exception("Please run extract.py first")
+chars = ""
+with open("./openwebtext/vocab.txt", 'r', encoding='utf-8') as f:
+    text = f.read()
+    chars = sorted(list(set(text)))
+
+string_to_int = {ch: i for i, ch in enumerate(chars)}
+int_to_string = {i: ch for i, ch in enumerate(chars)}
+
+encode = lambda s: [string_to_int[ch] for ch in s]
+decode = lambda x: ''.join([int_to_string[i] for i in x])
+
+
+model_pickle_path = './model.pkl'
+
+try:
+    st.write('loading model parameters...')
+    with open(model_pickle_path, 'rb') as f:
+        model = pickle.load(f)
+    st.write('model loaded successfully!')
+except:
+    st.error('ERROR: model loading failed/model not found. Please run ./train_gpt_openwebtext.py first.')
+    exit()
+
+prompt = ''
+prompt = st.text_area('Prompt:', value=prompt, height=100, max_chars=block_size - 1, key='prompt')
+if len(prompt) != 0:
+    context = torch.tensor(encode(prompt), dtype=torch.long, device=device)
+    max_new_tokens = block_size - len(prompt)
+    generated_chars = decode(model.generate(context.unsqueeze(0), max_new_tokens=max_new_tokens)[0].tolist())
+    st.write('Generated text:')
+    st.write(generated_chars)
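
Note: the sketch below is not part of this commit. It is a minimal, hypothetical smoke test of the same load-and-generate path that app.py wires into Streamlit, assuming ./openwebtext/vocab.txt and ./model.pkl exist, that the class definitions above are importable when unpickling, and that the pickled weights live on the device you run it from.

# Hypothetical smoke test (not in this commit): mirrors app.py's vocab loading,
# prompt encoding, and model.generate() call, but prints to stdout instead of the UI.
import pickle
import torch

with open("./openwebtext/vocab.txt", "r", encoding="utf-8") as f:
    chars = sorted(set(f.read()))
string_to_int = {ch: i for i, ch in enumerate(chars)}
int_to_string = {i: ch for i, ch in enumerate(chars)}

with open("./model.pkl", "rb") as f:
    model = pickle.load(f)  # requires the model classes above to be importable
model.eval()

prompt = "Hello"  # must only use characters that appear in vocab.txt
context = torch.tensor([string_to_int[c] for c in prompt], dtype=torch.long).unsqueeze(0)
# if the model was pickled with CUDA weights, move model and context to the same device first
with torch.no_grad():
    generated = model.generate(context, max_new_tokens=100)[0].tolist()
print(''.join(int_to_string[i] for i in generated))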