bala1802 committed
Commit dabde41
1 Parent(s): be2585e

Upload 7 files

Files changed (7)
  1. block.py +21 -0
  2. data_utils.py +49 -0
  3. feedforward.py +17 -0
  4. gpt_config.py +16 -0
  5. gpt_language_model.py +66 -0
  6. head.py +33 -0
  7. multi_head_attention.py +19 -0
block.py ADDED
@@ -0,0 +1,21 @@
+ import torch.nn as nn
+
+ from multi_head_attention import MultiHeadAttention
+ from feedforward import FeedForward
+
+ class Block(nn.Module):
+     """ Transformer block: communication followed by computation """
+
+     def __init__(self, n_embd, n_head):
+         # n_embd: embedding dimension, n_head: the number of heads we'd like
+         super().__init__()
+         head_size = n_embd // n_head
+         self.sa = MultiHeadAttention(n_head, head_size)
+         self.ffwd = FeedForward(n_embd)
+         self.ln1 = nn.LayerNorm(n_embd)
+         self.ln2 = nn.LayerNorm(n_embd)
+
+     def forward(self, x):
+         x = x + self.sa(self.ln1(x))
+         x = x + self.ffwd(self.ln2(x))
+         return x
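
A quick shape check for Block (illustrative only, not part of this upload), assuming the defaults in gpt_config.py; the residual connections mean the output shape matches the input:

import torch
import gpt_config as config
from block import Block

blk = Block(config.n_embd, config.n_head)             # 384-dim embeddings, 6 heads
x = torch.randn(4, config.block_size, config.n_embd)  # (B, T, C) = (4, 256, 384)
y = blk(x)
print(y.shape)  # torch.Size([4, 256, 384]) -- unchanged, thanks to the residual connections
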
data_utils.py ADDED
@@ -0,0 +1,49 @@
+ import torch
+
+ with open('data/input.txt', 'r', encoding='utf-8') as f:
+     text = f.read()
+
+ # here are all the unique characters that occur in this text
+ chars = sorted(list(set(text)))
+ vocab_size = len(chars)
+ # create a mapping from characters to integers
+ stoi = { ch:i for i,ch in enumerate(chars) }
+ itos = { i:ch for i,ch in enumerate(chars) }
+ encode = lambda s: [stoi[c] for c in s] # encoder: take a string, output a list of integers
+ decode = lambda l: ''.join([itos[i] for i in l]) # decoder: take a list of integers, output a string
+
+ # Train and test splits
+ data = torch.tensor(encode(text), dtype=torch.long)
+ n = int(0.9*len(data)) # first 90% will be train, rest val
+ train_data = data[:n]
+ val_data = data[n:]
+
+ ''' Returns the training split (first 90% of the encoded text). '''
+
+ def get_train_data():
+     return train_data
+
+ ''' Returns the validation split (last 10% of the encoded text). '''
+
+ def get_val_data():
+     return val_data
+
+ ''' Returns the full encoded dataset as a 1-D tensor of token ids. '''
+
+ def get_data():
+     return data
+
+ ''' Returns the encoder: string -> list of integer token ids. '''
+
+ def get_encoder():
+     return encode
+
+ ''' Returns the decoder: list of integer token ids -> string. '''
+
+ def get_decoder():
+     return decode
+
+ ''' Returns the vocabulary size (number of unique characters). '''
+
+ def get_vocab_size():
+     return vocab_size
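
data_utils.py exposes the splits but no batching helper; below is a minimal get_batch sketch (hypothetical, not part of this upload) that samples random block_size-length chunks and their shifted-by-one targets:

import torch
import gpt_config as config
import data_utils

def get_batch(split):
    # pick the split, then sample batch_size random starting offsets
    data = data_utils.get_train_data() if split == 'train' else data_utils.get_val_data()
    ix = torch.randint(len(data) - config.block_size, (config.batch_size,))
    x = torch.stack([data[i:i+config.block_size] for i in ix])       # inputs
    y = torch.stack([data[i+1:i+config.block_size+1] for i in ix])   # targets: inputs shifted by one
    return x.to(config.device), y.to(config.device)
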
feedforward.py ADDED
@@ -0,0 +1,17 @@
+ import torch.nn as nn
+ import gpt_config as config
+
+ class FeedForward(nn.Module):
+     """ a simple linear layer followed by a non-linearity """
+
+     def __init__(self, n_embd):
+         super().__init__()
+         self.net = nn.Sequential(
+             nn.Linear(n_embd, 4 * n_embd),
+             nn.ReLU(),
+             nn.Linear(4 * n_embd, n_embd),
+             nn.Dropout(config.dropout),
+         )
+
+     def forward(self, x):
+         return self.net(x)
gpt_config.py ADDED
@@ -0,0 +1,16 @@
+ import torch
+
+ # hyperparameters
+ batch_size = 64 # how many independent sequences will we process in parallel?
+ block_size = 256 # what is the maximum context length for predictions?
+ max_iters = 10000
+ eval_interval = 500
+ learning_rate = 3e-4
+ # device = 'cuda' if torch.cuda.is_available() else 'cpu'
+ device = 'mps'
+ eval_iters = 200
+ n_embd = 384
+ n_head = 6
+ n_layer = 6
+ dropout = 0.2
+ vocab_size = 65
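
Note that device is hard-coded to 'mps' (Apple Silicon); a more portable selection (an alternative sketch, not what this commit uses) would fall back automatically:

import torch

# prefer Apple's Metal backend, then CUDA, then CPU
if torch.backends.mps.is_available():
    device = 'mps'
elif torch.cuda.is_available():
    device = 'cuda'
else:
    device = 'cpu'
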
gpt_language_model.py ADDED
@@ -0,0 +1,66 @@
+ import torch
+ import torch.nn as nn
+ from torch.nn import functional as F
+
+ import gpt_config as config
+ from block import Block
+
+ class GPTLanguageModel(nn.Module):
+
+     def __init__(self):
+         super().__init__()
+         # each token directly reads off the logits for the next token from a lookup table
+         self.token_embedding_table = nn.Embedding(config.vocab_size, config.n_embd)
+         self.position_embedding_table = nn.Embedding(config.block_size, config.n_embd)
+         self.blocks = nn.Sequential(*[Block(config.n_embd, n_head=config.n_head) for _ in range(config.n_layer)])
+         self.ln_f = nn.LayerNorm(config.n_embd) # final layer norm
+         self.lm_head = nn.Linear(config.n_embd, config.vocab_size)
+
+         # better init, not covered in the original GPT video, but important; will cover in a follow-up video
+         self.apply(self._init_weights)
+
+     def _init_weights(self, module):
+         if isinstance(module, nn.Linear):
+             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+             if module.bias is not None:
+                 torch.nn.init.zeros_(module.bias)
+         elif isinstance(module, nn.Embedding):
+             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+
+     def forward(self, idx, targets=None):
+         B, T = idx.shape
+
+         # idx and targets are both (B,T) tensors of integers
+         tok_emb = self.token_embedding_table(idx) # (B,T,C)
+         pos_emb = self.position_embedding_table(torch.arange(T, device=config.device)) # (T,C)
+         x = tok_emb + pos_emb # (B,T,C)
+         x = self.blocks(x) # (B,T,C)
+         x = self.ln_f(x) # (B,T,C)
+         logits = self.lm_head(x) # (B,T,vocab_size)
+
+         if targets is None:
+             loss = None
+         else:
+             B, T, C = logits.shape
+             logits = logits.view(B*T, C)
+             targets = targets.view(B*T)
+             loss = F.cross_entropy(logits, targets)
+
+         return logits, loss
+
+     def generate(self, idx, max_new_tokens):
+         # idx is a (B, T) array of indices in the current context
+         for _ in range(max_new_tokens):
+             # crop idx to the last block_size tokens
+             idx_cond = idx[:, -config.block_size:]
+             # get the predictions
+             logits, loss = self(idx_cond)
+             # focus only on the last time step
+             logits = logits[:, -1, :] # becomes (B, C)
+             # apply softmax to get probabilities
+             probs = F.softmax(logits, dim=-1) # (B, C)
+             # sample from the distribution
+             idx_next = torch.multinomial(probs, num_samples=1) # (B, 1)
+             # append sampled index to the running sequence
+             idx = torch.cat((idx, idx_next), dim=1) # (B, T+1)
+         return idx
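
Putting the pieces together, a minimal training-and-sampling sketch (not part of this commit; it reuses the hypothetical get_batch helper sketched under data_utils.py above):

import torch
import gpt_config as config
import data_utils
from gpt_language_model import GPTLanguageModel

model = GPTLanguageModel().to(config.device)
optimizer = torch.optim.AdamW(model.parameters(), lr=config.learning_rate)

for step in range(config.max_iters):
    xb, yb = get_batch('train')               # hypothetical helper, see the data_utils.py note
    logits, loss = model(xb, yb)
    optimizer.zero_grad(set_to_none=True)
    loss.backward()
    optimizer.step()

# sample 200 new tokens starting from token id 0 and decode them back to text
context = torch.zeros((1, 1), dtype=torch.long, device=config.device)
print(data_utils.get_decoder()(model.generate(context, max_new_tokens=200)[0].tolist()))
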
head.py ADDED
@@ -0,0 +1,33 @@
+ import torch
+ import torch.nn as nn
+ from torch.nn import functional as F
+
+ import gpt_config as config
+
+ class Head(nn.Module):
+     """ one head of self-attention """
+
+     def __init__(self, head_size):
+         super().__init__()
+         self.key = nn.Linear(config.n_embd, head_size, bias=False)
+         self.query = nn.Linear(config.n_embd, head_size, bias=False)
+         self.value = nn.Linear(config.n_embd, head_size, bias=False)
+         self.register_buffer('tril', torch.tril(torch.ones(config.block_size, config.block_size)))
+
+         self.dropout = nn.Dropout(config.dropout)
+
+     def forward(self, x):
+         # input of size (batch, time-step, channels)
+         # output of size (batch, time-step, head size)
+         B,T,C = x.shape
+         k = self.key(x)   # (B,T,hs)
+         q = self.query(x) # (B,T,hs)
+         # compute attention scores ("affinities")
+         wei = q @ k.transpose(-2,-1) * k.shape[-1]**-0.5 # (B, T, hs) @ (B, hs, T) -> (B, T, T)
+         wei = wei.masked_fill(self.tril[:T, :T] == 0, float('-inf')) # (B, T, T)
+         wei = F.softmax(wei, dim=-1) # (B, T, T)
+         wei = self.dropout(wei)
+         # perform the weighted aggregation of the values
+         v = self.value(x) # (B,T,hs)
+         out = wei @ v # (B, T, T) @ (B, T, hs) -> (B, T, hs)
+         return out
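
A single-head shape check (illustrative, not included in the commit), again assuming the gpt_config.py defaults; any T up to block_size works because the causal mask is sliced to tril[:T, :T]:

import torch
import gpt_config as config
from head import Head

head = Head(head_size=config.n_embd // config.n_head)  # 384 // 6 = 64
x = torch.randn(2, 8, config.n_embd)                    # (B, T, C) with T=8 <= block_size
out = head(x)
print(out.shape)  # torch.Size([2, 8, 64]) -- (B, T, head_size)
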
multi_head_attention.py ADDED
@@ -0,0 +1,19 @@
+ import torch
+ import torch.nn as nn
+
+ import gpt_config as config
+ from head import Head
+
+ class MultiHeadAttention(nn.Module):
+     """ multiple heads of self-attention in parallel """
+
+     def __init__(self, num_heads, head_size):
+         super().__init__()
+         self.heads = nn.ModuleList([Head(head_size) for _ in range(num_heads)])
+         self.proj = nn.Linear(head_size * num_heads, config.n_embd)
+         self.dropout = nn.Dropout(config.dropout)
+
+     def forward(self, x):
+         out = torch.cat([h(x) for h in self.heads], dim=-1)
+         out = self.dropout(self.proj(out))
+         return out