sayanbanerjee32 committed
Commit ef65453
1 Parent(s): 9177ebb

Upload folder using huggingface_hub

Files changed (3):
  1. data/meta.pkl +3 -0
  2. model.py +144 -0
  3. saved_model/ckpt.pt +3 -0
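
The commit message indicates the folder was pushed with huggingface_hub's upload_folder helper. A minimal sketch of how such an upload is typically issued; the repo_id below is a placeholder, not taken from this commit:

from huggingface_hub import HfApi

api = HfApi()
# repo_id is hypothetical; substitute the actual target repository.
api.upload_folder(
    folder_path=".",               # local folder containing data/, model.py, saved_model/
    repo_id="<user>/<repo-name>",
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)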
data/meta.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6ee5a37533af83b67fcbe6b93705fde9e15e78bafe895f54b2cb2cb32534526c
+ size 703
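
The pointer above only records the LFS object for data/meta.pkl; the 703-byte pickle itself is not shown. A hedged loading sketch, assuming it follows the common nanoGPT-style convention of storing tokenizer metadata (the 'stoi'/'itos' keys are an assumption, not confirmed by this diff):

import pickle

with open('data/meta.pkl', 'rb') as f:
    meta = pickle.load(f)

# Assumed nanoGPT-style keys: character-to-id and id-to-character maps.
stoi, itos = meta['stoi'], meta['itos']
encode = lambda s: [stoi[c] for c in s]              # string -> list of token ids
decode = lambda ids: ''.join(itos[i] for i in ids)   # token ids -> string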
model.py ADDED
@@ -0,0 +1,144 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from dataclasses import dataclass
+
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
+
+
+ class Head(nn.Module):
+     """ one head of self-attention """
+     def __init__(self, config, head_size):
+         super().__init__()
+         self.key = nn.Linear(config.n_embed, head_size, bias=False)
+         self.query = nn.Linear(config.n_embed, head_size, bias=False)
+         self.value = nn.Linear(config.n_embed, head_size, bias=False)
+         self.register_buffer('tril', torch.tril(torch.ones(config.block_size, config.block_size)))  # causal mask
+
+         self.dropout = nn.Dropout(config.dropout)
+
+     def forward(self, x):
+         B, T, C = x.shape
+         k = self.key(x)    # (B, T, head_size)
+         q = self.query(x)  # (B, T, head_size)
+         wei = q @ k.transpose(-2, -1) * C**-0.5  # (B, T, T) scaled attention scores
+         wei = wei.masked_fill(self.tril[:T, :T] == 0, float('-inf'))  # mask out future positions
+         wei = F.softmax(wei, dim=-1)
+         wei = self.dropout(wei)
+         v = self.value(x)  # (B, T, head_size)
+         out = wei @ v      # (B, T, head_size)
+         return out
+
+ class MultiHeadAttention(nn.Module):  # several attention heads in parallel, concatenated and projected
+     def __init__(self, config, head_size):
+         super().__init__()
+         self.heads = nn.ModuleList([Head(config, head_size) for _ in range(config.n_head)])
+         self.proj = nn.Linear(config.n_embed, config.n_embed)
+         self.dropout = nn.Dropout(config.dropout)
+
+     def forward(self, x):
+         out = torch.cat([h(x) for h in self.heads], dim=-1)
+         out = self.dropout(self.proj(out))
+         return out
+
+ class FeedForward(nn.Module):  # position-wise MLP with a 4x expansion
+     def __init__(self, config):
+         super().__init__()
+         self.net = nn.Sequential(
+             nn.Linear(config.n_embed, 4 * config.n_embed),
+             nn.ReLU(),
+             nn.Linear(4 * config.n_embed, config.n_embed),
+             nn.Dropout(config.dropout),
+         )
+
+     def forward(self, x):
+         return self.net(x)
+
+ class Block(nn.Module):  # transformer block: pre-norm self-attention then feed-forward, both with residuals
+     def __init__(self, config):
+         super().__init__()
+         head_size = config.n_embed // config.n_head
+         self.sa = MultiHeadAttention(config, head_size)
+         self.ffwd = FeedForward(config)
+         self.ln1 = nn.LayerNorm(config.n_embed)
+         self.ln2 = nn.LayerNorm(config.n_embed)
+
+     def forward(self, x):
+         x = x + self.sa(self.ln1(x))
+         x = x + self.ffwd(self.ln2(x))
+         return x
+
+ @dataclass
+ class ModelConfig:
+     block_size: int = 256
+     vocab_size: int = 50304
+     n_layer: int = 6
+     n_head: int = 6
+     n_embed: int = 384
+     dropout: float = 0.2
+
+ class BigramLanguageModel(nn.Module):  # despite the name, a full decoder-only transformer LM
+     def __init__(self, config):
+         super().__init__()
+         assert config.vocab_size is not None
+         assert config.block_size is not None
+         self.config = config
+
+         self.token_embedding_table = nn.Embedding(config.vocab_size, config.n_embed)
+         self.position_embedding_table = nn.Embedding(config.block_size, config.n_embed)
+         self.blocks = nn.Sequential(*[Block(config) for _ in range(config.n_layer)])
+         # nn.Sequential(
+         #     Block(n_embed, n_head=4),
+         #     Block(n_embed, n_head=4),
+         #     Block(n_embed, n_head=4),
+         #     nn.LayerNorm(n_embed),
+         # )
+         self.ln_f = nn.LayerNorm(config.n_embed)  # final layer norm
+         # self.sa_heads = MultiHeadAttention(4, n_embed//4) # 4 heads of 8-dimensional self-attention
+         # self.ffwd = FeedForward(n_embed)
+         self.lm_head = nn.Linear(config.n_embed, config.vocab_size)
+
+     def forward(self, idx, targets=None):
+         B, T = idx.shape
+
+         # idx and targets are both (B, T) tensors of integers
+         tok_emb = self.token_embedding_table(idx)  # (B, T, C = channels)
+         pos_emb = self.position_embedding_table(torch.arange(T, device=idx.device))  # (T, C)
+         x = tok_emb + pos_emb  # (B, T, C)
+         # x = self.sa_heads(x) # apply one self-attention head
+         # x = self.ffwd(x)
+         x = self.blocks(x)
+         x = self.ln_f(x)
+         logits = self.lm_head(x)  # (B, T, vocab_size)
+
+         if targets is None:
+             loss = None
+         else:
+             B, T, C = logits.shape
+             logits = logits.view(B*T, C)
+             targets = targets.view(B*T)
+             loss = F.cross_entropy(logits, targets)
+
+         return logits, loss
+
+     @torch.no_grad()
+     def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None):
+         # idx is a (B, T) array of indices in the current context
+         for _ in range(max_new_tokens):
+             # crop idx to the last block_size tokens
+             idx_cond = idx[:, -self.config.block_size:]
+             # get the predictions
+             logits, _ = self(idx_cond)
+             # focus only on the last time step
+             logits = logits[:, -1, :] / temperature  # becomes (B, C)
+             # optionally crop the logits to only the top k options
+             if top_k is not None:
+                 v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
+                 logits[logits < v[:, [-1]]] = -float('Inf')
+             # apply softmax to get probabilities
+             probs = F.softmax(logits, dim=-1)
+             # sample from the distribution
+             idx_next = torch.multinomial(probs, num_samples=1)
+             # append sampled index to the running sequence
+             idx = torch.cat((idx, idx_next), dim=1)
+         return idx
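
For reference, a minimal usage sketch of the classes defined in model.py above; the dummy batch is illustrative only, and the config values are just the dataclass defaults:

import torch
from model import ModelConfig, BigramLanguageModel, device

config = ModelConfig()                                 # block_size=256, vocab_size=50304, ...
model = BigramLanguageModel(config).to(device)

# Forward pass: logits over the vocabulary plus an optional cross-entropy loss.
idx = torch.randint(0, config.vocab_size, (1, 8), device=device)   # dummy (B, T) batch
logits, loss = model(idx, targets=idx)

# Autoregressive sampling; the context is cropped to block_size inside generate().
out = model.generate(idx[:, :1], max_new_tokens=20, temperature=1.0, top_k=50)
print(out.shape)   # torch.Size([1, 21])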
saved_model/ckpt.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd5c2d495d9f48692ebd0620d2b419c537dcb04fac36ddd62b36dc7f25debd9e
+ size 161902
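
Finally, a hedged sketch of restoring the uploaded weights. The internal layout of saved_model/ckpt.pt (a raw state_dict versus a dict that also wraps the training config) is not visible in this diff, so the key names and the use of the default ModelConfig below are assumptions:

import torch
from model import ModelConfig, BigramLanguageModel, device

ckpt = torch.load('saved_model/ckpt.pt', map_location=device)

# Assumption: either a plain state_dict or a dict that nests it under a 'model' key.
state_dict = ckpt['model'] if isinstance(ckpt, dict) and 'model' in ckpt else ckpt

config = ModelConfig()                     # assumed to match the training run
model = BigramLanguageModel(config)
model.load_state_dict(state_dict)
model.to(device).eval()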