kjbk

modelLM.py  +21 -21  CHANGED
@@ -29,30 +29,30 @@ class OBILanguageModel(PreTrainedModel):
 
 
 
-    (previous forward implementation: 21 lines, not preserved in this view)
+    def forward(self, idx, targets=None):
+        tok_emb = self.token_embedding_table(idx)
+        pos_emb = None  # Initialize pos_emb to None
+        try:
+            pos_emb = self.position_embedding_table(torch.arange(idx.size(1), device=idx.device))
+        except IndexError as e:
+            # Handle the IndexError by initializing pos_emb with zeros
+            print(f"IndexError: {e}")
+            print(f"idx.size(1): {idx.size(1)}")
+            print(f"Positional embedding table shape: {self.position_embedding_table.weight.shape}")
+            pos_emb = torch.zeros((idx.size(1), self.config.hidden_size), device=idx.device)
 
+        x = tok_emb + pos_emb
+        x = self.transformer(x, x)
+        x = self.ln1(x)
+        x = self.ln2(x)
+        logits = self.lm_head(x)
 
+        if targets is None:
+            loss = None
+        else:
+            loss = F.cross_entropy(logits.view(-1, self.config.vocab_size), targets.view(-1))
 
+        return logits, loss
 
 
     def generate(self, idx, max_new_tokens):
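The notable change in this commit is the try/except around the positional-embedding lookup: when the input sequence is longer than the embedding table, forward() falls back to zero position embeddings instead of crashing. Below is a minimal standalone sketch of that fallback pattern; the sizes are illustrative and positional_embeddings is a hypothetical helper for demonstration, not part of modelLM.py.

import torch
import torch.nn as nn

# Illustrative sizes only; modelLM.py reads these from its config.
block_size, hidden_size = 8, 16
position_embedding_table = nn.Embedding(block_size, hidden_size)

def positional_embeddings(seq_len):
    try:
        # Same lookup as in forward(); raises IndexError once seq_len exceeds block_size.
        return position_embedding_table(torch.arange(seq_len))
    except IndexError:
        # Fallback used by the commit: zero vectors for out-of-range positions.
        return torch.zeros(seq_len, hidden_size)

print(positional_embeddings(4).shape)   # torch.Size([4, 16]), from the table
print(positional_embeddings(32).shape)  # torch.Size([32, 16]), zero fallback

One caveat with this design: the IndexError is only raised reliably on CPU. On a CUDA device, an out-of-range embedding lookup surfaces as a device-side assert (reported as a RuntimeError), so the zero-embedding fallback may not trigger there.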