nikshep01 commited on
Commit
5b42b54
·
verified ·
1 Parent(s): e175332

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -2
app.py CHANGED
@@ -3,6 +3,10 @@ import torch
3
 
4
  # Load the tokenizer and model for GPT-Neo
5
  tokenizer = GPT2Tokenizer.from_pretrained("EleutherAI/gpt-neo-2.7B")
 
 
 
 
6
  model = GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-2.7B")
7
 
8
  # Input text description for UI
@@ -13,9 +17,8 @@ input_ids = tokenizer.encode(input_text, return_tensors="pt")
13
  attention_mask = torch.ones(input_ids.shape, dtype=torch.long) # Set the attention mask
14
 
15
  # Generate code
16
- output = model.generate(input_ids, attention_mask=attention_mask, max_length=100)
17
 
18
  # Decode and print the generated code
19
  generated_code = tokenizer.decode(output[0], skip_special_tokens=True)
20
  print(generated_code)
21
-
 
# Load the tokenizer and model for GPT-Neo
tokenizer = GPT2Tokenizer.from_pretrained("EleutherAI/gpt-neo-2.7B")

# Explicitly set pad_token_id to eos_token_id (GPT-Neo ships no dedicated pad token)
tokenizer.pad_token_id = tokenizer.eos_token_id

model = GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-2.7B")

# Input text description for UI
# NOTE(review): `input_text` is defined in lines outside this diff hunk — confirm upstream.
input_ids = tokenizer.encode(input_text, return_tensors="pt")
attention_mask = torch.ones(input_ids.shape, dtype=torch.long)  # Set the attention mask

# Generate code.
# FIX: max_length=10000 exceeds GPT-Neo 2.7B's 2048-token context window
# (config.max_position_embeddings == 2048), which fails or gets clamped at
# generation time. Use max_new_tokens so the budget is relative to the prompt
# and always stays inside the context window.
# FIX: model.generate does NOT read pad_token_id from the tokenizer; pass it
# explicitly, otherwise generation emits a "Setting pad_token_id" warning and
# may pad incorrectly.
_context_window = 2048  # GPT-Neo 2.7B max_position_embeddings
output = model.generate(
    input_ids,
    attention_mask=attention_mask,
    max_new_tokens=max(1, _context_window - input_ids.shape[1]),
    pad_token_id=tokenizer.eos_token_id,
)

# Decode and print the generated code
generated_code = tokenizer.decode(output[0], skip_special_tokens=True)
print(generated_code)