Update app.py
app.py CHANGED
@@ -1,4 +1,3 @@
-# Import the necessary libraries
 import gradio as gr
 import torch
 from transformers import GPT2Tokenizer, GPT2LMHeadModel
@@ -17,6 +16,10 @@ model.to('cpu')
 def generate_text(prompt, temperature, top_p):
     prompt_with_eos = " #CONTEXT# " + prompt + " #TOPIC# "  # Wrap the prompt in #CONTEXT# / #TOPIC# markers
     input_tokens = tokenizer.encode(prompt_with_eos, return_tensors='pt')
+
+    if input_tokens.size(1) > 512:
+        return "ERROR, CONTEXT SIZE EXCEEDED"
+
     input_tokens = input_tokens.to('cpu')

     generated_text = prompt_with_eos  # Start generation from the marked-up prompt
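
For reference, the new length guard can be exercised on its own. A minimal sketch, assuming the stock "gpt2" tokenizer (the Space may load a fine-tuned checkpoint) and an over-long dummy prompt:

from transformers import GPT2Tokenizer

# Assumption: plain "gpt2"; app.py may point at a different checkpoint.
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

prompt_with_eos = " #CONTEXT# " + "word " * 600 + " #TOPIC# "
input_tokens = tokenizer.encode(prompt_with_eos, return_tensors="pt")

# Same check as the diff: the tensor has shape (1, seq_len), so size(1)
# is the token count of the encoded prompt.
if input_tokens.size(1) > 512:
    print("ERROR, CONTEXT SIZE EXCEEDED")  # app.py returns this string to the UI

Capping the input at 512 tokens plausibly leaves headroom inside GPT-2's 1024-token context window for the generated continuation.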