Commit 589a2d5
1 Parent(s): bde8121
Update app.py
app.py CHANGED
@@ -4,11 +4,11 @@ from transformers import GPT2Tokenizer, TrainingArguments, Trainer, GPT2LMHeadModel
 
 tokenizer = GPT2Tokenizer.from_pretrained('mindwrapped/gpt2-lotr-fellowship', bos_token='<|startoftext|>',
                                           eos_token='<|endoftext|>', pad_token='<|pad|>')
-model = GPT2LMHeadModel.from_pretrained('mindwrapped/gpt2-lotr-fellowship')
+model = GPT2LMHeadModel.from_pretrained('mindwrapped/gpt2-lotr-fellowship')
 
 
 def generate_text(text, temperature):
-    generated = tokenizer("<|startoftext|> " + text, return_tensors="pt").input_ids
+    generated = tokenizer("<|startoftext|> " + text, return_tensors="pt").input_ids
     sample_outputs = model.generate(generated, do_sample=True, top_k=50,
                                     max_length=300, top_p=0.95, temperature=float(temperature), num_return_sequences=1)
 
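For context, the load-and-generate path touched by this commit can be exercised on its own roughly as follows. This is a minimal sketch, not the full app.py: the decode step, the return value, and the example prompt are assumptions added for illustration; only the from_pretrained and generate calls mirror the diff above.

# Minimal standalone sketch of the generation path shown in the diff above.
# The decode/return step and the example prompt are assumptions, not part of app.py.
from transformers import GPT2Tokenizer, GPT2LMHeadModel

tokenizer = GPT2Tokenizer.from_pretrained('mindwrapped/gpt2-lotr-fellowship', bos_token='<|startoftext|>',
                                          eos_token='<|endoftext|>', pad_token='<|pad|>')
model = GPT2LMHeadModel.from_pretrained('mindwrapped/gpt2-lotr-fellowship')

def generate_text(text, temperature):
    # Prepend the BOS marker the tokenizer was configured with, then sample.
    generated = tokenizer("<|startoftext|> " + text, return_tensors="pt").input_ids
    sample_outputs = model.generate(generated, do_sample=True, top_k=50,
                                    max_length=300, top_p=0.95, temperature=float(temperature),
                                    num_return_sequences=1)
    # Assumed: decode the single sampled sequence back to plain text.
    return tokenizer.decode(sample_outputs[0], skip_special_tokens=True)

print(generate_text("Frodo looked at the ring and", 0.8))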