File size: 551 Bytes
a3af4de
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
from transformers import GPT2Tokenizer, GPT2LMHeadModel

# Load the tokenizer and the causal-LM variant of GPT-2.
# BUG FIX: the original used GPT2Model, which is the bare transformer with
# no language-modeling head — calling .generate() on it fails because the
# model cannot map hidden states back to vocabulary tokens.
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2LMHeadModel.from_pretrained('gpt2')
model.eval()  # inference mode: disables dropout for deterministic-ish output

# Create a prompt
prompt = "I need a hug."

# Tokenize the prompt into a (1, seq_len) tensor of token ids
inputs = tokenizer.encode(prompt, return_tensors='pt')

# Generate up to 50 tokens (prompt included). pad_token_id is set explicitly
# because GPT-2 defines no pad token; without it, generate() emits a warning
# and falls back to eos anyway.
outputs = model.generate(
    inputs,
    max_length=50,
    num_return_sequences=1,
    pad_token_id=tokenizer.eos_token_id,
)

# Decode the generated token ids back to a string, dropping special tokens
generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

# Print the generated text
print(generated_text)