from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Pick the GPU when one is available. Using torch.device for both branches
# keeps the type consistent (the original mixed torch.device and a bare str).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

tokenizer = AutoTokenizer.from_pretrained("salesken/text_generate")
# AutoModelWithLMHead is deprecated (and removed in recent transformers);
# AutoModelForCausalLM is the supported class for GPT-style causal generation.
model = AutoModelForCausalLM.from_pretrained("salesken/text_generate").to(device)

input_query = "tough challenges make you stronger.  "
# The model card lowercases the prompt before encoding; keep that behavior.
input_ids = tokenizer.encode(input_query.lower(), return_tensors='pt').to(device)

# Sample a single continuation: top-k (k=10) sampling with one beam,
# near-1.0 temperature, capped at 1024 tokens.
sample_outputs = model.generate(input_ids,
                                do_sample=True,
                                num_beams=1,
                                max_length=1024,
                                temperature=0.99,
                                top_k=10,
                                num_return_sequences=1)

# Iterate the generated sequences directly instead of indexing via
# range(len(...)).
for sample_output in sample_outputs:
    print(tokenizer.decode(sample_output, skip_special_tokens=True))
Downloads last month: 153
Hosted Inference API — task: Text Generation.
This model can be loaded on-demand via the Hosted Inference API.