Text Generation
Transformers
PyTorch
Safetensors
gpt2
stable-diffusion
prompt-generator
arxiv:2210.14140
Inference Endpoints
text-generation-inference
FredZhang7 committed
Commit b61e4ef (1 parent: 5b515e8)

Update README.md

Files changed (1): README.md (+1 / -1)
README.md CHANGED
@@ -50,7 +50,7 @@ max_length = 80 # the maximum number of tokens for the output of t
 repitition_penalty = 1.2 # the penalty value for each repetition of a token
 num_return_sequences=5 # the number of results to generate
 
-# generate the result with contrastive search. generate 5 results with the highest probability out of 10.
+# generate the result with contrastive search
 input_ids = tokenizer(prompt, return_tensors='pt').input_ids
 output = model.generate(input_ids, do_sample=True, temperature=temperature, top_k=top_k, max_length=max_length, num_return_sequences=num_return_sequences, repetition_penalty=repitition_penalty, penalty_alpha=0.6, no_repeat_ngram_size=1, early_stopping=True)
 
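
For context, the lines touched by this commit assume that tokenizer, model, prompt, temperature, and top_k were defined earlier in the README and are not part of this hunk. A minimal self-contained sketch of the same generation call follows; the model ID, prompt, temperature, and top_k values here are placeholders (assumptions for illustration, not taken from this diff), and repetition_penalty is spelled conventionally rather than as the README's repitition_penalty.

from transformers import AutoTokenizer, AutoModelForCausalLM

# Assumed checkpoint name for illustration; substitute this repo's actual model ID.
model_id = "FredZhang7/distilgpt2-stable-diffusion-v2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

prompt = "a beautiful landscape"  # placeholder starting prompt
temperature = 0.9                 # assumed value; not shown in this hunk
top_k = 8                         # assumed value; not shown in this hunk
max_length = 80                   # the maximum number of tokens for the output
repetition_penalty = 1.2          # the penalty value for each repetition of a token
num_return_sequences = 5          # the number of results to generate

# Mirrors the generate() call shown in the README hunk above.
input_ids = tokenizer(prompt, return_tensors="pt").input_ids
output = model.generate(
    input_ids,
    do_sample=True,
    temperature=temperature,
    top_k=top_k,
    max_length=max_length,
    num_return_sequences=num_return_sequences,
    repetition_penalty=repetition_penalty,
    penalty_alpha=0.6,
    no_repeat_ngram_size=1,
    early_stopping=True,
)

# Print each of the num_return_sequences candidate prompts.
for i, seq in enumerate(output):
    print(f"{i}: {tokenizer.decode(seq, skip_special_tokens=True)}")

Note that the README's variable name repitition_penalty still runs as written because it is used consistently; only the comment text was changed by this commit.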