Tags: Text Generation · Transformers · Safetensors · gpt2 · Eval Results · Inference Endpoints · text-generation-inference
Sharathhebbar24 committed on
Commit d531d19
1 Parent(s): 9447052

Update README.md

Files changed (1)
  1. README.md +2 -2
README.md CHANGED
@@ -34,9 +34,9 @@ prompt.
 
 ```python
 >>> from transformers import AutoTokenizer, AutoModelForCausalLM
->>> model_name = "Sharathhebbar24/Instruct_GPT"
+>>> model_name = "Sharathhebbar24/Instruct_GPT_v1"
 >>> model = AutoModelForCausalLM.from_pretrained(model_name)
->>> tokenizer = AutoTokenizer.from_pretrained("gpt2-medium")
+>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
 >>> def generate_text(prompt):
 >>>   inputs = tokenizer.encode(prompt, return_tensors='pt')
 >>>   outputs = model.generate(inputs, max_length=64, pad_token_id=tokenizer.eos_token_id)
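For context, a minimal self-contained sketch of the updated usage follows. It assumes `Sharathhebbar24/Instruct_GPT_v1` ships both weights and a tokenizer under the same repository id; the final decoding step is not part of the diff hunk above and is added here only so the sketch runs end to end.

```python
# Usage sketch for the updated README snippet (not part of the diff itself).
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "Sharathhebbar24/Instruct_GPT_v1"
model = AutoModelForCausalLM.from_pretrained(model_name)
# Assumption: the tokenizer is loaded from the same repo as the model.
tokenizer = AutoTokenizer.from_pretrained(model_name)

def generate_text(prompt):
    # Encode the prompt and generate up to 64 tokens, padding with EOS as in the README.
    inputs = tokenizer.encode(prompt, return_tensors="pt")
    outputs = model.generate(inputs, max_length=64, pad_token_id=tokenizer.eos_token_id)
    # Decoding step added for completeness; the diff hunk ends before this line.
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

print(generate_text("What is machine learning?"))
```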