Dwaraka committed
Commit 03eb02b
Parent: 76207e4

Update README.md

Files changed (1): README.md +2 -9
README.md CHANGED
@@ -110,22 +110,15 @@ TRAINING Loss: 3.422200(At Step:40000)
 
 We can use the model directly with a pipeline for text generation:
 
+!pip install transformers
 >>>from transformers import GPT2Tokenizer, GPT2LMHeadModel
->>>
 >>>model_name = "Dwaraka/PROJECT_GUTENBERG_GOTHIC_FICTION_TEXT_GENERATION_gpt2"
->>>
->>>model = GPT2LMHEadModel.from_pretrained(model_name)
->>>
+>>>model = GPT2LMHeadModel.from_pretrained(model_name)
 >>>tokenizer = GPT2Tokenizer.from_pretrained(model_name)
->>>
 >>>prompt= "Once upon a time, in a dark and spooky castle, there lived a "
->>>
 >>>input_ids = tokenizer.encode(prompt, return_tensors="pt" )
->>>
 >>>output = model.generate(input_ids, max_length=50, do_sample=True)
->>>
 >>>generated_text = tokenizer.decode(output[0], skip_special_tokens=True )
->>>
 >>>print(generated_text)
 
 ### Github Link :
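
For reference, here is the snippet as it reads after this commit, tidied into a plain runnable script rather than console-style `>>>` lines. This is a minimal sketch: it assumes `transformers` and a PyTorch backend are already installed (the `!pip install transformers` line in the README is a notebook command) and that the checkpoint is reachable on the Hugging Face Hub under the name shown.

```python
from transformers import GPT2Tokenizer, GPT2LMHeadModel

# Fine-tuned GPT-2 checkpoint referenced in the README
model_name = "Dwaraka/PROJECT_GUTENBERG_GOTHIC_FICTION_TEXT_GENERATION_gpt2"

model = GPT2LMHeadModel.from_pretrained(model_name)
tokenizer = GPT2Tokenizer.from_pretrained(model_name)

prompt = "Once upon a time, in a dark and spooky castle, there lived a "
input_ids = tokenizer.encode(prompt, return_tensors="pt")

# Sample up to 50 tokens in total (prompt included)
output = model.generate(input_ids, max_length=50, do_sample=True)

generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
print(generated_text)
```

Although the README text says "with a pipeline", the snippet instantiates the model and tokenizer classes directly; the same generation could also be expressed with `transformers.pipeline("text-generation", model=model_name)`.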