Sarah Ciston committed on
Commit 023d9ac · 1 Parent(s): e70fc28
Files changed (1)
  1. sketch.js +4 -4
sketch.js CHANGED
@@ -174,9 +174,9 @@ async function runModel(PREPROMPT, PROMPT){
 
 
   // pipeline/transformers version
-  // let generator = await pipeline('text-generation', 'Xenova/distilgpt2');
+  // let pipe = await pipeline('text-generation', 'Xenova/distilgpt2');
   // seems to work with default model distilgpt2 ugh
-  let generator = pipeline('text-generation', "HuggingFaceH4/zephyr-7b-beta")
+  let pipe = pipeline('text-generation', "HuggingFaceH4/zephyr-7b-beta")
 
   // IMPORTANT: different models have different input/output structures for their API so look to the samples and references on the specific model page for help :)
 
@@ -185,7 +185,7 @@ async function runModel(PREPROMPT, PROMPT){
   // 'Xenova/gpt-3.5-turbo'
   // , 'Xenova/distilgpt2'
 
-  // let out = await generator(inputText, {
+  // let out = await pipe(inputText, {
   // max_tokens: 250,
   // return_full_text: false
   // repetition_penalty: 1.5,
@@ -198,7 +198,7 @@ async function runModel(PREPROMPT, PROMPT){
   add_generation_prompt: true
   })
 
-  let out = await generator(prompt, {
+  let out = await pipe(prompt, {
   max_new_tokens: 256,
   do_sample: true,
   temperature: 0.7,
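For reference, below is a minimal sketch of the transformers.js text-generation pattern that the renamed `pipe` variable follows, assuming the @xenova/transformers package. It is not part of the commit: the function name is illustrative, the model id is taken from the diff (though the commit's own comment notes things only "seem to work" with distilgpt2, so loading zephyr-7b-beta in the browser may not succeed), and the generation options mirror the values visible in the hunks.

// Example only (not part of the commit): minimal transformers.js text-generation sketch.
import { pipeline } from '@xenova/transformers';

async function runModelExample(PREPROMPT, PROMPT) {
  // Create the text-generation pipeline (note the `await`, which the committed line omits).
  const pipe = await pipeline('text-generation', 'HuggingFaceH4/zephyr-7b-beta');

  // Chat-tuned models expect a templated prompt; add_generation_prompt matches the diff context.
  const messages = [
    { role: 'system', content: PREPROMPT },
    { role: 'user', content: PROMPT },
  ];
  const prompt = pipe.tokenizer.apply_chat_template(messages, {
    tokenize: false,
    add_generation_prompt: true,
  });

  // Generation options mirror the values shown in the diff.
  const out = await pipe(prompt, {
    max_new_tokens: 256,
    do_sample: true,
    temperature: 0.7,
  });

  // text-generation pipelines return an array of { generated_text } objects.
  return out[0].generated_text;
}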