Sarah Ciston committed on
Commit
6eb0047
·
1 Parent(s): efa39e6

return to pipeline basic fill mask try

Browse files
Files changed (1) hide show
  1. sketch.js +30 -30
sketch.js CHANGED
@@ -3,6 +3,9 @@
3
  // import { AutoTokenizer, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';
4
  import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.17.2';
5
 
 
 
 
6
  /// AUTHORIZATION
7
  // import { textGeneration } from 'https://esm.sh/@huggingface/inference';
8
  // import { oauthLoginUrl, oauthHandleRedirectIfPresent } from 'https://esm.sh/@huggingface/hub@0.15.1';
@@ -23,12 +26,6 @@ import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers
23
  // import { HfInference } from 'https://esm.sh/@huggingface/inference';
24
  // const inference = new HfInference(HF_TOKEN);
25
 
26
- // PIPELINE MODELS
27
- // models('Xenova/gpt2', 'Xenova/gpt-3.5-turbo', 'mistralai/Mistral-7B-Instruct-v0.2', 'Xenova/llama-68m', 'meta-llama/Meta-Llama-3-8B', 'Xenova/bloom-560m', 'Xenova/distilgpt2')
28
- // list of models by task: 'https://huggingface.co/docs/transformers.js/index#supported-tasksmodels'
29
-
30
- // Since we will download the model from the Hugging Face Hub, we can skip the local model check
31
- // env.allowLocalModels = false;
32
 
33
  ///////// VARIABLES
34
 
@@ -208,19 +205,19 @@ new p5(function (p5) {
208
 
209
  ///// MODEL STUFF
210
 
211
- async function runModel(PROMPT){
212
- // let MODELNAME = 'distilroberta-base'
213
 
214
- let unmasker = await fillMask(PROMPT)
215
 
216
- console.log(unmasker)
217
 
218
- // let res = unmasker(PROMPT, top_k=5)
219
 
220
- var modelResult = [unmasker[0].sequence, unmasker[1].sequence, unmasker[2].sequence]
221
 
222
- return modelResult
223
- }
224
 
225
 
226
  // async function runModel(PREPROMPT, PROMPT){
@@ -365,27 +362,30 @@ async function runModel(PROMPT){
365
 
366
 
367
  // async function runModel(PROMPTS){
368
- // async function runModel(PROMPT){
 
 
 
369
 
370
- // // let MODELNAME = "bert-base-uncased"
371
- // let MODELNAME = 'distilroberta-base'
372
 
373
- // let unmasker = await pipeline('fill-mask', MODELNAME)
 
374
 
375
- // let res = unmasker(PROMPT, top_k=5)
376
 
377
- // var modelResult = res
378
 
379
- // return modelResult
380
 
381
- // // for (let p in PROMPTS){
382
- // // var res = unmasker(p)
383
- // // console.log(res)
384
 
385
- // // var modelResult = res[0].token_str
386
- // // console.log(modelResult)
387
 
388
- // // resultsArray.push(modelResult)
389
- // // }
390
- // // return resultsArray
391
- // }
 
3
  // import { AutoTokenizer, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';
4
  import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.17.2';
5
 
6
+ // Since we will download the model from the Hugging Face Hub, we can skip the local model check
7
+ env.allowLocalModels = false;
8
+
9
  /// AUTHORIZATION
10
  // import { textGeneration } from 'https://esm.sh/@huggingface/inference';
11
  // import { oauthLoginUrl, oauthHandleRedirectIfPresent } from 'https://esm.sh/@huggingface/hub@0.15.1';
 
26
  // import { HfInference } from 'https://esm.sh/@huggingface/inference';
27
  // const inference = new HfInference(HF_TOKEN);
28
 
 
 
 
 
 
 
29
 
30
  ///////// VARIABLES
31
 
 
205
 
206
  ///// MODEL STUFF
207
 
208
+ // async function runModel(PROMPT){
209
+ // // let MODELNAME = 'distilroberta-base'
210
 
211
+ // let unmasker = await fillMask(PROMPT)
212
 
213
+ // console.log(unmasker)
214
 
215
+ // // let res = unmasker(PROMPT, top_k=5)
216
 
217
+ // var modelResult = [unmasker[0].sequence, unmasker[1].sequence, unmasker[2].sequence]
218
 
219
+ // return modelResult
220
+ // }
221
 
222
 
223
  // async function runModel(PREPROMPT, PROMPT){
 
362
 
363
 
364
  // async function runModel(PROMPTS){
365
+ async function runModel(PROMPT){
366
+
367
+ let MODELNAME = "bert-base-uncased"
368
+ // let MODELNAME = 'distilroberta-base'
369
 
370
+ let unmasker = await pipeline('fill-mask', MODELNAME)
 
371
 
372
+ let res = unmasker(PROMPT)
373
+ // , top_k=5
374
 
375
+ console.log(res[0].sequence, res[0].token_str, res[1].sequence, res[1].token_str)
376
 
377
+ var modelResult = res
378
 
379
+ return modelResult
380
 
381
+ // for (let p in PROMPTS){
382
+ // var res = unmasker(p)
383
+ // console.log(res)
384
 
385
+ // var modelResult = res[0].token_str
386
+ // console.log(modelResult)
387
 
388
+ // resultsArray.push(modelResult)
389
+ // }
390
+ // return resultsArray
391
+ }