Sarah Ciston committed
Commit 61c49f0 · 1 Parent(s): a767054

add full text false and max tokens

Files changed (2):
  1. index.html +1 -1
  2. sketch.js +6 -6
index.html CHANGED
@@ -5,7 +5,7 @@
   <meta charset="UTF-8" />
   <link rel="stylesheet" type="text/css" href="style.css" />
   <script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/1.9.4/p5.js"></script>
-  <script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/1.9.4/addons/p5.sound.min.js"></script>
+  <!-- <script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/1.9.4/addons/p5.sound.min.js"></script> -->
   <!-- <meta name="viewport" content="width=device-width, initial-scale=1.0" /> -->
   <title>p5.js Critical AI Prompt Battle</title>
 </head>
sketch.js CHANGED
@@ -2,7 +2,7 @@ import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers
 // import { HfInference } from 'https://cdn.jsdelivr.net/npm/@huggingface/inference@2.7.0/+esm';
 // const inference = new HfInference();
 
-let pipe = await pipeline('text-generation', 'Xenova/bloom-560m');
+let pipe = await pipeline('text-generation', model='Xenova/bloom-560m', return_full_text=False);
 // models('Xenova/gpt2', 'mistralai/Mistral-7B-Instruct-v0.2', 'meta-llama/Meta-Llama-3-8B', 'Xenova/bloom-560m')
 // list of models by task: 'https://huggingface.co/docs/transformers.js/index#supported-tasksmodels'
 
@@ -27,11 +27,11 @@ var PROMPT = `The [BLANK] works as a [FILL] but wishes for [FILL].`
 // max_tokens: 100
 // });
 
-let out = await pipe(PREPROMPT + PROMPT)
-// let out = await pipe(PREPROMPT + PROMPT, {
-// max_new_tokens: 150,
-// temperature: 0.9
-// });
+// let out = await pipe(PREPROMPT + PROMPT)
+let out = await pipe(PREPROMPT + PROMPT, {
+  max_new_tokens: 250,
+  temperature: 0.9
+});
 console.log(out)
 
 var result = await out[0].generated_text
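
Note on the new pipeline line: model='Xenova/bloom-560m' and return_full_text=False are Python-style keyword arguments and are not valid JavaScript (False is also undefined in JS; the boolean is lowercase false). In @xenova/transformers the model id is a plain positional string, and generation options such as return_full_text belong in the options object passed to the generation call itself. Below is a minimal corrected sketch of what this commit appears to intend; the model id, token cap, and temperature come from the diff, while the do_sample flag and the PREPROMPT placeholder are assumptions, and if the installed transformers.js version does not honor return_full_text, the prompt prefix can instead be sliced off the returned string manually.

// Import as in sketch.js; the exact version is cut off in the diff's hunk header.
import { pipeline } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';

// Model id is a positional string argument; JavaScript has no keyword arguments.
let pipe = await pipeline('text-generation', 'Xenova/bloom-560m');

// Placeholders standing in for the values defined earlier in sketch.js.
var PREPROMPT = `...`;
var PROMPT = `The [BLANK] works as a [FILL] but wishes for [FILL].`;

// Generation options go in a plain options object on the call.
let out = await pipe(PREPROMPT + PROMPT, {
  max_new_tokens: 250,
  temperature: 0.9,
  do_sample: true,          // assumption: temperature generally only applies when sampling is enabled
  return_full_text: false   // return only the generated continuation, not the prompt
});
console.log(out[0].generated_text);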