Sarah Ciston committed
Commit 0a0456c · 1 Parent(s): 1455cf4
connect prompt to form button
sketch.js CHANGED
@@ -13,37 +13,47 @@ const inference = new HfInference();
 let promptButton, buttonButton, promptInput, maskInputA, maskInputB, maskInputC, modOutput
 // const detector = await pipeline('text-generation', 'meta-llama/Meta-Llama-3-8B', 'Xenova/LaMini-Flan-T5-783M');

-
+let MODELNAME = 'Xenova/gpt-3.5-turbo'
+// models('Xenova/gpt2', 'Xenova/gpt-3.5-turbo', 'mistralai/Mistral-7B-Instruct-v0.2', 'Xenova/llama-68m', 'meta-llama/Meta-Llama-3-8B', 'Xenova/bloom-560m', 'Xenova/distilgpt2')

-var PREPROMPT = `Return an array of sentences. In each sentence, fill in the [BLANK] in the following sentence with each word I provide in the array ${inputArray}. Replace any [FILL] with an appropriate word of your choice.`

-var
-
-// Chat completion API
-const out = await inference.chatCompletion({
-  model: "mistralai/Mistral-7B-Instruct-v0.2",
-  // model: "google/gemma-2-9b",
-  messages: [{ role: "user", content: PREPROMPT + PROMPT }],
-  max_tokens: 100
-});
-
-// let out = await pipe(PREPROMPT + PROMPT)
-// let out = await pipe(PREPROMPT + PROMPT, {
-//   max_new_tokens: 250,
-//   temperature: 0.9,
-//   // return_full_text: False,
-//   repetition_penalty: 1.5,
-//   // no_repeat_ngram_size: 2,
-//   // num_beams: 2,
-//   num_return_sequences: 1
-// });
-console.log(out)
+var PREPROMPT = `Return an array of sentences. In each sentence, fill in the [BLANK] in the following sentence with each word I provide in the array ${inputArray}. Replace any [FILL] with an appropriate word of your choice.`

-var
-
-
-
-
+// var PROMPT = `The [BLANK] works as a [FILL] but wishes for [FILL].`
+/// this needs to run on button click, use string variables to fill in the form
+var PROMPT = `${promptInput}`
+
+// var inputArray = ["mother", "father", "sister", "brother"]
+// for num of inputs put in list
+var inputArray = [`${maskInputA}`, `${maskInputB}`, `${maskInputC}`]
+
+function runModel(){
+  // Chat completion API
+  const out = await inference.chatCompletion({
+    model: MODELNAME,
+    // model: "google/gemma-2-9b",
+    messages: [{ role: "user", content: PREPROMPT + PROMPT }],
+    max_tokens: 100
+  });
+
+  // let out = await pipe(PREPROMPT + PROMPT)
+  // let out = await pipe(PREPROMPT + PROMPT, {
+  //   max_new_tokens: 250,
+  //   temperature: 0.9,
+  //   // return_full_text: False,
+  //   repetition_penalty: 1.5,
+  //   // no_repeat_ngram_size: 2,
+  //   // num_beams: 2,
+  //   num_return_sequences: 1
+  // });
+  console.log(out)
+
+  var result = await out.choices[0].message.content
+  // var result = await out[0].generated_text
+  console.log(result);
+
+  return result
+}


 // Reference the elements that we will need

@@ -147,12 +157,8 @@ new p5(function(p5){
   // p5.background(200)
   // p5.textSize(20)
   // p5.textAlign(p5.CENTER,p5.CENTER)
-  let promptButton = p5.createButton("GO").position(0, 340);
-  promptButton.position(0, 340);
-  promptButton.elt.style.fontSize = "15px";
-
  }
-
+
  p5.draw = function(){
    //
  }

@@ -161,6 +167,8 @@ new p5(function(p5){
   console.log('sketchfile loaded')
 }

+
+
 function makeInterface(){
   console.log('got to make interface')
   let title = p5.createElement('h1', 'p5.js Critical AI Prompt Battle')

@@ -175,7 +183,10 @@ new p5(function(p5){
   p5.createP(promptInput.attribute('label')).position(0,100)
   // p5.createP(`For example: "The BLANK has a job as a MASK where their favorite thing to do is ...`)

+
   //make for loop to generate
+  //make a button to make another
+  //add them to the list of items
   maskInputA = p5.createInput("");
   maskInputA.position(0, 240);
   maskInputA.size(200);

@@ -197,8 +208,31 @@ new p5(function(p5){
     modOutput.html(result)
   }, 2000);

+  //a model drop down list?
+
+  //GO BUTTON
+  promptButton = p5.createButton("GO").position(0, 340);
+  promptButton.position(0, 340);
+  promptButton.elt.style.fontSize = "15px";
+  promptButton.mousePressed(runModel)
+  promptInput.changed(runModel)
+  maskInputA.changed(runModel)
+  maskInputB.changed(runModel)
+  maskInputC.changed(runModel)
+
+  // describe(``)
+  // TO-DO alt-text description
+
 }

+// var result = promptButton.mousePressed(runModel) = function(){
+//   // listens for the button to be clicked
+//   // run the prompt through the model here
+//   // result = runModel()
+//   // return result
+//   runModel()
+// }
+
 // function makeInput(i){
 //   i = p5.createInput("");
 //   i.position(0, 300); //append to last input and move buttons down
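As committed, runModel() uses await inside a plain function declaration (a syntax error until the function is marked async), and PROMPT and inputArray are built once at load time by interpolating the p5.Element objects themselves rather than the text typed into the form; the inline comment "this needs to run on button click, use string variables to fill in the form" flags exactly this. A minimal sketch of that follow-up, assuming the inference client, MODELNAME, and the p5 input elements declared elsewhere in sketch.js (not part of this commit):

// Sketch only, not in this commit. Assumes `inference`, MODELNAME, promptInput,
// maskInputA/B/C, modOutput, and promptButton from sketch.js.
async function runModel(){                        // async so `await` is legal
  // Read the current text of each form field at click time (p5.Element.value())
  let inputArray = [maskInputA.value(), maskInputB.value(), maskInputC.value()]
  let PROMPT = promptInput.value()
  let PREPROMPT = `Return an array of sentences. In each sentence, fill in the [BLANK] in the following sentence with each word I provide in the array ${inputArray}. Replace any [FILL] with an appropriate word of your choice.`

  // Same chat completion call the commit makes
  const out = await inference.chatCompletion({
    model: MODELNAME,
    messages: [{ role: "user", content: PREPROMPT + PROMPT }],
    max_tokens: 100
  });

  return out.choices[0].message.content
}

// The GO button then waits for the promise before showing the result:
promptButton.mousePressed(async () => modOutput.html(await runModel()))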