// IMPORT LIBRARIES
import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.10.1';

// Since we download models from the Hugging Face Hub, skip the local model check
env.allowLocalModels = false;
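// (Model weights are fetched from the Hub and cached by the browser;
// transformers.js also exposes env.useBrowserCache to adjust that behavior.)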

// AUTHORIZATION (disabled; only needed for the commented-out Inference API variants below)
// import { textGeneration } from 'https://esm.sh/@huggingface/inference@2.7.0/+esm';
// import { oauthLoginUrl, oauthHandleRedirectIfPresent } from 'https://esm.sh/@huggingface/hub@0.15.1';

// const oauthResult = await oauthHandleRedirectIfPresent();

// if (!oauthResult) {
//   // If the user is not logged in, redirect to the login page
//   window.location.href = await oauthLoginUrl();
// }

// // You can use oauthResult.accessToken, oauthResult.accessTokenExpiresAt and oauthResult.userInfo
// // console.log(oauthResult);
// const HF_TOKEN = window.huggingface.variables.OAUTH_CLIENT_SECRET
// // const HF_TOKEN = oauthResult.accessToken
// console.log(HF_TOKEN)

// import { HfInference } from 'https://esm.sh/@huggingface/inference';
// const inference = new HfInference(HF_TOKEN);


// GLOBAL VARIABLES

// establish global variables to reference later
var promptInput
var addButton // declared at sketch scope so addField() can hide it when the cap is reached
var blanksArray = []
var resultsArray = []


///// p5 STUFF

// create an instance of the p5 class as a workspace for all your p5.js code

new p5(function (p5) {
    p5.setup = function(){
        console.log('p5 loaded')
        p5.noCanvas()
        makeInterface()
    }

    p5.draw = function(){
        //
    }

    window.onload = function(){
        console.log('dom and js loaded')
    }

    let fieldsDiv = document.querySelector("#blanks") // placeholder; reassigned to a p5 div inside makeInterface()

    function makeInterface(){
        console.log('reached makeInterface')
        let title = p5.createElement('h1', 'p5.js Critical AI Prompt Battle')
        // title.position(0,50)

        p5.createElement('p',`This tool lets you run several AI chat prompts at once and compare their results. Use it to explore what models 'know' about various concepts, communities, and cultures. For more information on prompt programming and critical AI, see [Tutorial & extra info][TO-DO][XXX]`)
            // .position(0,100)

        promptInput = p5.createInput("")
        // promptInput.position(0,160)
        promptInput.size(600);
        promptInput.attribute('label', `Write a text prompt with at least one [BLANK] that describes someone. You can also write [FILL] where you want the bot to fill in a word on its own.`)
        promptInput.value(`The man works as a [MASK] but ...`) // bert-style models fill the literal [MASK] token (roberta-style models use <mask>)
        promptInput.addClass("prompt")
        p5.createP(promptInput.attribute('label'))
            // .position(0,100)

        // generate the blank fields in a loop: a button adds another,
        // and each new field is appended to blanksArray
        fieldsDiv = p5.createDiv()
        fieldsDiv.id('fieldsDiv')
        // fieldsDiv.position(0,250)

        // initial code to make a single field
        // blankA = p5.createInput("");
        // blankA.position(0, 240);
        // blankA.size(300);
        // blankA.addClass("blank")
        // blankA.parent('#fieldsDiv')

        // function to generate a single BLANK form field instead
        addField()

        // // BUTTONS // // 
        // let buttonsDiv = p5.createDiv() // container to organize buttons
        // buttonsDiv.id('buttonsDiv')
        
        // send prompt to model
        let submitButton = p5.createButton("SUBMIT")
        // submitButton.position(0,500)
        submitButton.size(170)
        submitButton.class('submit'); 
        // submitButton.parent('#buttonsDiv')
        submitButton.mousePressed(getInputs)

        // add more blanks to fill in
        addButton = p5.createButton("more blanks") // assigned to the shared sketch-scope variable so addField() can reach it
        addButton.size(170)
        // addButton.position(220,500)
        // addButton.parent('#buttonsDiv')
        addButton.mousePressed(addField)

        // TO-DO a model drop down list?

        // alt-text description
        // p5.describe(`Pink and black text on a white background with form inputs and two buttons. The text describes a p5.js Critical AI Prompt Battle tool that lets you run several AI chat prompts at once and compare their results. Use it to explore what models 'know' about various concepts, communities, and cultures. In the largest form input you can write a prompt to submit. In smaller inputs, you can write variables that will be inserted into that prompt as variations of the prompt when it is run through the model. There is a submit button, a button to add more variations, and when the model is run it adds text at the bottom showing the output results.`)
    }

    function addField(){
        let f = p5.createInput("")
        f.class("blank")
        f.parent("#fieldsDiv")

        blanksArray.push(f)
        console.log("made field")
        
        // Cap the number of fields, avoids token limit in prompt
        let blanks = document.querySelectorAll(".blank")
        if (blanks.length > 7){
            console.log(blanks.length)
            addButton.style('visibility','hidden')
        }
    }

    // async function getInputs(){
    //     // Map the list of blanks text values to a new list
    //     let BLANKSVALUES = blanksArray.map(i => i.value())
    //     console.log(BLANKSVALUES)
        
    //     // Do model stuff in this function instead of in general
    //     let PROMPT = promptInput.value() // updated check of the prompt field
        
    //     // BLANKS = inputValues // get ready to feed array list into model

    //     let PREPROMPT = `In the sentence I provide, please fill in the [BLANK] with each word in the array ${BLANKSVALUES}, replace any [MASK] with a word of your choice. Here is the SAMPLE SENTENCE: ` 

    //     // we pass PROMPT and PREPROMPT to the model function, don't need to pass BLANKSVALUES bc it's passed into the PREPROMPT already here

    //     // Please return an array of sentences based on the sample sentence to follow. In each sentence, 

    //     // let modelResult = await runModel(PREPROMPT, PROMPT)

    //     await displayModel(modelResult)
    // }

    // creating multiple prompt inputs rather than instructing model to do so
    async function getInputs(){
        // Map the list of blanks text values to a new list
        // let BLANKSVALUES = blanksArray.map(i => i.value())
        // console.log(BLANKSVALUES)
        
        // Do model stuff in this function instead of in general
        let PROMPT = promptInput.value() // updated check of the prompt field
        
        // BLANKS = inputValues // get ready to feed array list into model

        // for running MULTIPLE PROMPTS AT ONCE
        // let PROMPTS = []
        // for (let b in BLANKSVALUES){
        //     console.log(BLANKSVALUES[b])
        //     let p = PROMPT.replace('[BLANK]', `${BLANKSVALUES[b]}`)
        //     console.log(p)
        //     PROMPTS.push(p)
        // }
        // console.log(PROMPTS)

        // let PREPROMPT = `In the sentence I provide, please fill in the [BLANK] with each word in the array ${BLANKSVALUES}, replace any [MASK] with a word of your choice. Here is the SAMPLE SENTENCE: ` 

        // we pass PROMPT and PREPROMPT to the model function, don't need to pass BLANKSVALUES bc it's passed into the PREPROMPT already here

        // Please return an array of sentences based on the sample sentence to follow. In each sentence, 
        
        let modelResult = await runModel(PROMPT)
        // let modelResult = await runModel(PREPROMPT, PROMPT)
        // let modelResult = await runModel(PROMPTS)
        
        await displayModel(modelResult)
        // await displayModel(resultsArray[0], resultsArray[1])
    }
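
    // A sketch of what the commented-out multi-prompt variant above could
    // look like if re-enabled (assumes each blank value replaces [BLANK] in turn):
    //
    //   let PROMPTS = blanksArray.map(b => PROMPT.replace('[BLANK]', b.value()))
    //   for (let p of PROMPTS) { await displayModel(await runModel(p)) }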

    async function displayModel(m){
        // fill-mask returns an array of result objects; format them as
        // readable lines instead of coercing the array to a string
        let text = m.map(r => `${r.sequence} (${r.token_str})`).join('<br>')
        let modelDisplay = p5.createElement("p", "Results:")
        modelDisplay.html(`Results:<br>${text}`)
    }
});


///// MODEL STUFF

// async function runModel(PROMPT){
//     // let MODELNAME = 'distilroberta-base'

//     let unmasker = await fillMask(PROMPT)

//     console.log(unmasker)

//     // let res = unmasker(PROMPT, top_k=5)

//     var modelResult = [unmasker[0].sequence, unmasker[1].sequence, unmasker[2].sequence]

//     return modelResult
// }


// async function runModel(PREPROMPT, PROMPT){
//     // inference API version

//     let INPUT = PREPROMPT + PROMPT

//     // let MODELNAME = "HuggingFaceH4/zephyr-7b-beta"
//     // let MODELNAME = "openai-community/gpt2"
//     // let MODELNAME = 'mistral_inference'
//     // let MODELNAME = 'Xenova/distilgpt2'
//     let MODELNAME = 'bigscience/bloom-560m'

//     let out = await textGeneration({    
//         accessToken: HF_TOKEN,
//         model: MODELNAME,
//         inputs: INPUT,
//         parameters: {
//             max_new_tokens: 128
//         }
//     });

//     // let out = await inference.textGeneration(INPUT, {
//     //     model: MODELNAME,
//     //     max_new_tokens: 128
//     // })

//     // let out = await inference.textGeneration(INPUT, 'bigscience/bloom-560m')

//     // text-generation-inference
//     // Uncaught (in promise) Error: HfApiJson(Deserialize(Error("unknown variant `transformers.js`, expected one of `text-generation-inference`, `transformers`, `allennlp`, `flair`, `espnet`, `asteroid`, `speechbrain`, `timm`, `sentence-transformers`, `spacy`, `sklearn`, `stanza`, `adapter-transformers`, `fasttext`, `fairseq`, `pyannote-audio`, `doctr`, `nemo`, `fastai`, `k2`, `diffusers`, `paddlenlp`, `mindspore`, `open_clip`, `span-marker`, `bertopic`, `peft`, `setfit`", line: 1, column: 397)))

//     // let out = await inference.textGeneration({    
//     //     accessToken: HF_TOKEN,
//     //     model: MODELNAME,
//     //     messages: [{ 
//     //                 role: "system", 
//     //                 content: PREPROMPT
//     //             },{
//     //                 role: "user",
//     //                 content: PROMPT
//     //             }],
//     //     max_new_tokens: 128
//     // });

//     console.log(out)

//     console.log(out.token.text, out.generated_text)

//     // modelResult = await out.messages[0].content

//     // var modelResult = await out.choices[0].message.content
//     var modelResult = await out[0].generated_text
//     console.log(modelResult);

//     return modelResult
// }


// leftover generation-option experiments from the pipeline versions above:
// inference.fill_mask({
// let out = await pipe(PREPROMPT + PROMPT)
// let out = await pipe(PREPROMPT + PROMPT, {
//     max_new_tokens: 250,
//     temperature: 0.9,
//     // return_full_text: false,
//     repetition_penalty: 1.5,
//     // no_repeat_ngram_size: 2,
//     // num_beams: 2,
//     num_return_sequences: 1
// });



// var PROMPT = `The [BLANK] works as a [blank] but wishes for [blank].`
// /// this needs to run on button click, using string variables from the form to fill in the blanks
// var PROMPT = promptInput.value()


// var blanksArray = ["mother", "father", "sister", "brother"]
// // for num of blanks put in list

//Error: Server Xenova/distilgpt2 does not seem to support chat completion. Error: HfApiJson(Deserialize(Error("unknown variant `transformers.js`, expected one of `text-generation-inference`, `transformers`, `allennlp`, `flair`, `espnet`, `asteroid`, `speechbrain`, `timm`, `sentence-transformers`, `spacy`, `sklearn`, `stanza`, `adapter-transformers`, `fasttext`, `fairseq`, `pyannote-audio`, `doctr`, `nemo`, `fastai`, `k2`, `diffusers`, `paddlenlp`, `mindspore`, `open_clip`, `span-marker`, `bertopic`, `peft`, `setfit`", line: 1, column: 397)))
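// (The hosted Inference API rejects repos whose library tag is `transformers.js`,
// which is what the "unknown variant" deserialization error above is reporting.)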





// async function runModel(PREPROMPT, PROMPT){
//     // // pipeline version

//     // let MODELNAME = 'mistralai/Mistral-Nemo-Instruct-2407'
//     let MODELNAME = "HuggingFaceH4/zephyr-7b-beta"

//     // HF_TOKEN

//     // 'meta-llama/Meta-Llama-3-70B-Instruct'
//     // 'openai-community/gpt2'
//     // 'Xenova/gpt-3.5-turbo'
//     // , 'Xenova/distilgpt2'
//     // 'mistralai/Mistral-7B-Instruct-v0.2'
//     // 'HuggingFaceH4/zephyr-7b-beta'

//     // pipeline/transformers version
//     let pipe = await pipeline('text-generation', {
//         model: MODELNAME,
//         accessToken: HF_TOKEN
//     });
//     // seems to work with default model distilgpt2 ugh
    
    
//     // let out = await pipe(inputText, {
//     //     max_tokens: 250,
//     //     return_full_text: false
//     //     // repetition_penalty: 1.5,
//     //     // num_return_sequences: 1 //must be 1 for greedy search
//     // })

//     // let inputText = PREPROMPT + PROMPT

//     // let out = await pipe(inputText)

//     let out = await pipe({    
//         messages: [{ 
//                 role: "system", 
//                 content: PREPROMPT
//             },{
//                 role: "user",
//                 content: PROMPT
//             }],
//         max_new_tokens: 100
//     });
    
//     console.log(out)
    
//     var modelResult = await out.choices[0].message.content
//     // var modelResult = await out[0].generated_text
//     console.log(modelResult)

//     return modelResult

// }


// async function runModel(PROMPTS){
async function runModel(PROMPT){

    let MODELNAME = "bert-base-uncased"
    // let MODELNAME = 'distilroberta-base'

    let unmasker = await pipeline('fill-mask', MODELNAME)

    let res = await unmasker(PROMPT)
    // , top_k=5

    console.log(res[0].sequence, res[0].token_str, res[1].sequence, res[1].token_str)

    var modelResult = await res

    return modelResult

    // for (let p in PROMPTS){
    //     var res = unmasker(p)
    //     console.log(res)

    //     var modelResult = res[0].token_str
    //     console.log(modelResult)

    //     resultsArray.push(modelResult)
    // }
    // return resultsArray
}
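
// For reference, each element of the fill-mask result array has roughly this
// shape in transformers.js (illustrative values, not a real output):
//
//   { score: 0.19, token: 3836, token_str: 'teacher',
//     sequence: 'the man works as a teacher but ...' }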

async function textGenTask(input){
    console.log('text-gen task initiated')

    // no model named here, so the library falls back to its default text-generation model
    const pipe = await pipeline('text-generation')

    let out = await pipe(input)

    console.log(out)
    console.log('text-gen task completed')

    // parsing of output: collect just the generated strings
    let OUTPUT_LIST = []
    out.forEach(o => {
        console.log(o)
        OUTPUT_LIST.push(o.generated_text)
    })

    console.log(OUTPUT_LIST)
    console.log('text-gen parsing complete')

    return OUTPUT_LIST
    // return out
}
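
// A minimal sketch of calling textGenTask (hypothetical; the active sketch
// only uses the fill-mask runModel above):
//
//   let generations = await textGenTask(`The man works as a teacher but`)
//   console.log(generations) // array of generated continuations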