// connect to the model in-browser via the transformers.js module

import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.10.1';
// import { HfInference } from 'https://cdn.jsdelivr.net/npm/@huggingface/inference@2.7.0/+esm';
// const inference = new HfInference();

// PIPELINE MODELS
// models('Xenova/gpt2', 'Xenova/gpt-3.5-turbo', 'mistralai/Mistral-7B-Instruct-v0.2', 'Xenova/llama-68m', 'meta-llama/Meta-Llama-3-8B', 'Xenova/bloom-560m', 'Xenova/distilgpt2')
// list of models by task: 'https://huggingface.co/docs/transformers.js/index#supported-tasksmodels'


// Since we will download the model from the Hugging Face Hub, we can skip the local model check
env.allowLocalModels = false;

///////// VARIABLES 

// establish global variables to reference later
var promptInput
var blanksArray = []

// pick a model (see list of models)
// INFERENCE MODELS
// let MODELNAME = "mistralai/Mistral-7B-Instruct-v0.2";
// models('Xenova/gpt2', 'Xenova/gpt-3.5-turbo', 'mistralai/Mistral-7B-Instruct-v0.2', 'Xenova/llama-68m', "meta-llama/Meta-Llama-3-70B-Instruct", 'meta-llama/Meta-Llama-3-8B', 'Xenova/bloom-560m', 'Xenova/distilgpt2', "meta-llama/Meta-Llama-3-70B-Instruct")

// const detector = await pipeline('text-generation', 'meta-llama/Meta-Llama-3-8B', 'Xenova/LaMini-Flan-T5-783M');




///// p5 STUFF

// create an instance of the p5 class as a workspace for all your p5.js code

new p5(function (p5) {
    p5.setup = function(){
        console.log('p5 loaded')
        p5.noCanvas()
        makeInterface()
    }

    p5.draw = function(){
        //
    }

    window.onload = function(){
        console.log('dom and js loaded')
    }

    let fieldsDiv = document.querySelector("#blanks")
    let addButton // declared at this scope so addField() can hide it once enough blanks exist

    function makeInterface(){
        console.log('reached makeInterface')
        let title = p5.createElement('h1', 'p5.js Critical AI Prompt Battle')
        // title.position(0,50)

        p5.createElement('p',`This tool lets you run several AI chat prompts at once and compare their results. Use it to explore what models 'know' about various concepts, communities, and cultures. For more information on prompt programming and critical AI, see [XXX][TO-DO]`)
            // .position(0,100)

        promptInput = p5.createInput("")
        // promptInput.position(0,160)
        promptInput.size(600);
        promptInput.attribute('label', `Write a text prompt with at least one [BLANK] that describes someone. You can also write [FILL] where you want the bot to fill in a word on its own.`)
        promptInput.value(`The [BLANK] works as a [FILL] but wishes for...`)
        promptInput.addClass("prompt")
        p5.createP(promptInput.attribute('label'))
            // .position(0,100)

        //make for loop to generate
        //make a button to make another
        //add them to the list of items
        fieldsDiv = p5.createDiv()
        fieldsDiv.id('fieldsDiv')
        // fieldsDiv.position(0,250)

        // initial code to make a single field
        // blankA = p5.createInput("");
        // blankA.position(0, 240);
        // blankA.size(300);
        // blankA.addClass("blank")
        // blankA.parent('#fieldsDiv')

        // function to generate a single BLANK form field instead
        addField()

        // // BUTTONS // // 

        // send prompt to model
        let submitButton = p5.createButton("SUBMIT")
        // submitButton.position(0,500)
        submitButton.size(170)
        submitButton.class('submit'); 
        submitButton.mousePressed(getInputs)

        // add more blanks to fill in
        addButton = p5.createButton("more blanks")
        addButton.size(170)
        // addButton.position(220,500)
        addButton.mousePressed(addField)

        // TO-DO a model drop down list?

        // describe(``)
        // TO-DO alt-text description
    }

    function addField(){
        let f = p5.createInput("")
        f.class("blank")
        f.parent("#fieldsDiv")

        // keep a reference to each input field so getInputs() can read its value later
        blanksArray.push(f)
        console.log("made field")
        
        // Cap the number of fields so the prompt stays within the model's token limit
        let blanks = document.querySelectorAll(".blank")
        if (blanks.length > 7){
            console.log(blanks.length)
            addButton.style('visibility','hidden')
        }
    }

    async function getInputs(){
        // Map the list of blanks text values to a new list
        let BLANKSVALUES = blanksArray.map(i => i.value())
        console.log(BLANKSVALUES)
        
        // Run the model from this handler, using the latest values in the form
        let PROMPT = promptInput.value() // current value of the prompt field

        // BLANKS = inputValues // get ready to feed array list into model

        let PREPROMPT = `Please return an array of sentences. For each word in the array ${BLANKSVALUES}, fill in the [BLANK] in the following sentence with that word. Replace any [FILL] with an appropriate word of your choice.`

        // We pass PREPROMPT and PROMPT to the model function; BLANKSVALUES doesn't need to be passed separately because it is already interpolated into PREPROMPT above.

        let modelResult = await runModel(PREPROMPT, PROMPT)

        await displayModel(modelResult)
    }

    async function displayModel(m){
        let modelDisplay = p5.createElement("p", "Results:");
        modelDisplay.html(m, true) // append the model output after the "Results:" label
    }

    // async function showResults(){
    //     modelDisplay = p5.createElement("p", "Results:");
    //     // modelDisplay.position(0, 380);
    //         setTimeout(() => {
    //             modelDisplay.html(modelResult)
    //     }, 2000);
    // }

    // var modelResult = submitButton.mousePressed(runModel) = function(){
    //     // listens for the button to be clicked
    //     // run the prompt through the model here
    //     // modelResult = runModel()
    //     // return modelResult
    //     runModel()
    // }

    // function makeblank(i){
    //     i = p5.createInput("");
    //     i.position(0, 300); //append to last blank and move buttons down
    //     i.size(200);
    //   }    
});


///// MODEL STUFF

async function runModel(PREPROMPT, PROMPT){
    // // Chat completion API

    // pipeline/transformers version TEST
    let pipe = await pipeline('text-generation', 'Xenova/distilgpt2');

    // 'meta-llama/Meta-Llama-3-70B-Instruct'
    // 'openai-community/gpt2'
    // 'Xenova/gpt-3.5-turbo'

    // concatenate the instructions and the user's prompt into one input string
    // (a bare comma here would discard PREPROMPT via the comma operator)
    let out = await pipe(PREPROMPT + PROMPT, {
        max_new_tokens: 250,
        return_full_text: false,
        repetition_penalty: 1.5,
        num_return_sequences: 2
    })

    // out = await pipe(PREPROMPT + PROMPT)

    console.log(out)

    // the pipeline resolves to an array with one object per returned sequence,
    // e.g. [{ generated_text: '...' }, { generated_text: '...' }]
    let modelResult = out[0].generated_text
    console.log(modelResult)

    return modelResult

}
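
// A possible refinement (not wired in above): create the text-generation pipeline once and
// reuse it across clicks, so the model isn't re-initialized on every SUBMIT. This is only a
// sketch, reusing the same 'Xenova/distilgpt2' model that runModel() loads.
// let generator = null;
// async function getGenerator(){
//     if (generator === null){
//         generator = await pipeline('text-generation', 'Xenova/distilgpt2');
//     }
//     return generator;
// }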


    // inference API version, not working in spaces
    // const out = await inference.chatCompletion({    
    //     model: MODELNAME,
    //     messages: [{ role: "user", content: PREPROMPT + PROMPT }],
    //     max_tokens: 100
    // });

    // console.log(out)

    // // modelResult = await out.messages[0].content

    // var modelResult = await out.choices[0].message.content
    // // var modelResult = await out[0].generated_text
    // console.log(modelResult);

    // return modelResult
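
    // A sketch of a possibly working Inference API route, as an alternative to the in-browser
    // pipeline above. Assumptions: you have a Hugging Face access token (HF_TOKEN below is a
    // placeholder) and the chosen model is served for chat completion by the hosted API.
    // Uncomment the HfInference import at the top of this file to use it.
    // async function runModelAPI(PREPROMPT, PROMPT){
    //     const inference = new HfInference(HF_TOKEN) // token-authenticated client
    //     const out = await inference.chatCompletion({
    //         model: "mistralai/Mistral-7B-Instruct-v0.2",
    //         messages: [{ role: "user", content: PREPROMPT + PROMPT }],
    //         max_tokens: 100
    //     })
    //     return out.choices[0].message.content
    // }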



    //inference.fill_mask({
    // let out = await pipe(PREPROMPT + PROMPT)
    // let out = await pipe(PREPROMPT + PROMPT, {
    //     max_new_tokens: 250,
    //     temperature: 0.9,
    //     // return_full_text: False,
    //     repetition_penalty: 1.5,
    //     // no_repeat_ngram_size: 2,
    //     // num_beams: 2,
    //     num_return_sequences: 1
    // });



// var PROMPT = `The [BLANK] works as a [blank] but wishes for [blank].`
// /// this needs to run on button click, using string variables to fill in the blanks from the form
// var PROMPT = promptInput.value()


// var blanksArray = ["mother", "father", "sister", "brother"]
// // for num of blanks put in list
// var blanksArray = [`${blankAResult}`, `${blankBResult}`, `${blankCResult}`]

//Error: Server Xenova/distilgpt2 does not seem to support chat completion. Error: HfApiJson(Deserialize(Error("unknown variant `transformers.js`, expected one of `text-generation-inference`, `transformers`, `allennlp`, `flair`, `espnet`, `asteroid`, `speechbrain`, `timm`, `sentence-transformers`, `spacy`, `sklearn`, `stanza`, `adapter-transformers`, `fasttext`, `fairseq`, `pyannote-audio`, `doctr`, `nemo`, `fastai`, `k2`, `diffusers`, `paddlenlp`, `mindspore`, `open_clip`, `span-marker`, `bertopic`, `peft`, `setfit`", line: 1, column: 397)))
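
// Note on the error above: it appears to come from calling the hosted Inference API with
// 'Xenova/distilgpt2', whose library tag is 'transformers.js' -- a library the hosted API
// doesn't recognize for chat completion. Running the model in the browser with pipeline(),
// as runModel() does above, avoids this.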