Sarah Ciston committed
Commit e871332
1 Parent(s): 812da65

rewind to basic model, stash backup

Files changed (2):
  1. sketch.js +12 -36
  2. sketch.js-backup +256 -0
sketch.js CHANGED
@@ -34,7 +34,7 @@ var blanksArray = []
 
 new p5(function (p5) {
   p5.setup = function(){
-    console.log('p5 instance loaded')
+    console.log('p5 loaded')
     p5.noCanvas()
     makeInterface()
   }
@@ -43,9 +43,9 @@ new p5(function (p5) {
     //
   }
 
-  // window.onload = function(){
-  //   console.log('p5 instance loaded')
-  // }
+  window.onload = function(){
+    console.log('dom and js loaded')
+  }
 
   let fieldsDiv = document.querySelector("#blanks")
 
@@ -158,53 +158,29 @@ new p5(function (p5) {
 async function runModel(PREPROMPT, PROMPT){
   // // Chat completion API
 
+  let inputText = PREPROMPT + PROMPT
+
   // pipeline/transformers version
-  // let pipe = await pipeline('text-generation', 'Xenova/distilgpt2');
+  let pipe = await pipeline('text-generation', 'Xenova/distilgpt2');
   // seems to work with default model distilgpt2 ugh
 
-
-  // IMPORTANT: different models have different input/output structures for their API so look to the samples and references on the specific model page for help :)
-
   // 'meta-llama/Meta-Llama-3-70B-Instruct'
   // 'openai-community/gpt2'
   // 'Xenova/gpt-3.5-turbo'
   // , 'Xenova/distilgpt2'
 
-  // let res = await pipe(inputText, {
+  // let out = await pipe(inputText, {
   //   max_tokens: 250,
   //   return_full_text: false
   //   repetition_penalty: 1.5,
   //   num_return_sequences: 1 //must be 1 for greedy search
   // })
 
-  // let generator = pipeline("text-generation", "HuggingFaceH4/zephyr-7b-beta")
-
-  let MESSAGES = PREPROMPT + PROMPT
-  // for zephyr customizing
-  // let MESSAGES = [
-  //   {
-  //     "role": "system",
-  //     "content": PREPROMPT
-  //   },{
-  //     "role": "user",
-  //     "content": PROMPT
-  //   }
-  // ]
-
-  // let res = await pipe(MESSAGES, {
-  //   max_new_tokens: 150,
-  //   temperature: 0.7,
-  //   top_k: 50,
-  //   top_p: 0.95
-  // });
-  let generator = pipeline('text-generation', 'Xenova/distilgpt2')
-
-  let res = await generator(MESSAGES)
+  let out = await pipe(inputText)
 
-  console.log(res)
+  console.log(out)
 
-  var modelResult = await res[0].generated_text
-  // var modelResult = await res[0].generated_text[0].content
+  var modelResult = await out[0].generated_text
   console.log(modelResult)
 
   return modelResult
@@ -243,7 +219,7 @@ async function runModel(PREPROMPT, PROMPT){
   //   num_return_sequences: 1
   // });
 
-  // Must be one of [text-classification,token-classification,question-answering,fill-mask,summarization,translation,text2text-generation,text-generation,zero-shot-classification,audio-classification,zero-shot-audio-classification,automatic-speech-recognition,text-to-audio,image-to-text,image-classification,image-segmentation,zero-shot-image-classification,object-detection,zero-shot-object-detection,document-question-answering,image-to-image,depth-estimation,feature-extraction]
+
 
   // var PROMPT = `The [BLANK] works as a [blank] but wishes for [blank].`
   // /// this needs to run on button click, use string variables to blank in the form
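For comparison, the reverted runModel() boils down to the basic transformers.js text-generation pattern. A minimal self-contained sketch of that pattern, assuming @xenova/transformers 2.x loaded as an ES module from the CDN; the prompt string and the max_new_tokens value are made-up examples, not from the commit:

  // minimal text-generation pipeline, per the reverted sketch.js
  import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.10.1';

  env.allowLocalModels = false; // fetch model files from the Hugging Face Hub

  const pipe = await pipeline('text-generation', 'Xenova/distilgpt2');
  const out = await pipe('The traveler works as a baker but wishes for', {
    max_new_tokens: 60 // optional cap on the continuation length
  });
  console.log(out[0].generated_text); // the prompt plus the model's continuation

The commit also drops the chat-style MESSAGES array (system and user roles) that the zephyr experiment used; with a plain text-generation pipeline the preprompt and prompt are simply concatenated into one string, as above.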
sketch.js-backup ADDED
@@ -0,0 +1,256 @@
+ // connect to API via module
+ 
+ import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.10.1';
+ // import { HfInference } from 'https://cdn.jsdelivr.net/npm/@huggingface/inference@2.7.0/+esm';
+ // const inference = new HfInference();
+ 
+ // PIPELINE MODELS
+ // models('Xenova/gpt2', 'Xenova/gpt-3.5-turbo', 'mistralai/Mistral-7B-Instruct-v0.2', 'Xenova/llama-68m', 'meta-llama/Meta-Llama-3-8B', 'Xenova/bloom-560m', 'Xenova/distilgpt2')
+ // list of models by task: 'https://huggingface.co/docs/transformers.js/index#supported-tasksmodels'
+ 
+ 
+ // Since we will download the model from the Hugging Face Hub, we can skip the local model check
+ env.allowLocalModels = false;
+ 
+ ///////// VARIABLES
+ 
+ // establish global variables to reference later
+ var promptInput
+ var blanksArray = []
+ 
+ // pick a model (see list of models)
+ // INFERENCE MODELS
+ // let MODELNAME = "mistralai/Mistral-7B-Instruct-v0.2";
+ // models('Xenova/gpt2', 'Xenova/gpt-3.5-turbo', 'mistralai/Mistral-7B-Instruct-v0.2', 'Xenova/llama-68m', "meta-llama/Meta-Llama-3-70B-Instruct", 'meta-llama/Meta-Llama-3-8B', 'Xenova/bloom-560m', 'Xenova/distilgpt2', "meta-llama/Meta-Llama-3-70B-Instruct")
+ 
+ // const detector = await pipeline('text-generation', 'meta-llama/Meta-Llama-3-8B', 'Xenova/LaMini-Flan-T5-783M');
+ 
+ 
+ 
+ 
+ ///// p5 STUFF
+ 
+ // create an instance of the p5 class as a workspace for all your p5.js code
+ 
+ new p5(function (p5) {
+   p5.setup = function(){
+     console.log('p5 instance loaded')
+     p5.noCanvas()
+     makeInterface()
+   }
+ 
+   p5.draw = function(){
+     //
+   }
+ 
+   // window.onload = function(){
+   //   console.log('p5 instance loaded')
+   // }
+ 
+   let fieldsDiv = document.querySelector("#blanks")
+ 
+   function makeInterface(){
+     console.log('reached makeInterface')
+     let title = p5.createElement('h1', 'p5.js Critical AI Prompt Battle')
+     // title.position(0,50)
+ 
+     p5.createElement('p',`This tool lets you run several AI chat prompts at once and compare their results. Use it to explore what models 'know' about various concepts, communities, and cultures. For more information on prompt programming and critical AI, see [Tutorial & extra info][TO-DO][XXX]`)
+     // .position(0,100)
+ 
+     promptInput = p5.createInput("")
+     // promptInput.position(0,160)
+     promptInput.size(600);
+     promptInput.attribute('label', `Write a text prompt with at least one [BLANK] that describes someone. You can also write [FILL] where you want the bot to fill in a word on its own.`)
+     promptInput.value(`The [BLANK] works as a [FILL] but wishes for...`)
+     promptInput.addClass("prompt")
+     p5.createP(promptInput.attribute('label'))
+     // .position(0,100)
+ 
+     //make for loop to generate
+     //make a button to make another
+     //add them to the list of items
+     fieldsDiv = p5.createDiv()
+     fieldsDiv.id('fieldsDiv')
+     // fieldsDiv.position(0,250)
+ 
+     // initial code to make a single field
+     // blankA = p5.createInput("");
+     // blankA.position(0, 240);
+     // blankA.size(300);
+     // blankA.addClass("blank")
+     // blankA.parent('#fieldsDiv')
+ 
+     // function to generate a single BLANK form field instead
+     addField()
+ 
+     // // BUTTONS // //
+     // let buttonsDiv = p5.createDiv() // container to organize buttons
+     // buttonsDiv.id('buttonsDiv')
+ 
+     // send prompt to model
+     let submitButton = p5.createButton("SUBMIT")
+     // submitButton.position(0,500)
+     submitButton.size(170)
+     submitButton.class('submit');
+     // submitButton.parent('#buttonsDiv')
+     submitButton.mousePressed(getInputs)
+ 
+     // add more blanks to fill in
+     let addButton = p5.createButton("more blanks")
+     addButton.size(170)
+     // addButton.position(220,500)
+     // addButton.parent('#buttonsDiv')
+     addButton.mousePressed(addField)
+ 
+     // TO-DO a model drop down list?
+ 
+     // alt-text description
+     // p5.describe(`Pink and black text on a white background with form inputs and two buttons. The text describes a p5.js Critical AI Prompt Battle tool that lets you run several AI chat prompts at once and compare their results. Use it to explore what models 'know' about various concepts, communities, and cultures. In the largest form input you can write a prompt to submit. In smaller inputs, you can write variables that will be inserted into that prompt as variations of the prompt when it is run through the model. There is a submit button, a button to add more variations, and when the model is run it adds text at the bottom showing the output results.`)
+   }
+ 
+   function addField(){
+     let f = p5.createInput("")
+     f.class("blank")
+     f.parent("#fieldsDiv")
+ 
+     // DOES THIS WORK???????????????????
+     blanksArray.push(f)
+     console.log("made field")
+ 
+     // Cap the number of fields, avoids token limit in prompt
+     let blanks = document.querySelectorAll(".blank")
+     if (blanks.length > 7){
+       console.log(blanks.length)
+       addButton.style('visibility','hidden')
+     }
+   }
+ 
+   async function getInputs(){
+     // Map the list of blanks text values to a new list
+     let BLANKSVALUES = blanksArray.map(i => i.value())
+     console.log(BLANKSVALUES)
+ 
+     // Do model stuff in this function instead of in general
+     let PROMPT = promptInput.value() // updated check of the prompt field
+ 
+     // BLANKS = inputValues // get ready to feed array list into model
+ 
+     let PREPROMPT = `In the sentence I provide, please fill in the [BLANK] with each word in the array ${BLANKSVALUES}, replace any [FILL] with a word of your choice. Here is the SAMPLE SENTENCE: `
+ 
+     // we pass PROMPT and PREPROMPT to the model function, don't need to pass BLANKSVALUES bc it's passed into the PREPROMPT already here
+ 
+     // Please return an array of sentences based on the sample sentence to follow. In each sentence,
+ 
+     let modelResult = await runModel(PREPROMPT, PROMPT)
+ 
+     await displayModel(modelResult)
+   }
+ 
+   async function displayModel(m){
+     let modelDisplay = p5.createElement("p", "Results:");
+     await modelDisplay.html(m)
+   }
+ });
+ 
+ 
+ ///// MODEL STUFF
+ 
+ async function runModel(PREPROMPT, PROMPT){
+   // // Chat completion API
+ 
+   // pipeline/transformers version
+   // let pipe = await pipeline('text-generation', 'Xenova/distilgpt2');
+   // seems to work with default model distilgpt2 ugh
+ 
+ 
+   // IMPORTANT: different models have different input/output structures for their API so look to the samples and references on the specific model page for help :)
+ 
+   // 'meta-llama/Meta-Llama-3-70B-Instruct'
+   // 'openai-community/gpt2'
+   // 'Xenova/gpt-3.5-turbo'
+   // , 'Xenova/distilgpt2'
+ 
+   // let res = await pipe(inputText, {
+   //   max_tokens: 250,
+   //   return_full_text: false
+   //   repetition_penalty: 1.5,
+   //   num_return_sequences: 1 //must be 1 for greedy search
+   // })
+ 
+   // let generator = pipeline("text-generation", "HuggingFaceH4/zephyr-7b-beta")
+ 
+   let MESSAGES = PREPROMPT + PROMPT
+   // for zephyr customizing
+   // let MESSAGES = [
+   //   {
+   //     "role": "system",
+   //     "content": PREPROMPT
+   //   },{
+   //     "role": "user",
+   //     "content": PROMPT
+   //   }
+   // ]
+ 
+   // let res = await pipe(MESSAGES, {
+   //   max_new_tokens: 150,
+   //   temperature: 0.7,
+   //   top_k: 50,
+   //   top_p: 0.95
+   // });
+   let generator = pipeline('text-generation', 'Xenova/distilgpt2')
+ 
+   let res = await generator(MESSAGES)
+ 
+   console.log(res)
+ 
+   var modelResult = await res[0].generated_text
+   // var modelResult = await res[0].generated_text[0].content
+   console.log(modelResult)
+ 
+   return modelResult
+ 
+ }
+ 
+ 
+ // inference API version, not working in spaces
+ // const out = await inference.chatCompletion({
+ //   model: MODELNAME,
+ //   messages: [{ role: "user", content: PREPROMPT + PROMPT }],
+ //   max_tokens: 100
+ // });
+ 
+ // console.log(out)
+ 
+ // // modelResult = await out.messages[0].content
+ 
+ // var modelResult = await out.choices[0].message.content
+ // // var modelResult = await out[0].generated_text
+ // console.log(modelResult);
+ 
+ // return modelResult
+ 
+ 
+ 
+ //inference.fill_mask({
+ // let out = await pipe(PREPROMPT + PROMPT)
+ // let out = await pipe(PREPROMPT + PROMPT, {
+ //   max_new_tokens: 250,
+ //   temperature: 0.9,
+ //   // return_full_text: False,
+ //   repetition_penalty: 1.5,
+ //   // no_repeat_ngram_size: 2,
+ //   // num_beams: 2,
+ //   num_return_sequences: 1
+ // });
+ 
+ // Must be one of [text-classification,token-classification,question-answering,fill-mask,summarization,translation,text2text-generation,text-generation,zero-shot-classification,audio-classification,zero-shot-audio-classification,automatic-speech-recognition,text-to-audio,image-to-text,image-classification,image-segmentation,zero-shot-image-classification,object-detection,zero-shot-object-detection,document-question-answering,image-to-image,depth-estimation,feature-extraction]
+ 
+ // var PROMPT = `The [BLANK] works as a [blank] but wishes for [blank].`
+ // /// this needs to run on button click, use string variables to blank in the form
+ // var PROMPT = promptInput.value()
+ 
+ 
+ // var blanksArray = ["mother", "father", "sister", "brother"]
+ // // for num of blanks put in list
+ 
+ //Error: Server Xenova/distilgpt2 does not seem to support chat completion. Error: HfApiJson(Deserialize(Error("unknown variant `transformers.js`, expected one of `text-generation-inference`, `transformers`, `allennlp`, `flair`, `espnet`, `asteroid`, `speechbrain`, `timm`, `sentence-transformers`, `spacy`, `sklearn`, `stanza`, `adapter-transformers`, `fasttext`, `fairseq`, `pyannote-audio`, `doctr`, `nemo`, `fastai`, `k2`, `diffusers`, `paddlenlp`, `mindspore`, `open_clip`, `span-marker`, `bertopic`, `peft`, `setfit`", line: 1, column: 397)))
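The error stashed on the last line appears to come from pointing the hosted Inference API at a transformers.js-only checkpoint: 'Xenova/...' conversions are tagged with the library variant `transformers.js`, which the server-side chat-completion route does not recognize, so they only run in-browser through the pipeline API. A hedged sketch of the commented-out Inference API path, assuming @huggingface/inference 2.x and reusing the Mistral checkpoint and fields already listed in the backup; the prompt string is an illustrative example:

  // hosted chat-completion route, per the commented-out code above
  import { HfInference } from 'https://cdn.jsdelivr.net/npm/@huggingface/inference@2.7.0/+esm';

  const inference = new HfInference(); // optionally pass an access token for higher rate limits

  const out = await inference.chatCompletion({
    model: 'mistralai/Mistral-7B-Instruct-v0.2', // must be a chat-capable hosted model, not a Xenova/* conversion
    messages: [{ role: 'user', content: 'The [BLANK] works as a [FILL] but wishes for...' }],
    max_tokens: 100
  });
  console.log(out.choices[0].message.content); // the assistant reply only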