Sarah Ciston committed on
Commit d4c0968 · 1 Parent(s): 4af8f9e

try pipeline with auth

Files changed (1)
  1. sketch.js +71 -63
sketch.js CHANGED
@@ -1,9 +1,12 @@
 // connect to API via module
 
 // import { AutoTokenizer, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';
-// import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';
+import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';
 import { oauthLoginUrl, oauthHandleRedirectIfPresent } from 'https://esm.sh/@huggingface/hub';
 
+
+
+/// AUTHORIZATION
 const oauthResult = await oauthHandleRedirectIfPresent();
 
 if (!oauthResult) {
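
Note: the if (!oauthResult) branch is cut off at the hunk boundary. Given the @huggingface/hub helpers imported above, the usual pattern is to redirect to the Hugging Face login page when no OAuth result is present, then read the access token once the redirect returns. A minimal sketch of that flow (an assumption about the surrounding code, not part of this commit):

    const oauthResult = await oauthHandleRedirectIfPresent();
    if (!oauthResult) {
      // no token yet: send the user to the Hugging Face login page
      window.location.href = await oauthLoginUrl();
    }
    // back from the redirect: the result carries the bearer token
    const HFAUTH = oauthResult.accessToken;
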
@@ -17,9 +20,8 @@ const HFAUTH = oauthResult.accessToken
 console.log(HFAUTH)
 
 // import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.10.1';
-// import { HfInference } from 'https://cdn.jsdelivr.net/npm/@huggingface/inference@2.7.0/+esm';
-import { HfInference } from 'https://esm.sh/@huggingface/inference';
-const inference = new HfInference(HFAUTH);
+// import { HfInference } from 'https://esm.sh/@huggingface/inference';
+// const inference = new HfInference(HFAUTH);
 
 // PIPELINE MODELS
 // models('Xenova/gpt2', 'Xenova/gpt-3.5-turbo', 'mistralai/Mistral-7B-Instruct-v0.2', 'Xenova/llama-68m', 'meta-llama/Meta-Llama-3-8B', 'Xenova/bloom-560m', 'Xenova/distilgpt2')
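
Note: this hunk keeps the OAuth token but comments out the @huggingface/inference client in favor of the transformers.js pipeline. One caution grounded in the surviving code: console.log(HFAUTH) prints a live access token to the browser console. A safer debug line that confirms auth without exposing the secret:

    console.log(HFAUTH ? 'HF access token acquired' : 'no HF access token');
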
@@ -173,35 +175,35 @@ new p5(function (p5) {
 ///// MODEL STUFF
 
 
-async function runModel(PREPROMPT, PROMPT){
-// inference API version
+// async function runModel(PREPROMPT, PROMPT){
+// // inference API version
 
-let MODELNAME = "HuggingFaceH4/zephyr-7b-beta"
-// let MODELNAME = "openai-community/gpt2"
-// let MODELNAME = 'mistral_inference'
+// let MODELNAME = "HuggingFaceH4/zephyr-7b-beta"
+// // let MODELNAME = "openai-community/gpt2"
+// // let MODELNAME = 'mistral_inference'
 
-let out = await inference.textGeneration({
-model: MODELNAME,
-messages: [{
-role: "system",
-content: PREPROMPT
-},{
-role: "user",
-content: PROMPT
-}],
-max_new_tokens: 150
-});
+// let out = await inference.textGeneration({
+// model: MODELNAME,
+// messages: [{
+// role: "system",
+// content: PREPROMPT
+// },{
+// role: "user",
+// content: PROMPT
+// }],
+// max_new_tokens: 150
+// });
 
-console.log(out)
+// console.log(out)
 
-// modelResult = await out.messages[0].content
+// // modelResult = await out.messages[0].content
 
-var modelResult = await out.choices[0].message.content
-// var modelResult = await out[0].generated_text
-console.log(modelResult);
+// var modelResult = await out.choices[0].message.content
+// // var modelResult = await out[0].generated_text
+// console.log(modelResult);
 
-return modelResult
-}
+// return modelResult
+// }
 
 
 //inference.fill_mask({
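
Note: the runModel body commented out above passed a chat-style messages array to inference.textGeneration and then read out.choices[0].message.content. In @huggingface/inference, textGeneration expects an inputs string, while the messages/choices shapes belong to chatCompletion; mixing the two is a plausible reason this path was abandoned. If the Inference API route is revisited, a sketch using chatCompletion (an assumed fix, not code from this commit):

    import { HfInference } from 'https://esm.sh/@huggingface/inference';

    const inference = new HfInference(HFAUTH);
    const out = await inference.chatCompletion({
      model: 'HuggingFaceH4/zephyr-7b-beta',
      messages: [
        { role: 'system', content: PREPROMPT },
        { role: 'user', content: PROMPT }
      ],
      max_tokens: 150 // chatCompletion takes max_tokens, not max_new_tokens
    });
    const modelResult = out.choices[0].message.content;
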
@@ -232,50 +234,56 @@ async function runModel(PREPROMPT, PROMPT){
 
 
 
-// async function runModel(PREPROMPT, PROMPT){
-// // // Chat completion API
+async function runModel(PREPROMPT, PROMPT){
+// // Chat completion API
+
+// let MODELNAME = 'mistralai/Mistral-Nemo-Instruct-2407'
+let MODELNAME = "HuggingFaceH4/zephyr-7b-beta"
 
-// let MODELNAME = 'mistralai/Mistral-Nemo-Instruct-2407'
+// HFAUTH
 
-// // 'meta-llama/Meta-Llama-3-70B-Instruct'
-// // 'openai-community/gpt2'
-// // 'Xenova/gpt-3.5-turbo'
-// // , 'Xenova/distilgpt2'
-// // 'mistralai/Mistral-7B-Instruct-v0.2'
-// // 'HuggingFaceH4/zephyr-7b-beta'
+// 'meta-llama/Meta-Llama-3-70B-Instruct'
+// 'openai-community/gpt2'
+// 'Xenova/gpt-3.5-turbo'
+// , 'Xenova/distilgpt2'
+// 'mistralai/Mistral-7B-Instruct-v0.2'
+// 'HuggingFaceH4/zephyr-7b-beta'
 
-// // pipeline/transformers version
-// let pipe = await pipeline('text-generation', MODELNAME);
-// // seems to work with default model distilgpt2 ugh
+// pipeline/transformers version
+let pipe = await pipeline('text-generation', {
+model: MODELNAME
+});
+// seems to work with default model distilgpt2 ugh
 
 
-// // let out = await pipe(inputText, {
-// // max_tokens: 250,
-// // return_full_text: false
-// // // repetition_penalty: 1.5,
-// // // num_return_sequences: 1 //must be 1 for greedy search
-// // })
+// let out = await pipe(inputText, {
+// max_tokens: 250,
+// return_full_text: false
+// // repetition_penalty: 1.5,
+// // num_return_sequences: 1 //must be 1 for greedy search
+// })
 
-// // let inputText = PREPROMPT + PROMPT
+// let inputText = PREPROMPT + PROMPT
 
-// // let out = await pipe(inputText)
+// let out = await pipe(inputText)
 
-// let out = await pipe({
-// messages: [{
-// role: "system",
-// content: PREPROMPT
-// },{
-// role: "user",
-// content: PROMPT
-// }],
-// max_new_tokens: 100
-// });
+let out = await pipe({
+messages: [{
+role: "system",
+content: PREPROMPT
+},{
+role: "user",
+content: PROMPT
+}],
+max_new_tokens: 100
+});
 
-// console.log(out)
-
-// var modelResult = await out[0].generated_text
-// console.log(modelResult)
+console.log(out)
+
+var modelResult = await out.choices[0].message.content
+// var modelResult = await out[0].generated_text
+console.log(modelResult)
 
-// return modelResult
+return modelResult
 
-// }
+}
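
Note on the re-enabled pipeline call: in @xenova/transformers v2, pipeline() takes the model id as its second positional argument, so passing { model: MODELNAME } there likely leaves the model unset, which would explain the "seems to work with default model distilgpt2" comment. The v2 text-generation pipeline also takes a prompt string and returns [{ generated_text }] rather than an OpenAI-style choices array, so reading out.choices[0].message.content would fail. A minimal working sketch under those assumptions, substituting Xenova/distilgpt2 since the 7B zephyr checkpoint is unlikely to have browser-ready ONNX weights:

    import { pipeline } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';

    // model id is the second positional argument in transformers.js v2
    const pipe = await pipeline('text-generation', 'Xenova/distilgpt2');

    const out = await pipe(PREPROMPT + PROMPT, { max_new_tokens: 100 });

    // v2 returns [{ generated_text }] (prompt included by default)
    const modelResult = out[0].generated_text;
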
 