Sarah Ciston committed
Commit d4c0968 · 1 Parent(s): 4af8f9e

try pipeline with auth

sketch.js CHANGED
@@ -1,9 +1,12 @@
 // connect to API via module
 
 // import { AutoTokenizer, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';
-
+import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';
 import { oauthLoginUrl, oauthHandleRedirectIfPresent } from 'https://esm.sh/@huggingface/hub';
 
+
+
+/// AUTHORIZATION
 const oauthResult = await oauthHandleRedirectIfPresent();
 
 if (!oauthResult) {
@@ -17,9 +20,8 @@ const HFAUTH = oauthResult.accessToken
 console.log(HFAUTH)
 
 // import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.10.1';
-// import { HfInference } from 'https://
-
-const inference = new HfInference(HFAUTH);
+// import { HfInference } from 'https://esm.sh/@huggingface/inference';
+// const inference = new HfInference(HFAUTH);
 
 // PIPELINE MODELS
 // models('Xenova/gpt2', 'Xenova/gpt-3.5-turbo', 'mistralai/Mistral-7B-Instruct-v0.2', 'Xenova/llama-68m', 'meta-llama/Meta-Llama-3-8B', 'Xenova/bloom-560m', 'Xenova/distilgpt2')
@@ -173,35 +175,35 @@ new p5(function (p5) {
 ///// MODEL STUFF
 
 
-async function runModel(PREPROMPT, PROMPT){
-// inference API version
+// async function runModel(PREPROMPT, PROMPT){
+// // inference API version
 
-let MODELNAME = "HuggingFaceH4/zephyr-7b-beta"
-// let MODELNAME = "openai-community/gpt2"
-// let MODELNAME = 'mistral_inference'
+// let MODELNAME = "HuggingFaceH4/zephyr-7b-beta"
+// // let MODELNAME = "openai-community/gpt2"
+// // let MODELNAME = 'mistral_inference'
 
-let out = await inference.textGeneration({
-  model: MODELNAME,
-  messages: [{
-    role: "system",
-    content: PREPROMPT
-  },{
-    role: "user",
-    content: PROMPT
-  }],
-  max_new_tokens: 150
-});
+// let out = await inference.textGeneration({
+//   model: MODELNAME,
+//   messages: [{
+//     role: "system",
+//     content: PREPROMPT
+//   },{
+//     role: "user",
+//     content: PROMPT
+//   }],
+//   max_new_tokens: 150
+// });
 
-console.log(out)
+// console.log(out)
 
-// modelResult = await out.messages[0].content
+// // modelResult = await out.messages[0].content
 
-var modelResult = await out.choices[0].message.content
-// var modelResult = await out[0].generated_text
-console.log(modelResult);
+// var modelResult = await out.choices[0].message.content
+// // var modelResult = await out[0].generated_text
+// console.log(modelResult);
 
-return modelResult
-}
+// return modelResult
+// }
 
 
 //inference.fill_mask({
@@ -232,50 +234,56 @@ async function runModel(PREPROMPT, PROMPT){
 
 
 
-
-//
+async function runModel(PREPROMPT, PROMPT){
+// // Chat completion API
+
+// let MODELNAME = 'mistralai/Mistral-Nemo-Instruct-2407'
+let MODELNAME = "HuggingFaceH4/zephyr-7b-beta"
 
-//
+// HFAUTH
 
-//
-//
-//
-//
-//
-//
+// 'meta-llama/Meta-Llama-3-70B-Instruct'
+// 'openai-community/gpt2'
+// 'Xenova/gpt-3.5-turbo'
+// , 'Xenova/distilgpt2'
+// 'mistralai/Mistral-7B-Instruct-v0.2'
+// 'HuggingFaceH4/zephyr-7b-beta'
 
-//
-
-
+// pipeline/transformers version
+let pipe = await pipeline('text-generation', {
+  model: MODELNAME
+});
+// seems to work with default model distilgpt2 ugh
 
 
-//
-//
-//
-// //
-// //
-//
+// let out = await pipe(inputText, {
+//   max_tokens: 250,
+//   return_full_text: false
+//   // repetition_penalty: 1.5,
+//   // num_return_sequences: 1 //must be 1 for greedy search
+// })
 
-//
+// let inputText = PREPROMPT + PROMPT
 
-//
+// let out = await pipe(inputText)
 
-
-
-
-
-
-
-
-
-
-
+let out = await pipe({
+  messages: [{
+    role: "system",
+    content: PREPROMPT
+  },{
+    role: "user",
+    content: PROMPT
+  }],
+  max_new_tokens: 100
+});
 
-
-
-
-//
+console.log(out)
+
+var modelResult = await out.choices[0].message.content
+// var modelResult = await out[0].generated_text
+console.log(modelResult)
 
-
+return modelResult
 
-
+}
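Notes on the APIs this commit touches follow; the sketches are illustrations, not part of the commit.

The commented-out inference-API runModel (new lines 178-206) passes a chat-style messages array to inference.textGeneration() and then reads out.choices[0].message.content. In @huggingface/inference, textGeneration() expects a plain string prompt under inputs; the messages/choices shape belongs to the chat-completion call, chatCompletion(), in recent versions of the library. A minimal sketch of that variant, reusing HFAUTH, PREPROMPT, and PROMPT from sketch.js and the zephyr model named in the diff:

import { HfInference } from 'https://esm.sh/@huggingface/inference';

const inference = new HfInference(HFAUTH); // HFAUTH: the OAuth access token from above

const out = await inference.chatCompletion({
  model: 'HuggingFaceH4/zephyr-7b-beta',
  messages: [
    { role: 'system', content: PREPROMPT },
    { role: 'user', content: PROMPT },
  ],
  max_tokens: 150, // chat completion uses max_tokens, not max_new_tokens
});

console.log(out.choices[0].message.content);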
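The new runModel builds its pipeline with pipeline('text-generation', { model: MODELNAME }). In @xenova/transformers v2 the second argument to pipeline() is the model id string itself, so an options object there is not a usable model name and the intended checkpoint likely never loads, which would explain the "seems to work with default model distilgpt2 ugh" comment. A v2-style sketch under that assumption (model choice and prompts illustrative):

import { pipeline } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';

// the second argument is the model id itself, not an options object
const pipe = await pipeline('text-generation', 'Xenova/distilgpt2');

// v2 text-generation pipes take a plain string prompt,
// so system and user text are concatenated by hand
const PREPROMPT = 'You are a helpful writing assistant.';
const PROMPT = 'Write one sentence about the sea.';
const out = await pipe(PREPROMPT + '\n' + PROMPT, { max_new_tokens: 100 });

console.log(out[0].generated_text);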
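Relatedly, the new runModel feeds a messages object straight into the pipe and reads out.choices[0].message.content, which is the hosted chat API's response shape; a local v2 text-generation pipe returns [{ generated_text }]. Chat-style input is supported by the newer @huggingface/transformers (v3) package, roughly as below (the v3 package and model id are assumptions, not what the commit uses):

import { pipeline } from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers';

const generator = await pipeline('text-generation', 'onnx-community/Qwen2.5-0.5B-Instruct');

const out = await generator([
  { role: 'system', content: 'You are a helpful writing assistant.' },
  { role: 'user', content: 'Write one sentence about the sea.' },
], { max_new_tokens: 100 });

// v3 returns the whole conversation; the reply is the last message
console.log(out[0].generated_text.at(-1).content);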