sarahciston committed • Commit a083558 • 1 Parent(s): 84e05b3
try new model with simple no template

sketch.js CHANGED
@@ -1,56 +1,50 @@
 
 // IMPORT LIBRARIES TOOLS
-import { pipeline, env
-// AutoTokenizer
+import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.10.1';
 
 // skip local model check
 env.allowLocalModels = false;
 
 // GLOBAL VARIABLES
-let PROMPT_INPUT = `The woman has a job as a [MASK] and likes to
+let PROMPT_INPUT = `The woman has a job as a [MASK] and likes to...` // a field for writing or changing a text value
 let pField
-let PREPROMPT = `You're a friendly pirate. Please complete the phrase and fill in any [MASK]
+let PREPROMPT = `You're a friendly pirate. Please complete the phrase and fill in any [MASK]: `
 
 // RUN TEXT-GEN MODEL
 
-async function textGenTask(
+async function textGenTask(pre, prompt){
   console.log('text-gen task initiated')
 
+  let INPUT = pre + prompt
+
+  // PICK MODEL
   let MODEL = 'Xenova/bloomz-560m'
   // const = modelsList = ['Xenova/LaMini-Cerebras-256M', 'Xenova/TinyLlama-1.1B-Chat-v1.0']
 
   const pipe = await pipeline('text-generation', MODEL)
-  let tokenizer = AutoTokenizer.from_pretrained(MODEL)
-
-  const messages = [
-    {"role": "system", "content": PREPROMPT},
-    {"role": "user", "content": input}
-  ]
-
-  const prompt = tokenizer.apply_chat_template(messages, { tokenize: false });
-  // const prompt = pipe.tokenizer.apply_chat_template(messages, { tokenize: false });
 
-  //
-  var out = await pipe(
+  // RUN INPUT THROUGH MODEL, setting hyperparameters
+  var out = await pipe(INPUT, {
     max_new_tokens: 256,
     temperature: 0.7,
     do_sample: true,
     top_k: 50,
   })
 
   console.log(await out)
   console.log('text-gen task completed')
 
-  //
+  // PARSE RESULTS as a list of outputs, two different ways depending on the model
 
   let OUTPUT_LIST = [] // a blank array to store the results from the model
 
   // parsing of output
   await out.forEach(o => {
     console.log(o)
     OUTPUT_LIST.push(o.generated_text)
   })
 
+  // alternate format for parsing, for chat model type
   // await out.choices.forEach(o => {
   //   console.log(o)
   //   OUTPUT_LIST.push(o.message.content)

@@ -145,7 +139,7 @@ new p5(function (p5){
   }
 
   async function displayResults(){
-    console.log('
+    console.log('submitButton pressed')
 
     PROMPT_INPUT = pField.value() // updates prompt if it's changed
     console.log("latest prompt: ", PROMPT_INPUT)

@@ -154,8 +148,8 @@ new p5(function (p5){
     // let outs = await getOutputs(fillIn)
 
     // call the function that runs the model for the task of your choice here
-    // make sure to use the PROMPT_INPUT as a parameter
-    let outs = await textGenTask(PROMPT_INPUT)
+    // make sure to use the PROMPT_INPUT as a parameter, or also the PREPROMPT if valid for that task
+    let outs = await textGenTask(PREPROMPT, PROMPT_INPUT)
     console.log(outs)
 
     let outText = p5.createP('')
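
For reference, the "no template" flow this commit lands on condenses to the standalone sketch below. This is a reconstruction from the diff, not part of the commit: the p5.js UI wiring from the rest of sketch.js is omitted, and returning the parsed list at the end is an assumption based on how displayResults() consumes the result of textGenTask.

    // minimal sketch of the new flow, assuming the @xenova/transformers CDN build pinned in the diff
    import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.10.1';

    env.allowLocalModels = false; // skip the local model check, always fetch from the Hub

    let PREPROMPT = `You're a friendly pirate. Please complete the phrase and fill in any [MASK]: `;
    let PROMPT_INPUT = `The woman has a job as a [MASK] and likes to...`;

    async function textGenTask(pre, prompt) {
      const INPUT = pre + prompt; // plain string concatenation, no chat template

      const pipe = await pipeline('text-generation', 'Xenova/bloomz-560m');

      const out = await pipe(INPUT, {
        max_new_tokens: 256,
        temperature: 0.7,
        do_sample: true,
        top_k: 50,
      });

      // a text-generation pipeline resolves to an array of { generated_text } objects
      return out.map(o => o.generated_text); // assumed return value; the diff shows the parsing but not the return
    }

    textGenTask(PREPROMPT, PROMPT_INPUT).then(outs => console.log(outs));

Concatenating PREPROMPT and PROMPT_INPUT sidesteps the tokenizer's chat template entirely, which matches the commit message: the model sees one flat string.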
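The removed lines had tried the templated route instead, and two details there are worth flagging if it gets revisited. AutoTokenizer.from_pretrained() is async in transformers.js, so the removed `let tokenizer = AutoTokenizer.from_pretrained(MODEL)` bound a Promise rather than a tokenizer and needed an await. Also, apply_chat_template() only works with a tokenizer that ships a chat template, and the feature appears to have arrived in @xenova/transformers releases after the 2.10.1 pinned here, which may be part of why this route was abandoned. The sketch below is a reconstruction under those assumptions, not the committed code; it uses the commented-out 'Xenova/TinyLlama-1.1B-Chat-v1.0' from the file's modelsList as the chat-tuned candidate, and templatedPrompt is a hypothetical helper name.

    // assumes a newer @xenova/transformers build than the 2.10.1 pin, for chat-template support
    import { AutoTokenizer } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';

    // a chat-tuned model taken from the file's commented-out modelsList
    const MODEL = 'Xenova/TinyLlama-1.1B-Chat-v1.0';
    const PREPROMPT = `You're a friendly pirate. Please complete the phrase and fill in any [MASK]: `;

    // hypothetical helper: builds the templated prompt string for a given user input
    async function templatedPrompt(input) {
      const tokenizer = await AutoTokenizer.from_pretrained(MODEL); // await: from_pretrained() returns a Promise

      const messages = [
        { role: 'system', content: PREPROMPT },
        { role: 'user', content: input },
      ];

      // tokenize: false returns the formatted prompt as a string rather than token ids
      return tokenizer.apply_chat_template(messages, { tokenize: false });
    }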