// Candle-CoEdIT-Wasm / utils.js
// (from commit 964e2fc "Add more models" by jbochi; Hugging Face viewer
// chrome removed — it was not valid JavaScript.)
/**
 * Ask the worker to compute sentence embeddings.
 *
 * @param {Worker} worker - web worker that runs the model.
 * @param {string} weightsURL - URL of the model weights.
 * @param {string} tokenizerURL - URL of the tokenizer JSON.
 * @param {string} configURL - URL of the model config JSON.
 * @param {string} modelID - key identifying the model variant.
 * @param {string[]} sentences - sentences to embed.
 * @param {(status: object) => void} [updateStatus] - optional progress callback;
 *   called for every non-terminal message from the worker.
 * @param {boolean} [normalize_embeddings=true] - whether the worker should
 *   L2-normalize the embeddings.
 * @returns {Promise<object>} resolves with the worker's final message data.
 */
export async function extractEmbeddings(
  worker,
  weightsURL,
  tokenizerURL,
  configURL,
  modelID,
  sentences,
  updateStatus,
  normalize_embeddings = true
) {
  return new Promise((resolve, reject) => {
    function messageHandler(event) {
      if ("error" in event.data) {
        worker.removeEventListener("message", messageHandler);
        reject(new Error(event.data.error));
        return; // terminal: don't also report this as a status update
      }
      if (event.data.status === "complete") {
        worker.removeEventListener("message", messageHandler);
        resolve(event.data);
        return; // terminal: result already delivered via resolve
      }
      if (updateStatus) updateStatus(event.data);
    }
    // Register the listener before posting so no reply can be missed.
    worker.addEventListener("message", messageHandler);
    worker.postMessage({
      weightsURL,
      tokenizerURL,
      configURL,
      modelID,
      sentences,
      normalize_embeddings,
    });
  });
}
/**
 * Ask the worker to run text generation for a prompt.
 *
 * @param {Worker} worker - web worker that runs the model.
 * @param {string} weightsURL - URL of the model weights.
 * @param {string} tokenizerURL - URL of the tokenizer JSON.
 * @param {string} configURL - URL of the model config JSON.
 * @param {string} modelID - key identifying the model variant.
 * @param {string} prompt - full prompt (task prefix + user text).
 * @param {object} params - generation parameters forwarded to the worker.
 * @param {(status: object) => void} [updateStatus] - optional progress callback;
 *   called for every non-terminal message from the worker.
 * @returns {Promise<object>} resolves with the worker's final message data.
 */
export async function generateText(
  worker,
  weightsURL,
  tokenizerURL,
  configURL,
  modelID,
  prompt,
  params,
  updateStatus
) {
  return new Promise((resolve, reject) => {
    function messageHandler(event) {
      if ("error" in event.data) {
        worker.removeEventListener("message", messageHandler);
        reject(new Error(event.data.error));
        return; // terminal: don't also report this as a status update
      }
      if (event.data.status === "complete") {
        worker.removeEventListener("message", messageHandler);
        resolve(event.data);
        return; // terminal: result already delivered via resolve
      }
      if (updateStatus) updateStatus(event.data);
    }
    // Register the listener before posting so no reply can be missed.
    worker.addEventListener("message", messageHandler);
    worker.postMessage({
      weightsURL,
      tokenizerURL,
      configURL,
      modelID,
      prompt,
      params,
    });
  });
}
// Task prefixes understood by the CoEdIT models. The task key is the taskID
// consumed by getModelInfo; each prefix is prepended to the user's text.
const TASKS = {
  fluency: {
    prefix: "Fix the grammar: ",
    max_length: 300,
  },
  coherence: {
    prefix: "Rewrite to make this easier to understand: ",
    max_length: 300,
  },
  simplification: {
    // NOTE(review): this prefix reads like a T5 translation leftover, not a
    // simplification instruction — confirm against the CoEdIT model card.
    prefix: "translate English to Romanian: ",
    max_length: 300,
  },
  // BUG FIX: this entry previously reused the key `simplification`, which
  // silently overwrote the entry above (later duplicate keys win in object
  // literals). It is its own task, so it gets its own key.
  paraphrasing: {
    prefix: "Paraphrase this: ",
    max_length: 300,
  },
  formalization: {
    prefix: "Write this more formally: ",
    max_length: 300,
  },
  neutralize: {
    prefix: "Write in a more neutral way: ",
    max_length: 300,
  },
};

// All quantized variants are hosted in the same Hugging Face repo.
const QUANTIZED_BASE_URL =
  "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/";

// Model registry: download size, file locations, and the tasks each supports.
export const MODELS = {
  coedit_large_quantized_4k: {
    size: "441 MB",
    base_url: QUANTIZED_BASE_URL,
    model: "model-q4k.gguf",
    tokenizer: "tokenizer.json",
    config: "config.json",
    tasks: TASKS,
  },
  coedit_large_quantized_4_0: {
    size: "441 MB",
    base_url: QUANTIZED_BASE_URL,
    model: "model-q4_0.gguf",
    tokenizer: "tokenizer.json",
    config: "config.json",
    tasks: TASKS,
  },
  coedit_large_quantized_6k: {
    size: "643 MB",
    base_url: QUANTIZED_BASE_URL,
    model: "model.gguf",
    tokenizer: "tokenizer.json",
    config: "config.json",
    tasks: TASKS,
  },
  coedit_xl_quantized_4k: {
    size: "1.6 GB",
    base_url: QUANTIZED_BASE_URL,
    model: "model-xl-q4k.gguf",
    tokenizer: "tokenizer.json",
    config: "config-xl.json",
    tasks: TASKS,
  },
  coedit_xl_quantized_4_0: {
    size: "1.6 GB",
    base_url: QUANTIZED_BASE_URL,
    model: "model-xl-q4_0.gguf",
    tokenizer: "tokenizer.json",
    // BUG FIX: was "config.json" — every other XL variant uses the XL config.
    config: "config-xl.json",
    tasks: TASKS,
  },
  coedit_xl_quantized_6k: {
    size: "2.34 GB",
    base_url: QUANTIZED_BASE_URL,
    model: "model-xl.gguf",
    tokenizer: "tokenizer.json",
    config: "config-xl.json",
    tasks: TASKS,
  },
  coedit_large: {
    size: "3.13 GB",
    base_url: "https://huggingface.co/grammarly/coedit-large/resolve/main/",
    model: "model.safetensors",
    tokenizer: "tokenizer.json",
    config: "config.json",
    tasks: TASKS,
  },
};
/**
 * Resolve the download URLs and generation limit for a model/task pair.
 *
 * @param {string} id - key into MODELS.
 * @param {string} taskID - key into the model's tasks table.
 * @returns {{modelURL: string, configURL: string, tokenizerURL: string, maxLength: number}}
 * @throws {Error} if the model ID or task ID is unknown (previously this
 *   crashed with an opaque TypeError on undefined).
 */
export function getModelInfo(id, taskID) {
  const model = MODELS[id];
  if (!model) {
    throw new Error(`Unknown model ID: ${id}`);
  }
  const task = model.tasks[taskID];
  if (!task) {
    throw new Error(`Unknown task "${taskID}" for model "${id}"`);
  }
  return {
    modelURL: model.base_url + model.model,
    configURL: model.base_url + model.config,
    tokenizerURL: model.base_url + model.tokenizer,
    maxLength: task.max_length,
  };
}