export async function extractEmbeddings( worker, weightsURL, tokenizerURL, configURL, modelID, sentences, updateStatus, normalize_embeddings = true ) { return new Promise((resolve, reject) => { worker.postMessage({ weightsURL, tokenizerURL, configURL, modelID, sentences, normalize_embeddings, }); function messageHandler(event) { if ("error" in event.data) { worker.removeEventListener("message", messageHandler); reject(new Error(event.data.error)); } if (event.data.status === "complete") { worker.removeEventListener("message", messageHandler); resolve(event.data); } if (updateStatus) updateStatus(event.data); } worker.addEventListener("message", messageHandler); }); } export async function generateText( worker, weightsURL, tokenizerURL, configURL, modelID, prompt, params, updateStatus ) { return new Promise((resolve, reject) => { worker.postMessage({ weightsURL, tokenizerURL, configURL, modelID, prompt, params, }); function messageHandler(event) { if ("error" in event.data) { worker.removeEventListener("message", messageHandler); reject(new Error(event.data.error)); } if (event.data.status === "complete") { worker.removeEventListener("message", messageHandler); resolve(event.data); } if (updateStatus) updateStatus(event.data); } worker.addEventListener("message", messageHandler); }); } const TASKS = { fluency: { prefix: "Fix the grammar: ", max_length: 300, }, coherence: { prefix: "Rewrite to make this easier to understand: ", max_length: 300, }, simplification: { prefix: "translate English to Romanian: ", max_length: 300, }, simplification: { prefix: "Paraphrase this: ", max_length: 300, }, formalization: { prefix: "Write this more formally: ", max_length: 300, }, neutralize: { prefix: "Write in a more neutral way: ", max_length: 300, }, }; export const MODELS = { coedit_small_quantized_4_0: { size: "43.4 MB", base_url: "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/", model: "model-small-q4_0.gguf", tokenizer: "tokenizer.json", config: "config-small.json", tasks: TASKS, }, coedit_small_quantized_4k: { size: "59.6 MB", base_url: "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/", model: "model-small-q4k.gguf", tokenizer: "tokenizer.json", config: "config-small.json", tasks: TASKS, }, coedit_small_quantized_6k: { size: "78.2 MB", base_url: "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/", model: "model-small.gguf", tokenizer: "tokenizer.json", config: "config-small.json", tasks: TASKS, }, coedit_small_fp32: { size: "308 MB", base_url: "https://huggingface.co/jbochi/coedit-base/resolve/main/", model: "model.safetensors", tokenizer: "tokenizer.json", config: "config.json", tasks: TASKS, }, coedit_base_quantized_4_0: { size: "139 MB", base_url: "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/", model: "model-base-q4_0.gguf", tokenizer: "tokenizer.json", config: "config-base.json", tasks: TASKS, }, coedit_base_quantized_4k: { size: "139 MB", base_url: "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/", model: "model-base-q4k.gguf", tokenizer: "tokenizer.json", config: "config-base.json", tasks: TASKS, }, coedit_base_quantized_6k: { size: "203 MB", base_url: "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/", model: "model-base.gguf", tokenizer: "tokenizer.json", config: "config-base.json", tasks: TASKS, }, coedit_base_fp32: { size: "990 MB", base_url: "https://huggingface.co/jbochi/coedit-base/resolve/main/", model: "model.safetensors", tokenizer: "tokenizer.json", config: "config.json", tasks: TASKS, }, coedit_large_quantized_4_0: { size: "441 MB", base_url: "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/", model: "model-q4_0.gguf", tokenizer: "tokenizer.json", config: "config.json", tasks: TASKS, }, coedit_large_quantized_4k: { size: "441 MB", base_url: "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/", model: "model-q4k.gguf", tokenizer: "tokenizer.json", config: "config.json", tasks: TASKS, }, coedit_large_quantized_6k: { size: "643 MB", base_url: "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/", model: "model.gguf", tokenizer: "tokenizer.json", config: "config.json", tasks: TASKS, }, coedit_xl_quantized_4_0: { size: "1.6 GB", base_url: "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/", model: "model-xl-q4_0.gguf", tokenizer: "tokenizer.json", config: "config.json", tasks: TASKS, }, coedit_xl_quantized_4k: { size: "1.6 GB", base_url: "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/", model: "model-xl-q4k.gguf", tokenizer: "tokenizer.json", config: "config-xl.json", tasks: TASKS, }, }; export function getModelInfo(id, taskID) { const model = MODELS[id]; return { modelURL: model.base_url + model.model, configURL: model.base_url + model.config, tokenizerURL: model.base_url + model.tokenizer, maxLength: model.tasks[taskID].max_length, }; };