File size: 2,106 Bytes
3a76a4e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 |
import { action } from "./actions.js";
// The callbacks must be supplied before the worker is created.
class LlamaCpp {
  /**
   * Thin wrapper around the llama.cpp module worker: spawns the worker,
   * forwards its lifecycle messages to user-supplied callbacks, and
   * relays run requests to it.
   *
   * @param {string} url - Model location; sent to the worker with the LOAD event.
   * @param {Function} [init_callback] - Called once the worker reports INITIALIZED.
   * @param {Function} [write_result_callback] - Called with each text chunk (WRITE_RESULT).
   * @param {Function} [on_complete_callback] - Called when the run finishes (RUN_COMPLETED).
   */
  constructor(url, init_callback, write_result_callback, on_complete_callback) {
    this.url = url;
    this.init_callback = init_callback;
    this.write_result_callback = write_result_callback;
    this.on_complete_callback = on_complete_callback;
    this.loadWorker();
  }

  /**
   * Spawns the module worker, wires up message dispatch, and posts the
   * initial LOAD request carrying the model URL.
   */
  loadWorker() {
    this.worker = new Worker(new URL("./main-worker.js", import.meta.url), {
      type: "module",
    });

    this.worker.onmessage = (event) => {
      const { data } = event;

      if (data.event === action.INITIALIZED) {
        // Worker is up and ready.
        if (this.init_callback) {
          this.init_callback();
        }
      } else if (data.event === action.WRITE_RESULT) {
        // A chunk of generated text arrived.
        if (this.write_result_callback) {
          this.write_result_callback(data.text);
        }
      } else if (data.event === action.RUN_COMPLETED) {
        // Generation finished.
        if (this.on_complete_callback) {
          this.on_complete_callback();
        }
      }
      // Unknown events are ignored, matching the original switch behavior.
    };

    this.worker.postMessage({
      event: action.LOAD,
      url: this.url,
    });
  }

  /**
   * Starts a generation run. Every option is forwarded to the worker
   * unchanged; the defaults below are applied only when the caller omits
   * a field. Semantics of each option are defined by the worker side
   * (presumably mapping to llama.cpp CLI flags — not verifiable here).
   *
   * @param {Object} [options]
   * @param {string} [options.prompt] - Prompt text to generate from.
   * @param {boolean} [options.chatml=false]
   * @param {number} [options.n_predict=-2]
   * @param {number} [options.ctx_size=2048]
   * @param {number} [options.batch_size=512]
   * @param {number} [options.temp=0.8]
   * @param {number} [options.n_gpu_layers=0]
   * @param {number} [options.top_k=40]
   * @param {number} [options.top_p=0.9]
   * @param {boolean} [options.no_display_prompt=true]
   */
  run({
    prompt,
    chatml = false,
    n_predict = -2,
    ctx_size = 2048,
    batch_size = 512,
    temp = 0.8,
    n_gpu_layers = 0,
    top_k = 40,
    top_p = 0.9,
    no_display_prompt = true,
  } = {}) {
    this.worker.postMessage({
      event: action.RUN_MAIN,
      prompt,
      chatml,
      n_predict,
      ctx_size,
      batch_size,
      temp,
      n_gpu_layers,
      top_k,
      top_p,
      no_display_prompt,
    });
  }
}
export { LlamaCpp };