// Vue is used here as a global (e.g. loaded from a <script> tag); HfInference is imported as an ES module.
const { createApp, ref, onMounted } = Vue;

import { HfInference } from "https://cdn.skypack.dev/@huggingface/inference@latest";

// Text-generation models the user can choose from.
const textGenerationModels = [
  "mistralai/Mistral-7B-v0.1",
  "bigscience/bloom",
];

// Map each response-length option to a max_new_tokens budget.
const responseLengthToTokenCount = {
"short": 100, |
|
"medium": 250, |
|
"long": 500, |
|
} |
|
|
|
const app = createApp({ |
|
  setup() {
    // The Hugging Face access token is persisted in localStorage between visits.
    const token = ref(localStorage.getItem("token") || "");
    const userPrompt = ref("Write about the difference between Star Wars and Star Trek");
    const currentGeneratedText = ref("");
    const models = ref([]);
    const selectedModel = ref("");
    const isRunning = ref(false);
    const responseLength = ref("medium");
    let controller; // AbortController for the current run

    // Open a streaming text-generation request for the selected model.
    const createTextGenerationStream = (hfInstance, prompt, abortControllerSignal) => {
      return hfInstance.textGenerationStream(
        {
          model: selectedModel.value,
          inputs: prompt,
          parameters: { max_new_tokens: responseLengthToTokenCount[responseLength.value] },
        },
        {
          use_cache: false,
          signal: abortControllerSignal,
        }
      );
    };

    // Accumulate streamed tokens, yielding the full text generated so far after each one.
    const generateTextStream = async function* (hfInstance, abortSignal, prompt) {
let generatedText = "" |
|
for await (const output of createTextGenerationStream(hfInstance, prompt, abortSignal)) { |
|
generatedText += output.token.text; |
|
yield generatedText; |
|
} |
|
}; |
|
|
|
const run = async () => { |
|
      isRunning.value = true;
      currentGeneratedText.value = "";
      controller = new AbortController();
      localStorage.setItem("token", token.value);
      const hfInstance = new HfInference(token.value);

      try {
        for await (const textStream of generateTextStream(
          hfInstance,
          controller.signal,
          userPrompt.value
        )) {
          currentGeneratedText.value = textStream;
        }
      } catch (e) {
        // An abort triggered by stop() lands here, as does any request error.
        console.log(e);
      } finally {
        // Reset the flag whether the stream finished on its own or was aborted.
        isRunning.value = false;
      }
    };

    // Abort the in-flight generation, if any.
    const stop = () => {
      if (controller) {
        controller.abort();
      }
      isRunning.value = false;
    };

    // On mount, restore a previously saved token and populate the model list.
    onMounted(async () => {
      const localStorageToken = localStorage.getItem("token");
      if (localStorageToken) {
        token.value = localStorageToken;
      }
      models.value = textGenerationModels;
      selectedModel.value = textGenerationModels[0];
    });

    // Expose state and actions to the template.
    return {
      token,
      userPrompt,
      currentGeneratedText,
      run,
      stop,
      models,
      selectedModel,
      isRunning,
      responseLength,
    };
  },
});

app.mount("#app");
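/*
  A minimal template sketch (an assumption, not taken from this file) showing one way the
  bindings returned from setup() could be wired up inside the #app element. The element
  choices, placeholder text, and layout here are hypothetical.

    <div id="app">
      <input v-model="token" placeholder="Hugging Face token" />
      <select v-model="selectedModel">
        <option v-for="model in models" :key="model">{{ model }}</option>
      </select>
      <select v-model="responseLength">
        <option value="short">short</option>
        <option value="medium">medium</option>
        <option value="long">long</option>
      </select>
      <textarea v-model="userPrompt"></textarea>
      <button v-if="!isRunning" @click="run">Run</button>
      <button v-else @click="stop">Stop</button>
      <pre>{{ currentGeneratedText }}</pre>
    </div>
*/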