import useLLM from "@react-llm/headless";

const Loader = () => {
  const { loadingStatus, isReady, init, gpuDevice } = useLLM();

  // Nothing to render once the model is ready or fully downloaded.
  if (isReady) return null;
  if (loadingStatus.progress === 1) return null;

  // WebGPU unavailable: explain why and which browsers are supported.
  if (gpuDevice.unsupportedReason) {
    return (
      <div>
        <p>Sorry, unsupported!</p>
        <p>Reason: {gpuDevice.unsupportedReason}</p>
        <p>
          This project runs models in the browser with WebGPU and only works in
          Google Chrome v113 and above on desktop with supported GPUs.
          Experimental support may be available for desktop Firefox and Safari
          Technology Preview.
        </p>
      </div>
    );
  }

  // Download not started yet: show an intro and a way to kick it off.
  if (loadingStatus.progress === 0) {
    return (
      <div>
        <h2>web-llm-embed</h2>
        <p>
          💎 Edge-based document chat. No data is sent to the server. The model
          is Vicuna-7B, trained by LMSYS.
        </p>
        <p>
          📄 This will download the model (~4 GB) and may take a few minutes.
          After the first download, it will be cached.
        </p>
        {/* Assumption: the stripped markup included a control that calls
            init(); otherwise the destructured init would be unused. */}
        <button onClick={init}>Load model</button>
      </div>
    );
  }

  // Download in progress: show a rounded percentage.
  return <div>Loading {Math.round(loadingStatus.progress * 100)}%</div>;
};

export default Loader;
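
// ---------------------------------------------------------------------------
// Usage sketch (assumption, not part of this file's source): useLLM() only
// has state to read when the tree is wrapped in the package's context
// provider. The ModelProvider name follows @react-llm/headless's documented
// setup; adjust if your version exposes a different wrapper.
//
// import { ModelProvider } from "@react-llm/headless";
// import Loader from "./Loader";
//
// const App = () => (
//   <ModelProvider>
//     <Loader />
//     {/* chat UI mounts here once isReady is true */}
//   </ModelProvider>
// );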