<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width,initial-scale=1" />
<title>Browser LLM (WASM, mobile)</title>
<style>
:root { --bg:#0b0d10; --card:#14171b; --muted:#9aa4af; --accent:#6ee7b7; --danger:#f87171; --text:#dce3ea; }
* { box-sizing:border-box; }
body { margin:0; background:var(--bg); color:var(--text); font:16px/1.45 system-ui, -apple-system, Segoe UI, Roboto, "Helvetica Neue", Arial, "Apple Color Emoji","Segoe UI Emoji"; }
header { padding:14px 16px; border-bottom:1px solid #21262c; display:flex; gap:10px; align-items:center; }
header h1 { font-size:16px; margin:0; font-weight:600; }
header .pill { font-size:12px; color:var(--bg); background:var(--accent); padding:.2rem .55rem; border-radius:999px; font-weight:700; letter-spacing:.02em; }
main { display:grid; grid-template-rows:auto 1fr auto; height:calc(100dvh - 58px); }
.bar { display:flex; flex-wrap:wrap; gap:8px; padding:10px 12px; background:#0f1216; border-bottom:1px solid #21262c; align-items:center; }
select, input[type="number"], input[type="text"] { background:var(--card); color:var(--text); border:1px solid #29313a; border-radius:10px; padding:8px 10px; }
button { background:#1c2128; color:var(--text); border:1px solid #2a323c; border-radius:12px; padding:10px 12px; font-weight:600; cursor:pointer; }
button.primary { background:var(--accent); color:#08261b; border:none; }
button.ghost { background:transparent; border-color:#2a323c; }
button:disabled { opacity:.6; cursor:not-allowed; }
.grow { flex:1 1 auto; }
.progress { width:160px; height:8px; background:#1a1f25; border-radius:999px; overflow:hidden; border:1px solid #25303a; }
.progress > i { display:block; height:100%; width:0%; background:linear-gradient(90deg,#34d399,#10b981); transition:width .25s ease; }
#stats { font-size:12px; color:var(--muted); display:flex; gap:10px; align-items:center; }
#chat { padding:14px; overflow:auto; background:linear-gradient(#0b0d10, #0d1117); }
.msg { max-width:820px; margin:0 auto 10px auto; display:flex; gap:10px; align-items:flex-start; }
.msg .bubble { background:var(--card); padding:12px 14px; border-radius:16px; border:1px solid #242c35; white-space:pre-wrap; }
.msg.user .bubble { background:#1d2330; }
.msg.assistant .bubble { background:#151c24; }
.role { font-size:12px; color:var(--muted); min-width:68px; text-transform:uppercase; letter-spacing:.04em; }
.inputbar { display:flex; gap:8px; padding:10px; border-top:1px solid #21262c; background:#0f1216; }
textarea { resize:none; height:64px; padding:10px 12px; flex:1 1 auto; border-radius:12px; border:1px solid #2a323c; background:var(--card); color:var(--text); }
.tiny { font-size:12px; color:var(--muted); }
.warn { color:var(--danger); font-weight:600; }
.row { display:flex; gap:8px; align-items:center; flex-wrap:wrap; }
.spacer { flex:1; }
a { color:#93c5fd; }
details { margin-left:8px; }
.note { font-size:12px; color:var(--muted); max-width:720px; }
</style>
</head>
<body>
<header>
<h1>Browser LLM</h1>
<span class="pill">WASM • CPU‑only</span>
<span id="isoNote" class="tiny"></span>
</header>
<main>
<div class="bar">
<label for="model">Model:</label>
<select id="model" class="grow">
<option selected value='{"id":"ggml-org/gemma-3-270m-GGUF","file":"gemma-3-270m-Q8_0.gguf","label":"Gemma‑3‑270M Q8_0 (≈292 MB)"}'>Gemma‑3‑270M Q8_0 (≈292 MB)</option>
<option value='{"id":"mradermacher/OpenELM-270M-Instruct-GGUF","file":"OpenELM-270M-Instruct.Q3_K_S.gguf","label":"OpenELM‑270M‑Instruct Q3_K_S (≈134 MB)"}'>OpenELM‑270M‑Instruct Q3_K_S (≈134 MB)</option>
<option value='{"id":"mradermacher/OpenELM-270M-Instruct-GGUF","file":"OpenELM-270M-Instruct.Q4_K_M.gguf","label":"OpenELM‑270M‑Instruct Q4_K_M (≈175 MB)"}'>OpenELM‑270M‑Instruct Q4_K_M (≈175 MB)</option>
<option value='{"id":"mav23/SmolLM-135M-Instruct-GGUF","file":"smollm-135m-instruct.Q3_K_S.gguf","label":"SmolLM‑135M‑Instruct Q3_K_S (≈88 MB)"}'>SmolLM‑135M‑Instruct Q3_K_S (≈88 MB)</option>
<option value='{"id":"QuantFactory/SmolLM-360M-Instruct-GGUF","file":"SmolLM-360M-Instruct.Q3_K_S.gguf","label":"SmolLM‑360M‑Instruct Q3_K_S (≈219 MB)"}'>SmolLM‑360M‑Instruct Q3_K_S (≈219 MB)</option>
<option value='{"id":"Qwen/Qwen2.5-0.5B-Instruct-GGUF","file":"qwen2.5-0.5b-instruct-q3_k_m.gguf","label":"Qwen2.5‑0.5B‑Instruct Q3_K_M (≈432 MB)"}'>Qwen2.5‑0.5B‑Instruct Q3_K_M (≈432 MB)</option>
<option value='{"id":"Qwen/Qwen2.5-0.5B-Instruct-GGUF","file":"qwen2.5-0.5b-instruct-q4_k_m.gguf","label":"Qwen2.5‑0.5B‑Instruct Q4_K_M (≈491 MB)"}'>Qwen2.5‑0.5B‑Instruct Q4_K_M (≈491 MB)</option>
<option value='{"id":"TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF","file":"tinyllama-1.1b-chat-v1.0.Q3_K_S.gguf","label":"TinyLlama‑1.1B‑Chat Q3_K_S (≈500 MB)"}'>TinyLlama‑1.1B‑Chat Q3_K_S (≈500 MB)</option>
<option value='{"id":"QuantFactory/SmolLM-360M-GGUF","file":"SmolLM-360M.Q4_0.gguf","label":"SmolLM2‑360M Q4_0 (≈229 MB)"}'>SmolLM2‑360M Q4_0 (≈229 MB)</option> | |
<option value='{"id":"QuantFactory/SmolLM-360M-GGUF","file":"SmolLM-360M.Q3_K_S.gguf","label":"SmolLM2‑360M Q3_K_S (≈219 MB, faster)"}'>SmolLM2‑360M Q3_K_S (≈219 MB, faster)</option> | |
<option value='{"id":"QuantFactory/SmolLM-360M-GGUF","file":"SmolLM-360M.Q2_K.gguf","label":"SmolLM2‑360M Q2_K (≈200 MB, min RAM / quality drop)"}'>SmolLM2‑360M Q2_K (≈200 MB, min RAM / quality drop)</option> | |
<option value='{"custom":true,"label":"Custom HF GGUF (e.g., Gemma‑3‑270M)"}'>Custom HF GGUF (e.g., Gemma‑3‑270M)</option> | |
</select> | |
<details id="customBox"> | |
<summary class="tiny">Custom GGUF (paste HF repo + file)</summary> | |
<div class="row"> | |
<label class="tiny">HF repo id</label> | |
<input id="customRepo" type="text" placeholder="e.g. google/gemma-3-270m-GGUF (when available)" style="width:280px" /> | |
<label class="tiny">file</label> | |
<input id="customFile" type="text" placeholder="e.g. gemma-3-270m.Q4_0.gguf" style="width:240px" /> | |
</div> | |
<div class="note">Note: official <a href="https://huggingface.co/google/gemma-3-270m" target="_blank" rel="noreferrer">Gemma‑3‑270M</a> is the base HF repo. A ready‑to‑use public GGUF is now available at <a href="https://huggingface.co/ggml-org/gemma-3-270m-GGUF" target="_blank" rel="noreferrer">ggml‑org/gemma‑3‑270m‑GGUF</a> (currently providing <code>gemma-3-270m-Q8_0.gguf</code> ≈292 MB). For maximum speed on low‑RAM phones, the OpenELM‑270M‑Instruct Q3_K_S option above is even lighter, but Gemma‑3‑270M offers strong quality for its size.</div> | |
</details> | |
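<!-- Sanity-check tip (illustrative; follows the usual HF file layout): a custom repo/file pair can be
verified by opening https://huggingface.co/<repo-id>/resolve/main/<file> in a new tab. If that URL
does not serve the GGUF, loading it from this page will fail as well. -->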
<div class="row"> | |
<label>Max new tokens</label> | |
<input id="nPredict" type="number" min="1" max="512" step="1" value="128" /> | |
</div> | |
<div class="row"> | |
<label>Temp</label><input id="temp" type="number" min="0" max="2" step="0.1" value="0.7" style="width:80px" /> | |
<label>Top‑p</label><input id="topp" type="number" min="0" max="1" step="0.05" value="0.9" style="width:80px" /> | |
<label>Top‑k</label><input id="topk" type="number" min="1" max="100" step="1" value="40" style="width:80px" /> | |
</div> | |
<div class="spacer"></div> | |
<button id="loadBtn" class="primary">Load model</button> | |
<button id="unloadBtn" class="ghost" disabled>Unload</button> | |
<div class="progress" title="download progress"><i id="prog"></i></div> | |
<div id="stats">idle</div> | |
</div> | |
<div id="chat" aria-live="polite"></div> | |
<form class="inputbar" id="form"> | |
<textarea id="input" placeholder="Ask me anything…" required></textarea> | |
<div class="row" style="flex-direction:column; gap:6px; align-items:flex-end"> | |
<button id="sendBtn" class="primary">Send</button> | |
<button id="stopBtn" type="button" class="ghost" disabled>Stop</button> | |
<div class="tiny">Context kept small for mobile perf</div> | |
</div> | |
</form> | |
</main> | |
<script type="module"> | |
// ——— Imports ——— | |
import { Wllama, LoggerWithoutDebug } from "https://cdn.jsdelivr.net/npm/@wllama/wllama@2.3.1/esm/index.js"; | |
const CONFIG_PATHS = { | |
"single-thread/wllama.wasm": "https://cdn.jsdelivr.net/npm/@wllama/wllama@2.3.1/esm/single-thread/wllama.wasm", | |
"multi-thread/wllama.wasm" : "https://cdn.jsdelivr.net/npm/@wllama/wllama@2.3.1/esm/multi-thread/wllama.wasm", | |
}; | |
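// Note: wllama is expected to pick the multi-thread build only when the page is cross-origin
// isolated (SharedArrayBuffer available) and to fall back to the single-thread build otherwise,
// so both paths above need to stay reachable. See noteIsolation() below for the runtime check.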
// ——— DOM refs ———
const $model = document.getElementById('model');
const $load = document.getElementById('loadBtn');
const $unload = document.getElementById('unloadBtn');
const $prog = document.getElementById('prog');
const $stats = document.getElementById('stats');
const $chat = document.getElementById('chat');
const $form = document.getElementById('form');
const $input = document.getElementById('input');
const $send = document.getElementById('sendBtn');
const $stop = document.getElementById('stopBtn');
const $iso = document.getElementById('isoNote');
const $customBox = document.getElementById('customBox');
const $customRepo = document.getElementById('customRepo');
const $customFile = document.getElementById('customFile');
// ——— State ———
const decoder = new TextDecoder();
const wllama = new Wllama(CONFIG_PATHS, { logger: LoggerWithoutDebug });
let aborter = null;
let loaded = false;
let eotToken = -1;
let sysPrompt = "You are a helpful, concise assistant. Keep answers short and clear.";
// Keep RAM low for mobile: small context, quantized q4_0 K-cache and f16 V-cache (WASM-safe; no flash attention)
const LOAD_CONFIG = {
n_ctx: 768,
n_batch: 128,
cache_type_k: "q4_0",
cache_type_v: "f16",
flash_attn: false,
progressCallback: ({ loaded, total }) => {
const pct = (total && total > 0) ? Math.round(loaded / total * 100) : 0;
$prog.style.width = pct + '%';
}
};
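// Back-of-envelope KV-cache estimate, to show why n_ctx 768 with a q4_0 K-cache and f16 V-cache
// fits low-RAM phones. This is a sketch, not llama.cpp's exact allocation; the shape values
// (nLayer, nKvHeads, headDim) would come from the GGUF metadata, and q4_0 is taken as ~0.56 B/elem.
function estimateKvCacheBytes({ nCtx, nLayer, nKvHeads, headDim, kBytesPerElem = 0.5625, vBytesPerElem = 2 }) {
// One K vector and one V vector of (nKvHeads × headDim) elements per token, per layer.
return nCtx * nLayer * nKvHeads * headDim * (kBytesPerElem + vBytesPerElem);
}
// e.g. a 0.5B-class model (roughly 24 layers, 2 KV heads, head dim 64) at n_ctx 768 needs only a
// few MB of cache, far below the model weights themselves.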
const messages = [ { role: "system", content: sysPrompt } ];
// ——— Chat template for Gemma IT ———
const GEMMA_JINJA = `{{ bos_token }}
{%- if messages[0]['role'] == 'system' -%}
{%- if messages[0]['content'] is string -%}
{%- set first_user_prefix = messages[0]['content'] + '\n\n' -%}
{%- else -%}
{%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%}
{%- endif -%}
{%- set loop_messages = messages[1:] -%}
{%- else -%}
{%- set first_user_prefix = "" -%}
{%- set loop_messages = messages -%}
{%- endif -%}
{%- for message in loop_messages -%}
{%- set role = (message['role'] == 'assistant') and 'model' or message['role'] -%}
<start_of_turn>{{ role }}
{{ (loop.first and first_user_prefix or '') ~ (message['content'] if message['content'] is string else message['content'][0]['text']) | trim }}<end_of_turn>
{%- endfor -%}
{%- if add_generation_prompt -%}
<start_of_turn>model
{%- endif -%}`;
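// For a single user turn, the template above should render roughly as:
//   <bos><start_of_turn>user
//   {system prompt}
//
//   {user message}<end_of_turn><start_of_turn>model
// i.e. the system prompt is folded into the first user turn, which is what Gemma's instruction
// format expects (Gemma has no separate system role).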
// ——— UI helpers ———
const ui = {
add(role, text) {
const row = document.createElement('div');
row.className = 'msg ' + role;
row.innerHTML = `
<div class="role">${role}</div>
<div class="bubble"></div>
`;
row.querySelector('.bubble').textContent = text;
$chat.appendChild(row);
$chat.scrollTop = $chat.scrollHeight;
return row.querySelector('.bubble');
},
setStats(txt) { $stats.textContent = txt; }
};
function noteIsolation() {
if (!crossOriginIsolated) {
$iso.textContent = 'Single‑thread mode (serve with COOP/COEP for multithread)';
} else {
$iso.textContent = 'Cross‑origin isolated: multithread on';
}
}
noteIsolation();
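// crossOriginIsolated is only true when the page is served with these response headers (a web
// platform requirement, not specific to wllama):
//   Cross-Origin-Opener-Policy: same-origin
//   Cross-Origin-Embedder-Policy: require-corp
// Minimal static-server sketch (assumes Node with Express; purely illustrative):
//   app.use((_, res, next) => {
//     res.set('Cross-Origin-Opener-Policy', 'same-origin');
//     res.set('Cross-Origin-Embedder-Policy', 'require-corp');
//     next();
//   });
//   app.use(express.static('.'));
// With COEP: require-corp, cross-origin assets (the CDN scripts and the HF download) must allow CORS/CORP.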
function truncateHistoryForMobile(maxTokensRough = 900) {
const maxChars = maxTokensRough * 4; // rough heuristic: ~4 chars per token
function clip(s) { return s.length <= maxChars ? s : ('…' + s.slice(s.length - maxChars)); }
const kept = [messages[0]]; // always keep the system prompt
const lastTurns = messages.slice(1).slice(-8); // skip the system message so it is not duplicated, keep the last 8 turns
for (const m of lastTurns) kept.push({ role: m.role, content: clip(m.content) });
messages.length = 0; messages.push(...kept);
}
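// The ×4 chars-per-token figure is only a heuristic (English text averages about four characters
// per BPE token). With maxTokensRough = 600 each kept message is clipped to ~2,400 characters; this
// bounds per-message size, not the whole prompt, so long histories can still approach n_ctx = 768.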
function getSelectedModel() {
const parsed = JSON.parse($model.value);
if (parsed.custom) {
const id = ($customRepo.value || '').trim();
const file = ($customFile.value || '').trim();
if (!id || !file) throw new Error('Enter HF repo id and GGUF file for custom model.');
return { id, file, label: `Custom: ${id}/${file}` };
}
return parsed;
}
function isGemmaSelected() {
const { id, file, label } = getSelectedModel();
return /gemma/i.test(id) || /gemma/i.test(file) || /gemma/i.test(label);
}
async function ensureLoaded() {
if (loaded) return;
$prog.style.width = '0%';
const choice = getSelectedModel();
ui.setStats(`Fetching ${choice.file}…`);
try {
await wllama.loadModelFromHF(choice.id, choice.file, LOAD_CONFIG);
} catch (e) {
throw new Error(`Load failed for ${choice.id}/${choice.file}. If the repo is gated or lacks CORS, try a public mirror / different quant. Details: ${e?.message || e}`);
}
loaded = true;
eotToken = wllama.getEOT();
const meta = await wllama.getModelMetadata();
const ctx = wllama.getLoadedContextInfo();
const thr = wllama.getNumThreads?.() ?? 1;
ui.setStats(`Loaded ${choice.file} • ${meta.n_params?.toLocaleString?.() || ''} params • ctx ${ctx.n_ctx} • threads ${thr}`);
$load.disabled = true; $unload.disabled = false;
}
async function unloadModel() {
try { await wllama.exit(); } catch {}
loaded = false;
$load.disabled = false; $unload.disabled = true;
$prog.style.width = '0%';
ui.setStats('idle');
}
// ——— Chat flow ———
$load.addEventListener('click', () => ensureLoaded().catch(err => ui.setStats(String(err?.message || err))));
$unload.addEventListener('click', unloadModel);
$stop.addEventListener('click', () => aborter?.abort());
$model.addEventListener('change', () => {
const isCustom = JSON.parse($model.value).custom === true;
$customBox.open = isCustom;
});
$form.addEventListener('submit', async (ev) => {
ev.preventDefault();
const text = ($input.value || '').trim();
if (!text) return;
// Surface load errors in the stats area instead of failing silently.
try { await ensureLoaded(); } catch (err) { ui.setStats(String(err?.message || err)); return; }
messages.push({ role: 'user', content: text });
ui.add('user', text);
$input.value = '';
const assistantBubble = ui.add('assistant', '');
truncateHistoryForMobile(600);
$send.disabled = true; $stop.disabled = true; // flip to false on stream start
aborter = new AbortController();
const nPredict = parseInt(document.getElementById('nPredict').value, 10);
const temp = parseFloat(document.getElementById('temp').value);
const top_p = parseFloat(document.getElementById('topp').value);
const top_k = parseInt(document.getElementById('topk').value, 10);
const t0 = performance.now();
let outText = '';
try {
const opts = {
stream: true,
useCache: true,
nPredict,
sampling: { temp, top_p, top_k },
stopTokens: eotToken > 0 ? [eotToken] : undefined,
abortSignal: aborter.signal
};
let stream;
if (isGemmaSelected()) {
// Render messages with Gemma template, then complete as plain text
const prompt = await wllama.formatChat(messages, /* addAssistant */ true, GEMMA_JINJA);
$stop.disabled = false;
stream = await wllama.createCompletion(prompt, opts);
} else {
// Other models: rely on their embedded chat templates
$stop.disabled = false;
stream = await wllama.createChatCompletion(messages, opts);
}
for await (const chunk of stream) {
// Decode with the shared streaming decoder so multi-byte UTF-8 split across chunks stays intact.
const piece = decoder.decode(chunk.piece, { stream: true });
outText += piece;
assistantBubble.textContent = outText;
$chat.scrollTop = $chat.scrollHeight;
}
const dt = (performance.now() - t0) / 1000;
const tokSec = Math.max(1, Math.round(outText.length / 4)) / dt;
ui.setStats(`gen: ${tokSec.toFixed(1)} tok/s (rough)`);
messages.push({ role: 'assistant', content: outText });
} catch (err) {
if (err && err.name === 'AbortError') {
assistantBubble.textContent += '\n\n[stopped]';
} else {
console.error(err);
// Append the error as plain text so markup in the message cannot be injected into the bubble.
const warn = document.createElement('span');
warn.className = 'warn';
warn.textContent = `\n\nError: ${String(err?.message || err)}`;
assistantBubble.appendChild(warn);
}
}
} finally {
$send.disabled = false; $stop.disabled = true;
aborter = null;
}
});
// Enter‑to‑send on mobile; Shift+Enter for newline
$input.addEventListener('keydown', (e) => {
if (e.key === 'Enter' && !e.shiftKey) {
e.preventDefault();
$send.click();
}
});
</script>
<!--
Changes for Gemma:
• Added GEMMA_JINJA chat template (<start_of_turn>/<end_of_turn> with BOS).
• When a Gemma model is selected, messages are formatted via wllama.formatChat(..., GEMMA_JINJA)
and sent to createCompletion() to avoid the ChatML (<|im_start|>/<|im_end|>) fallback.
• Non‑Gemma models still use createChatCompletion().
-->
</body>
</html> | |