Create stream-textgeneration.html
stream-textgeneration.html  ADDED  +130 -0
@@ -0,0 +1,130 @@
<!DOCTYPE html>
<html>
  <head>
    <meta charset="UTF-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <script src="https://cdn.tailwindcss.com"></script>
    <!-- polyfill for firefox + import maps -->
    <script src="https://unpkg.com/es-module-shims@1.7.0/dist/es-module-shims.js"></script>
    <script type="importmap">
      {
        "imports": {
          "@huggingface/inference": "https://cdn.jsdelivr.net/npm/@huggingface/inference@1.7.1/+esm"
        }
      }
    </script>
  </head>
  <body>
    <form class="w-[90%] mx-auto pt-8" onsubmit="launch(); return false;">
      <h1 class="text-3xl font-bold">
        <span
          class="bg-clip-text text-transparent bg-gradient-to-r from-pink-500 to-violet-500"
        >
          Streaming text generation demo with
          <a href="https://github.com/huggingface/huggingface.js">
            <kbd>@huggingface/inference</kbd>
          </a>
        </span>
      </h1>

      <p class="mt-8">
        First, input your token if you have one! Otherwise, you may encounter
        rate limiting. You can create a token for free at
        <a
          target="_blank"
          href="https://huggingface.co/settings/tokens"
          class="underline text-blue-500"
          >hf.co/settings/tokens</a
        >
      </p>

      <input
        type="text"
        id="token"
        class="rounded border-2 border-blue-500 shadow-md px-3 py-2 w-96 mt-6"
        placeholder="token (optional)"
      />

      <p class="mt-8">
        Pick the model you want to run. Check out over 10k models for text to
        text generation
        <a
          href="https://huggingface.co/models?pipeline_tag=text2text-generation&sort=likes"
          class="underline text-blue-500"
          target="_blank"
        >
          here</a
        >
      </p>

      <!-- Default model: https://huggingface.co/google/flan-t5-xxl -->
      <input
        type="text"
        id="model"
        class="rounded border-2 border-blue-500 shadow-md px-3 py-2 w-96 mt-6"
        value="google/flan-t5-xxl"
        required
      />

      <p class="mt-8">Finally the prompt</p>

      <textarea
        class="rounded border-blue-500 shadow-md px-3 py-2 w-96 mt-6 block"
        rows="5"
        id="prompt"
      >
Q: How is butter made?

Describe the process from the beginning
      </textarea
      >

      <button
        id="submit"
        class="my-8 bg-green-500 rounded py-3 px-5 text-white shadow-md disabled:bg-slate-300"
      >
        Run
      </button>

      <p class="text-gray-400 text-sm">Output logs</p>
      <div id="logs" class="bg-gray-100 rounded p-3 mb-8 text-sm">
        Output will be here
      </div>

      <p>Check out the <a class="underline text-blue-500" href="https://huggingface.co/spaces/huggingfacejs/streaming-text-generation/blob/main/index.html" target="_blank">source code</a></p>
    </form>

    <script type="module">
      import { HfInference } from "@huggingface/inference";
      let running = false;
      async function launch() {
        if (running) {
          return;
        }
        running = true;
        try {
          const hf = new HfInference(
            document.getElementById("token").value.trim() || undefined
          );
          const model = document.getElementById("model").value.trim();
          const prompt = document.getElementById("prompt").value.trim();
          document.getElementById("logs").textContent = "";
          for await (const output of hf.textGenerationStream({
            model,
            inputs: prompt,
            parameters: { max_new_tokens: 250 }
          }, {
            use_cache: false
          })) {
            document.getElementById("logs").textContent += output.token.text;
          }
        } catch (err) {
          alert("Error: " + err.message);
        } finally {
          running = false;
        }
      }
      window.launch = launch;
    </script>
  </body>
</html>
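The streaming behaviour in this file comes from hf.textGenerationStream, which yields one event per generated token; the page simply appends each output.token.text to the #logs div as it arrives. Below is a minimal standalone sketch of the same call for reference. The Node-style setup, the HF_TOKEN environment variable, and the hard-coded prompt are illustrative assumptions, not part of the committed file.

// Minimal sketch (assumes `npm install @huggingface/inference`, or the same
// CDN import map used in the page above, and an ES module context so
// top-level `await` is allowed).
import { HfInference } from "@huggingface/inference";

// Placeholder token: pass your own access token, or omit it and accept possible rate limiting.
const hf = new HfInference(process.env.HF_TOKEN);

let generated = "";
for await (const output of hf.textGenerationStream(
  {
    model: "google/flan-t5-xxl",
    inputs: "Q: How is butter made?\n\nDescribe the process from the beginning",
    parameters: { max_new_tokens: 250 },
  },
  { use_cache: false } // request a fresh generation instead of a cached result
)) {
  // Each streamed event carries one newly generated token.
  generated += output.token.text;
}
console.log(generated);

With caching left on, the Inference API can return a previously computed result in one go, which would defeat the token-by-token effect; that is presumably why the demo passes use_cache: false.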