coyotte508 (HF staff) committed
Commit 6212809
1 Parent(s): 3fe0889

Update index.html

Files changed (1)
  1. index.html +119 -11
index.html CHANGED
@@ -1,19 +1,127 @@
  <!DOCTYPE html>
  <html>
  <head>
- <meta charset="utf-8" />
- <meta name="viewport" content="width=device-width" />
- <title>My static Space</title>
- <link rel="stylesheet" href="style.css" />
+ <meta charset="UTF-8" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+ <script src="https://cdn.tailwindcss.com"></script>
+ <!-- polyfill for firefox + import maps -->
+ <script src="https://unpkg.com/es-module-shims@1.7.0/dist/es-module-shims.js"></script>
+ <script type="importmap">
+ {
+ "imports": {
+ "@huggingface/inference": "https://cdn.jsdelivr.net/npm/@huggingface/inference@1.7.0/+esm"
+ }
+ }
+ </script>
  </head>
  <body>
- <div class="card">
- <h1>Welcome to your static Space!</h1>
- <p>You can modify this app directly by editing <i>index.html</i> in the Files and versions tab.</p>
- <p>
- Also don't forget to check the
- <a href="https://huggingface.co/docs/hub/spaces" target="_blank">Spaces documentation</a>.
+ <form class="w-[90%] mx-auto pt-8" onsubmit="launch(); return false;">
+ <h1 class="text-3xl font-bold">
+ <span
+ class="bg-clip-text text-transparent bg-gradient-to-r from-pink-500 to-violet-500"
+ >
+ Streaming text generation demo with
+ <a href="https://github.com/huggingface/huggingface.js">
+ <kbd>@huggingface/inference</kbd>
+ </a>
+ </span>
+ </h1>
+
+ <p class="mt-8">
+ First, input your token if you have one! Otherwise, you may encounter
+ rate limits. You can create a token for free at
+ <a
+ target="_blank"
+ href="https://huggingface.co/settings/tokens"
+ class="underline text-blue-500"
+ >hf.co/settings/tokens</a
+ >
  </p>
- </div>
+
+ <input
+ type="text"
+ id="token"
+ class="rounded border-2 border-blue-500 shadow-md px-3 py-2 w-96 mt-6"
+ placeholder="token (optional)"
+ />
+
+ <p class="mt-8">
+ Pick the model you want to run. Check out over 10k models for text to
+ text generation
+ <a
+ href="https://huggingface.co/models?pipeline_tag=text2text-generation"
+ class="underline text-blue-500"
+ target="_blank"
+ >
+ here</a
+ >
+ </p>
+
+ <input
+ type="text"
+ id="model"
+ class="rounded border-2 border-blue-500 shadow-md px-3 py-2 w-96 mt-6"
+ value="google/flan-t5-xxl"
+ required
+ />
+
+ <p class="mt-8">Finally the prompt</p>
+
+ <textarea
+ class="rounded border-blue-500 shadow-md px-3 py-2 w-96 mt-6 block"
+ rows="5"
+ id="prompt"
+ >
+ Q: How is butter made?
+
+ Describe the process from the beginning
+ </textarea
+ >
+
+ <button
+ id="submit"
+ class="my-8 bg-green-500 rounded py-3 px-5 text-white shadow-md disabled:bg-slate-300"
+ >
+ Run
+ </button>
+
+ <p class="text-gray-400 text-sm">Output logs</p>
+ <pre id="logs" class="bg-gray-100 rounded p-3 mb-8 text-sm">
+ Output will be here</pre
+ >
+ </form>
+
+ <script type="module">
+ import { HfInference } from "@huggingface/inference";
+
+ let running = false;
+ async function launch() {
+ if (running) {
+ return;
+ }
+ running = true;
+ try {
+ const hf = new HfInference(
+ document.getElementById("token").value.trim() || undefined
+ );
+ const model = document.getElementById("model").value.trim();
+ const prompt = document.getElementById("prompt").value.trim();
+ document.getElementById("logs").textContent = "";
+
+ for await (const output of hf.textGenerationStream({
+ model,
+ inputs: prompt,
+ use_cache: false,
+ })) {
+ document.getElementById("logs").textContent += output.token.text;
+ }
+ } catch (err) {
+ alert("Error: " + err.message);
+ } finally {
+ running = false;
+ }
+ }
+ window.launch = launch;
+ </script>
  </body>
  </html>