jbilcke-hf committed
Commit d388d3b • 1 Parent(s): 92f3002

ok, let's use any model from the HF Inference API

Files changed (5):
  1. README.md +5 -5
  2. package-lock.json +2 -2
  3. package.json +3 -3
  4. public/index.html +20 -12
  5. src/index.mts +5 -1
README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-title: Webapp Factory OpenAssistant
+title: Webapp Factory (any model)
 emoji: 🏭🧙
 colorFrom: brown
 colorTo: red
@@ -10,7 +10,7 @@ app_port: 7860
 
 A basic demo of generating HTML content using the Hugging Face Inference API
 
-This version is using OpenAssistant, and can only work to generate basic HTML content.
+This version can use any model from the Hugging Face Inference API.
 
 Ready to be used in a Hugging Face Space.
 
@@ -39,6 +39,6 @@ npm run docker
 This script is a shortcut executing the following commands:
 
 ```bash
-docker build -t webapp-factory-openassistant .
-docker run -it -p 7860:7860 webapp-factory-openassistant
-```
+docker build -t webapp-factory-any-model .
+docker run -it -p 7860:7860 webapp-factory-any-model
+```
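Once the renamed image is running (it still listens on port 7860), the effect of this change can be checked directly against the `/app` endpoint. Below is a minimal usage sketch, not part of this commit, assuming the container was started with `npm run docker` and that the `model` and `prompt` query parameters behave as added in src/index.mts further down:

```typescript
// Hedged usage sketch: query the running Space with an explicit model
// instead of the previously hard-coded OpenAssistant checkpoint.
const model = "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5" // any Inference API text-generation model
const prompt = "A simple page to compute the bmi, using kg and meters"

const res = await fetch(
  `http://localhost:7860/app?model=${encodeURIComponent(model)}&prompt=${encodeURIComponent(prompt)}`
)

// The server streams the generated HTML back as plain text.
console.log(await res.text())
```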
package-lock.json CHANGED
@@ -1,11 +1,11 @@
 {
-  "name": "webapp-factory-openassistant",
+  "name": "webapp-factory-any-model",
   "version": "1.0.0",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
-      "name": "webapp-factory-openassistant",
+      "name": "webapp-factory-any-model",
       "version": "1.0.0",
       "license": "Apache License",
       "dependencies": {
package.json CHANGED
@@ -1,13 +1,13 @@
 {
-  "name": "webapp-factory-openassistant",
+  "name": "webapp-factory-any-model",
   "version": "1.0.0",
   "description": "A basic demo of generating HTML content using the Hugging Face Inference API. Designed to run in a Hugging Face space.",
   "main": "src/index.mts",
   "scripts": {
     "start": "node --loader ts-node/esm src/index.mts",
     "docker": "npm run docker:build && npm run docker:run",
-    "docker:build": "docker build -t webapp-factory-openassistant .",
-    "docker:run": "docker run -it -p 7860:7860 webapp-factory-openassistant"
+    "docker:build": "docker build -t webapp-factory-any-model .",
+    "docker:run": "docker run -it -p 7860:7860 webapp-factory-any-model"
   },
   "author": "Julian Bilcke <julian.bilcke@huggingface.co>",
   "license": "Apache License",
public/index.html CHANGED
@@ -19,25 +19,30 @@
 <div
 class="py-2 space-y-4 text-stone-600 transition-all delay-150 ease-in-out"
 :class="open ? 'md:text-lg lg:text-xl' : 'text-2xl'">
-<p>A space to generate web content with OpenAssistant using the Inference API.</p>
-<p>Feel free to duplicate and create interesting forks 🔧</p>
+<p>A space to generate web content using models hosted on Hugging Face.</p>
 </div>
+<input
+type="text"
+name="modelDraft"
+x-model="modelDraft"
+class="input input-bordered w-full rounded text-lg text-stone-500 bg-stone-200 font-mono"
+/>
 <textarea
-name="draft"
-x-model="draft"
+name="promptDraft"
+x-model="promptDraft"
 rows="10"
 placeholder="A simple page to compute the bmi, using kg and meters"
 class="input input-bordered w-full rounded text-lg text-stone-500 bg-stone-300 font-mono h-48"
 ></textarea>
 <button
 class="btn disabled:text-stone-400"
-@click="open = true, prompt = draft, state = state === 'stopped' ? 'loading' : 'stopped'"
-:class="draft.length < minPromptSize ? 'btn-neutral' : state === 'stopped' ? 'btn-accent' : 'btn-warning'"
-:disabled="draft.length < minPromptSize"
+@click="open = true, prompt = promptDraft, model = modelDraft, state = state === 'stopped' ? 'loading' : 'stopped'"
+:class="promptDraft.length < minPromptSize ? 'btn-neutral' : state === 'stopped' ? 'btn-accent' : 'btn-warning'"
+:disabled="promptDraft.length < minPromptSize"
 >
-<span x-show="draft.length < minPromptSize">Prompt too short to generate</span>
-<span x-show="draft.length >= minPromptSize && state !== 'stopped'">Stop now</span>
-<span x-show="draft.length >= minPromptSize && state === 'stopped'">Generate!</span>
+<span x-show="promptDraft.length < minPromptSize">Prompt too short to generate</span>
+<span x-show="promptDraft.length >= minPromptSize && state !== 'stopped'">Stop now</span>
+<span x-show="promptDraft.length >= minPromptSize && state === 'stopped'">Generate!</span>
 </button>
 <span class="py-3" x-show="state === 'loading'">Waiting for the stream to begin (might take a few minutes)..</span>
 <span class="py-3" x-show="state === 'streaming'">
@@ -53,7 +58,7 @@
 class="border-none w-full h-screen"
 :src="!open
 ? '/placeholder.html'
-: `/app?prompt=${prompt}`
+: `/app?model=${encodeURIComponent(model)}&prompt=${encodeURIComponent(prompt)}`
 "></iframe>
 </div>
 </div>
@@ -90,10 +95,13 @@ function humanFileSize(bytes, si=false, dp=1) {
 return bytes.toFixed(dp) + ' ' + units[u];
 }
 function app() {
+const defaultModel = 'OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5'
 return {
 open: false,
-draft: new URLSearchParams(window.location.search).get('prompt') || 'A simple page to compute the bmi, using kg and meters',
+promptDraft: new URLSearchParams(window.location.search).get('prompt') || 'A simple page to compute the bmi, using kg and meters',
 prompt: '',
+modelDraft: defaultModel,
+model: defaultModel,
 size: 0,
 minPromptSize: 16, // if you change this, you will need to also change in src/index.mts
 timeoutInSec: 10, // time before we determine something went wrong
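The client-side change boils down to keeping a draft/committed pair for both the prompt and the model. Here is a minimal sketch of the state the new Alpine.js bindings manipulate (markup and directives omitted; the iframe URL simply mirrors the `:src` binding above):

```typescript
// Sketch of the state added to app() in public/index.html.
// The new text input edits modelDraft; clicking "Generate!" copies the drafts
// into prompt/model, and the iframe reloads with both as query parameters.
const defaultModel = "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"

const state = {
  open: false,
  promptDraft:
    new URLSearchParams(window.location.search).get("prompt") ||
    "A simple page to compute the bmi, using kg and meters",
  prompt: "",
  modelDraft: defaultModel,
  model: defaultModel,
}

// What the iframe loads once open is true (otherwise it shows /placeholder.html)
const appUrl = `/app?model=${encodeURIComponent(state.model)}&prompt=${encodeURIComponent(state.prompt)}`
```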
src/index.mts CHANGED
@@ -55,6 +55,10 @@ app.get("/debug", (req, res) => {
 
 app.get("/app", async (req, res) => {
 
+const model = `${req.query.model || 'OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5'}`
+
+console.log('model:', model)
+
 if (`${req.query.prompt}`.length < minPromptSize) {
 res.write(`prompt too short, please enter at least ${minPromptSize} characters`)
 res.end()
@@ -96,7 +100,7 @@ ${prefix}`
 try {
 let result = ''
 for await (const output of hf.textGenerationStream({
-model: 'OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
+model,
 inputs: finalPrompt,
 parameters: { max_new_tokens: 1024 }
 })) {
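Taken together, the server-side change is small: read an optional `model` query parameter, fall back to the previous OpenAssistant default, and pass it through to `textGenerationStream`. The sketch below condenses the resulting handler under a few assumptions: the prompt templating, error handling, and token setup from the full file are omitted, and the `HF_API_TOKEN` variable name is an assumption rather than taken from this diff.

```typescript
import express from "express"
import { HfInference } from "@huggingface/inference"

const app = express()
// Assumption: the env var name for the token; use whatever the existing app reads.
const hf = new HfInference(process.env.HF_API_TOKEN)
const minPromptSize = 16

app.get("/app", async (req, res) => {
  // New in this commit: the model can be chosen per request.
  const model = `${req.query.model || "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"}`
  const prompt = `${req.query.prompt || ""}`

  if (prompt.length < minPromptSize) {
    res.write(`prompt too short, please enter at least ${minPromptSize} characters`)
    res.end()
    return
  }

  try {
    for await (const output of hf.textGenerationStream({
      model, // previously hard-coded to the OpenAssistant checkpoint
      inputs: prompt,
      parameters: { max_new_tokens: 1024 },
    })) {
      res.write(output.token.text) // stream tokens to the client as they arrive
    }
  } catch (err) {
    console.error(err)
  }
  res.end()
})

app.listen(7860)
```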