Update index.html
index.html (+21, -22)
@@ -21,36 +21,35 @@
 <gradio-lite>
 <gradio-file name="app.py" entrypoint>
 import gradio as gr
+from transformers_js_py import pipeline

-
-
-
-
-    return output_image
-
-demo = gr.Interface(
-    process,
-    "image",
-    "image",
-    examples=["lion.jpg", "logo.png"],
+generator = await pipeline(
+    "text-generation",
+    "onnx-community/Qwen2.5-0.5B-Instruct",
+    { "dtype": "q4", "device": "webgpu" }
 )

-
-
+async def chat_response(message, history):
+    messages = [
+        { "role": "system", "content": "You are a great assistant." },
+        { "role": "user", "content": message }
+    ]

-
-
+    output = await generator(messages, {
+        "max_new_tokens": 256,
+        "do_sample": True,
+        "temperature": 0.3,
+    })
+    response = output[0]["generated_text"][-1]["content"]
+    return response

-
-    return rgb2gray(image)
-</gradio-file>
+demo = gr.ChatInterface(chat_response, type="messages", autofocus=False)

-
-
+demo.launch()
+</gradio-file>

 <gradio-requirements>
-
-scikit-image
+transformers-js-py
 </gradio-requirements>
 </gradio-lite>
 </body>
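The new app.py runs entirely in the browser: Gradio-Lite executes the Python in Pyodide (which is why the top-level await pipeline(...) is valid), and the transformers-js-py requirement bridges to Transformers.js, so the q4-quantized Qwen2.5-0.5B-Instruct model is downloaded and run client-side on WebGPU. When given a chat-style message list, the text-generation pipeline returns the conversation including the newly generated assistant turn, which is why output[0]["generated_text"][-1]["content"] extracts the reply.

The hunk starts at line 21, so the markup that loads Gradio-Lite is not part of this diff. A minimal host page looks roughly like the sketch below; the CDN URLs are the ones from the Gradio-Lite docs and are an assumption, not necessarily what this repo uses.

<html>
<head>
    <!-- Load the Gradio-Lite runtime and styles (assumed CDN paths) -->
    <script type="module" crossorigin src="https://cdn.jsdelivr.net/npm/@gradio/lite/dist/lite.js"></script>
    <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@gradio/lite/dist/lite.css" />
</head>
<body>
    <!-- <gradio-lite> block with app.py and <gradio-requirements> as shown in the diff above -->
</body>
</html>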