Update app.py
app.py CHANGED
@@ -3,13 +3,19 @@ from huggingface_hub import InferenceClient
 # Use a pipeline as a high-level helper
 import os
 from huggingface_hub import login
-login(token=os.getenv("access_key"))
 
+from transformers import pipeline
+login(token=os.getenv("access_key"))
+messages1 = [
+    {"role": "user", "content": "Who are you?"},
+]
+pipe = pipeline("text-generation", model="google/recurrentgemma-2b-it")
+print (pipe(messages1) )
 
 """
 For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 """
-client = InferenceClient("google/recurrentgemma-2b-it")
+client = InferenceClient(model="google/recurrentgemma-2b-it")
 
 
 def respond(
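Taken together, the added lines load google/recurrentgemma-2b-it twice: once locally through the transformers pipeline helper as a one-off smoke test at startup, and once remotely through InferenceClient, which now receives the model name via an explicit model= keyword. A minimal, self-contained sketch of the added logic, assuming the Space stores its Hugging Face token under the secret name access_key used in the diff:

import os

from huggingface_hub import InferenceClient, login
from transformers import pipeline

# google/recurrentgemma-2b-it is a gated model, so authenticate first with the
# token read from the "access_key" environment variable / Space secret.
login(token=os.getenv("access_key"))

# One-off local generation: the text-generation pipeline accepts chat-style
# messages and returns the generated continuation, printed once at startup.
messages1 = [
    {"role": "user", "content": "Who are you?"},
]
pipe = pipeline("text-generation", model="google/recurrentgemma-2b-it")
print(pipe(messages1))

# Serverless Inference API client, presumably used by the respond() handler below.
client = InferenceClient(model="google/recurrentgemma-2b-it")

Downloading and running the 2B model locally at import time is heavyweight for a Space; the chat handler defined below presumably still answers through client.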