Add FastAPI for handling POST requests
app.py
CHANGED
@@ -1,29 +1,31 @@
 import streamlit as st
 from transformers import pipeline
-
-
-import uvicorn
+import subprocess
+import sys
 
-app = FastAPI()
 
+# Ensure PyTorch is installed
+def install_pytorch():
+    subprocess.check_call([sys.executable, "-m", "pip", "install", "torch"])
 
-class Input(BaseModel):
-    input: str
 
+try:
+    import torch
+except ImportError:
+    install_pytorch()
 
-
+st.title("Hugging Face Model Demo")
+
+
+@st.cache_resource
 def load_model():
     return pipeline("text-generation", model="klyang/MentaLLaMA-chat-7B")
 
 
 model = load_model()
 
-
-
-
-
-
-
-
-if __name__ == "__main__":
-    uvicorn.run(app, host="0.0.0.0", port=8000)
+user_input = st.text_input("Enter your text:")
+if user_input:
+    with st.spinner("Generating response..."):
+        result = model(user_input)
+        st.success(result[0]["generated_text"])
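The commit title refers to FastAPI POST handling, but the endpoint itself is not visible among the removed lines. For reference, a minimal sketch of how the removed pieces (the Input model, the shared pipeline, and the uvicorn.run call) would typically fit together; the /generate route name, the handler body, and the response shape are assumptions, not code recovered from this diff:

import uvicorn
from fastapi import FastAPI
from pydantic import BaseModel
from transformers import pipeline

app = FastAPI()


class Input(BaseModel):
    input: str


def load_model():
    return pipeline("text-generation", model="klyang/MentaLLaMA-chat-7B")


# Load the pipeline once at startup so every request reuses it
model = load_model()


# Hypothetical endpoint: the diff does not show the actual removed handler
@app.post("/generate")
def generate(data: Input):
    result = model(data.input)
    return {"generated_text": result[0]["generated_text"]}


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)

A client would then POST JSON to the assumed route, e.g.:

curl -X POST http://localhost:8000/generate \
    -H "Content-Type: application/json" \
    -d '{"input": "How are you feeling today?"}'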
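On the new Streamlit-only version: installing torch at runtime with subprocess is a workaround, since on Hugging Face Spaces dependencies are normally declared up front so they are present before app.py runs, and @st.cache_resource already keeps the 7B pipeline from being reloaded on every widget interaction. A minimal requirements.txt sketch under that assumption (versions left unpinned for illustration):

streamlit
transformers
torch

With that in place, the try/except install guard becomes unnecessary, and the app runs locally with "streamlit run app.py" rather than by starting uvicorn.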