Spaces:
Paused
Paused
new model
Browse files- app.py +31 -27
- chatui.py +36 -0
- requirements.txt +0 -2
app.py
CHANGED
@@ -1,28 +1,32 @@
|
|
1 |
-
from fastapi import FastAPI
|
2 |
-
from
|
3 |
-
|
4 |
-
|
5 |
app = FastAPI()
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
|
|
|
|
|
|
|
|
|
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
import transformers

app = FastAPI()

# Load the model once at import time so every request reuses the same weights.
model_name = 'Intel/neural-chat-7b-v3-1'
model = transformers.AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)


class ChatInput(BaseModel):
    # System instruction and user message, slotted into the neural-chat
    # "### System / ### User / ### Assistant" prompt template below.
    system_input: str
    user_input: str


@app.post("/generate-response")
async def generate_response(chat_input: ChatInput):
    """Generate an assistant reply for the given system/user inputs.

    Returns {"response": <assistant text>}. Any failure during prompt
    construction or generation surfaces as HTTP 500 with the error message.
    """
    try:
        # Format the input using the prompt template for this model.
        prompt = f"### System:\n{chat_input.system_input}\n### User:\n{chat_input.user_input}\n### Assistant:\n"

        # Tokenize and encode the prompt.
        inputs = tokenizer.encode(prompt, return_tensors="pt", add_special_tokens=False)

        # BUG FIX: the original used max_length=1000, which bounds prompt +
        # completion *together* — a long prompt leaves little or no room for
        # the reply. max_new_tokens bounds only the generated continuation.
        outputs = model.generate(inputs, max_new_tokens=1000, num_return_sequences=1)
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # The decoded text still contains the prompt; keep only the part
        # after the assistant marker.
        return {"response": response.split("### Assistant:\n")[-1]}
    except Exception as e:
        # Chain the cause so the original traceback is preserved in logs.
        raise HTTPException(status_code=500, detail=str(e)) from e
chatui.py
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import streamlit as st
import random


st.title("💬 Juoksuta AI: R&D LAB")

col1, col2 = st.columns(2)

with col1:
    # Debug helper: dump the whole session state to the server console.
    if st.button('Tulosta viestiketju.'):
        print(st.session_state)

with col2:
    if st.button('Tyhjennä viestiketju'):
        # BUG FIX: the original iterated st.session_state.keys() directly
        # while deleting entries — mutating a mapping during iteration of
        # its live keys view raises RuntimeError on a plain dict. Snapshot
        # the keys first so deletion is safe.
        for key in list(st.session_state.keys()):
            del st.session_state[key]

# Seed the conversation with a greeting on first load (or after a clear).
if "messages" not in st.session_state:
    st.session_state["messages"] = [{"role": "assistant", "content": "Miten voin auttaa?"}]

# Replay the stored chat history on every rerun.
for msg in st.session_state.messages:
    st.chat_message(msg["role"]).write(msg["content"])

# Canned placeholder replies while no real model backend is wired in.
msg_bank = ["Aurinko paistaa", "Kuu loistaa", "Tietokone pörisee"]

if prompt := st.chat_input("Kirjoita tähän."):
    st.session_state.messages.append({"role": "user", "content": prompt})
    st.chat_message("user").write(prompt)

    # Pick a random canned reply in place of a model response.
    msg = random.choice(msg_bank)

    st.session_state.messages.append({"role": "assistant", "content": msg})
    st.chat_message("assistant").write(msg)
requirements.txt
CHANGED
@@ -1,6 +1,4 @@
|
|
1 |
fastapi==0.74.*
|
2 |
requests==2.27.*
|
3 |
uvicorn[standard]==0.17.*
|
4 |
-
sentencepiece==0.1.*
|
5 |
-
torch==1.11.*
|
6 |
transformers==4.*
|
|
|
1 |
fastapi==0.74.*
|
2 |
requests==2.27.*
|
3 |
uvicorn[standard]==0.17.*
|
|
|
|
|
4 |
transformers==4.*
|