aleyfin committed on
Commit
d22b8ea
1 Parent(s): 77b190b
Files changed (3):
  1. app.py +31 -27
  2. chatui.py +36 -0
  3. requirements.txt +0 -2
app.py CHANGED
@@ -1,28 +1,32 @@
- from fastapi import FastAPI
- from transformers import pipeline
-
- # Create a new FastAPI app instance
+ from fastapi import FastAPI, HTTPException
+ from pydantic import BaseModel
+ import transformers
+
  app = FastAPI()
-
- # Initialize the text generation pipeline.
- # This function will be able to generate text
- # given an input.
- pipe = pipeline("text2text-generation",
-                 model="google/flan-t5-small")
-
- # Define a function to handle the GET request at `/generate`.
- # The generate() function is defined as a FastAPI route that takes a
- # string parameter called text. The function generates text based on the input using the pipeline() object, and returns a JSON response
- # containing the generated text under the key "output".
- @app.get("/generate")
- def generate(text: str):
-     """
-     Using the text2text-generation pipeline from `transformers`, generate text
-     from the given input text. The model used is `google/flan-t5-small`, which
-     can be found [here](https://huggingface.co/google/flan-t5-small).
-     """
-     # Use the pipeline to generate text from the given input text
-     output = pipe(text)
-
-     # Return the generated text in a JSON response
-     return {"output": output[0]["generated_text"]}
+
+ model_name = 'Intel/neural-chat-7b-v3-1'
+ model = transformers.AutoModelForCausalLM.from_pretrained(model_name)
+ tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
+
+ class ChatInput(BaseModel):
+     system_input: str
+     user_input: str
+
+ @app.post("/generate-response")
+ async def generate_response(chat_input: ChatInput):
+     try:
+         # Format the input using the model's prompt template
+         prompt = f"### System:\n{chat_input.system_input}\n### User:\n{chat_input.user_input}\n### Assistant:\n"
+
+         # Tokenize and encode the prompt
+         inputs = tokenizer.encode(prompt, return_tensors="pt", add_special_tokens=False)
+
+         # Generate a response
+         outputs = model.generate(inputs, max_length=1000, num_return_sequences=1)
+         response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+         # Extract only the assistant's response
+         return {"response": response.split("### Assistant:\n")[-1]}
+
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=str(e))
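For reference, a minimal sketch of a client call against the new endpoint, assuming the app is served locally with uvicorn app:app --port 8000 (the host, port, and example inputs are assumptions, not part of this commit):

import requests

# Hypothetical local address; point it at wherever uvicorn serves app.py.
resp = requests.post(
    "http://localhost:8000/generate-response",
    json={
        "system_input": "You are a helpful assistant.",
        "user_input": "Summarize what FastAPI does in one sentence.",
    },
)
resp.raise_for_status()
print(resp.json()["response"])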
chatui.py ADDED
@@ -0,0 +1,36 @@
+ import streamlit as st
+ import random
+
+
+ st.title("💬 Juoksuta AI: R&D LAB")
+
+ col1, col2 = st.columns(2)
+
+ with col1:
+     if st.button('Tulosta viestiketju.'):  # "Print the message thread."
+         print(st.session_state)  # logs the session state to the server console
+
+ with col2:
+     if st.button('Tyhjennä viestiketju'):  # "Clear the message thread"
+         for key in list(st.session_state.keys()):  # copy the keys; deleting while iterating the live view raises
+             del st.session_state[key]
+
+ if "messages" not in st.session_state:
+     st.session_state["messages"] = [{"role": "assistant", "content": "Miten voin auttaa?"}]  # "How can I help?"
+
+ for msg in st.session_state.messages:
+     st.chat_message(msg["role"]).write(msg["content"])
+
+ msg_bank = ["Aurinko paistaa", "Kuu loistaa", "Tietokone pörisee"]  # "The sun is shining", "The moon is glowing", "The computer is humming"
+
+ if prompt := st.chat_input("Kirjoita tähän."):  # "Write here."
+
+
+     st.session_state.messages.append({"role": "user", "content": prompt})
+     st.chat_message("user").write(prompt)
+
+     msg = random.choice(msg_bank)  # placeholder reply chosen at random
+
+     st.session_state.messages.append({"role": "assistant", "content": msg})
+     st.chat_message("assistant").write(msg)
+
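The UI above answers from the canned msg_bank. A minimal sketch of how the assistant turn could instead call the /generate-response endpoint from app.py (the URL, system prompt, timeout, and helper name are assumptions, not part of this commit):

import requests

API_URL = "http://localhost:8000/generate-response"  # assumed local backend address

def backend_reply(user_text: str) -> str:
    # Post the user's message to the FastAPI endpoint defined in app.py
    payload = {"system_input": "You are a helpful assistant.", "user_input": user_text}
    try:
        resp = requests.post(API_URL, json=payload, timeout=120)
        resp.raise_for_status()
        return resp.json()["response"]
    except requests.RequestException as exc:
        return f"Backend error: {exc}"

Swapping msg = random.choice(msg_bank) for msg = backend_reply(prompt) would route replies through the model; the page itself runs with streamlit run chatui.py.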
requirements.txt CHANGED
@@ -1,6 +1,4 @@
  fastapi==0.74.*
  requests==2.27.*
  uvicorn[standard]==0.17.*
- sentencepiece==0.1.*
- torch==1.11.*
  transformers==4.*
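A dependency note, not part of the commit: chatui.py imports streamlit, which is not pinned here, and the new app.py loads the model through transformers' AutoModelForCausalLM, which still needs a tensor backend at runtime. If neither arrives transitively, pins along these lines (versions are assumptions) would cover both:

streamlit==1.*
torch==2.*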