Update app.py
app.py CHANGED
@@ -3,12 +3,13 @@ import tempfile
 import gradio as gr
 import torch
 import logging
+import base64

 from operator import itemgetter
 from langchain_openai import ChatOpenAI, OpenAIEmbeddings
 from langchain_community.document_loaders import PyPDFLoader
 from langchain_community.embeddings import HuggingFaceEmbeddings
-from langchain_core.prompts import ChatPromptTemplate
+from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
 from langchain_community.vectorstores.chroma import Chroma
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.schema import AIMessage, HumanMessage
@@ -16,6 +17,11 @@ from langchain_core.output_parsers import StrOutputParser
 from langchain.globals import set_debug
 from dotenv import load_dotenv

+def image_to_base64(image_path):
+    with open(image_path, "rb") as image_file:
+        encoded_string = base64.b64encode(image_file.read()).decode('utf-8')
+    return encoded_string
+
 # configure logging
 logging.basicConfig(level=logging.INFO)

@@ -92,10 +98,6 @@ llm = ChatOpenAI(
     model_name="gpt-4-0125-preview", openai_api_key=openai_api_key, temperature=0.1, streaming=True
 )

-llm_translate = ChatOpenAI(
-    model_name="gpt-3.5-turbo", openai_api_key=openai_api_key, temperature=0.0
-)
-
 retriever = configure_retriever(local_files)

 template = """Answer the question based only on the following context:
@@ -103,59 +105,78 @@ template = """Answer the question based only on the following context:

 Question: {question}

-Answer in German Language. If the question is not related to the context, answer with "I don't know"
+Answer in German Language. If the question is not related to the context, answer with "I don't know".
 """

+
 prompt = ChatPromptTemplate.from_template(template)

-…
-        "question": itemgetter("question"),
-    }
-    | prompt
-    | llm
+
+chain_translate = (
+    llm
     | StrOutputParser()
 )

-…
+chain_rag = (
+    {
+        "context": itemgetter("question") | retriever,
+        "question": itemgetter("question"),
+        "history": itemgetter("history")
+    }
+    | prompt
+    | llm
     | StrOutputParser()
 )

-…
 def predict(message, history):
-    message = chain_translate.invoke(f"Translate this …
+    message = chain_translate.invoke(f"Translate this query to English if it is in German otherwise return original content: {message}")
+
     history_langchain_format = []
+    partial_message = ""
+
     for human, ai in history:
         history_langchain_format.append(HumanMessage(content=human))
         history_langchain_format.append(AIMessage(content=ai))
     history_langchain_format.append(HumanMessage(content=message))
-…
-    yield …
-…
+    for response in chain_rag.stream({"question": message, "history": history_langchain_format}):
+        partial_message += response
+        yield partial_message
+
 image_path = "./ui/logo.png" if os.path.exists("./ui/logo.png") else "./logo.png"
+logo_base64 = image_to_base64(image_path)
+
+# CSS with the Base64-encoded image
+css = f"""
+body::before {{
+    content: '';
+    display: block;
+    height: 150px !important; /* Adjust based on your logo's size */
+    background: url('data:image/png;base64,{logo_base64}') no-repeat center center !important;
+    background-size: contain !important; /* This makes sure the logo fits well in the header */
+}}
+
+#q-output {{
+    max-height: 60vh !important;
+    overflow: auto !important;
+}}
+"""

-… (about 20 removed lines are not preserved in this view)
-if __name__ == "__main__":
-    demo.launch()
+gr.ChatInterface(
+    predict,
+    chatbot=gr.Chatbot(likeable=True),
+    textbox=gr.Textbox(placeholder="stell mir Fragen", scale=7),
+    description="Ich bin Ihr hilfreicher KI-Assistent",
+    theme="soft",
+    submit_btn="Senden",
+    retry_btn="🔄 Wiederholen",
+    undo_btn="⏪ Rückgängig",
+    clear_btn="🗑️ Löschen",
+    examples=[
+        "Generate auditing questions about Change Management",
+        "Generate auditing questions about Software Maintenance",
+        "Generate auditing questions about Data Protection"
+    ],
+    #cache_examples=True,
+    fill_height=True,
+    css=css,
+).launch(show_api=False)
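For readers less familiar with the LCEL pattern that the new chain_rag uses, here is a minimal, self-contained sketch of the same itemgetter-based input mapping. It is not part of app.py: fake_retriever and fake_llm are illustrative stand-ins for the Chroma retriever and the ChatOpenAI model, so the snippet runs offline with only langchain_core installed.

from operator import itemgetter

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda

# Offline stand-ins for the Chroma retriever and the ChatOpenAI model.
fake_retriever = RunnableLambda(lambda q: f"[documents matching: {q}]")
fake_llm = RunnableLambda(lambda prompt_value: "stub answer for -> " + prompt_value.to_string())

prompt = ChatPromptTemplate.from_template(
    "Answer the question based only on the following context:\n"
    "{context}\n\nQuestion: {question}\n"
)

# chain_rag in the diff additionally maps "history": itemgetter("history");
# that key is omitted here to keep the sketch minimal.
chain = (
    {
        "context": itemgetter("question") | fake_retriever,  # the question drives retrieval
        "question": itemgetter("question"),                  # and is also passed through unchanged
    }
    | prompt
    | fake_llm
    | StrOutputParser()
)

print(chain.invoke({"question": "What is change management?"}))

The dict at the head of the chain is coerced into a RunnableParallel, so every key is computed from the same input before the prompt is formatted. chain_rag in the diff does exactly this with the real retriever and LLM, and predict consumes it via .stream() so partial answers can be yielded into the Gradio chat; .invoke() is used here only to keep the example short.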