muhtasham commited on
Commit
a1b0c16
1 Parent(s): d18e04f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -35
app.py CHANGED
@@ -93,46 +93,58 @@ def configure_retriever(local_files, chunk_size=15000, chunk_overlap=2500):
93
  directory = "docs" if os.path.exists("docs") else "."
94
  local_files = [f for f in os.listdir(directory) if f.endswith(".pdf")]
95
 
96
- # Setup LLM
97
- llm = ChatOpenAI(
98
- model_name="gpt-4-0125-preview", openai_api_key=openai_api_key, temperature=0.1, streaming=True
99
- )
 
100
 
101
- retriever = configure_retriever(local_files)
102
 
103
- template = """Answer the question based only on the following context:
104
- {context}
105
 
106
- Question: {question}
 
107
 
108
- Chat History: {history}
109
 
110
- Answer in German Language. If the question is not related to the context, answer with "I don't know".
111
- If the user is asking for follow-up questions on the same topic, generate different questions than you already answered.
112
- """
 
 
 
 
 
113
 
 
114
 
115
- prompt = ChatPromptTemplate.from_template(template)
 
 
 
116
 
 
 
 
 
 
 
 
 
 
 
 
117
 
118
- chain_translate = (
119
- llm
120
- | StrOutputParser()
121
- )
122
 
123
- chain_rag = (
124
- {
125
- "context": itemgetter("question") | retriever,
126
- "question": itemgetter("question"),
127
- "history": itemgetter("history")
128
- }
129
- | prompt
130
- | llm
131
- | StrOutputParser()
132
- )
133
 
134
- def predict(message, history):
135
- message = chain_translate.invoke(f"Translate this query to English if it is in German otherwise return original contetn: {message}")
 
 
 
136
 
137
  history_langchain_format = []
138
  partial_message = ""
@@ -141,7 +153,7 @@ def predict(message, history):
141
  history_langchain_format.append(HumanMessage(content=human))
142
  history_langchain_format.append(AIMessage(content=ai))
143
  history_langchain_format.append(HumanMessage(content=message))
144
- for response in chain_rag.stream({"question": message, "history": history_langchain_format}):
145
  partial_message += response
146
  yield partial_message
147
 
@@ -174,12 +186,10 @@ submit_btn="Senden",
174
  retry_btn="🔄 Wiederholen",
175
  undo_btn="⏪ Rückgängig",
176
  clear_btn="🗑️ Löschen",
177
- examples=[
178
- "Generate auditing questions about Change Management",
179
- "Generate auditing questions about Software Maintenance",
180
- "Generate auditing questions about Data Protection"
181
  ],
182
- #cache_examples=True,
183
  fill_height=True,
184
  css=css,
185
  ).launch(show_api=False)
 
93
  directory = "docs" if os.path.exists("docs") else "."
94
  local_files = [f for f in os.listdir(directory) if f.endswith(".pdf")]
95
 
96
+ def setup_llm(system_message):
97
+ # Setup LLM
98
+ llm = ChatOpenAI(
99
+ model_name="gpt-4o", openai_api_key=openai_api_key, temperature=0.1, streaming=True
100
+ )
101
 
102
+ retriever = configure_retriever(local_files)
103
 
104
+ template = system_message + """
 
105
 
106
+ Answer the question based only on the following context in its original language.
107
+ {context}
108
 
109
+ Question: {question}
110
 
111
+ Original Message: {original_msg}
112
+
113
+ Chat History: {history}
114
+
115
+ If the question is not related to the context, answer with "I don't know" in the original language.
116
+ If the user is asking for follow-up questions on the same topic, generate different questions than you already answered.
117
+ If the user is asking to explain the context, or expand on the context, then provide explanation in the original language.
118
+ """
119
 
120
+ prompt = ChatPromptTemplate.from_template(template)
121
 
122
+ chain_translate = (
123
+ llm
124
+ | StrOutputParser()
125
+ )
126
 
127
+ chain_rag = (
128
+ {
129
+ "context": itemgetter("question") | retriever,
130
+ "question": itemgetter("question"),
131
+ "original_msg": itemgetter("original_msg"),
132
+ "history": itemgetter("history")
133
+ }
134
+ | prompt
135
+ | llm
136
+ | StrOutputParser()
137
+ )
138
 
139
+ return chain_rag, chain_translate
 
 
 
140
 
141
+ def predict(message, history, system_message):
 
 
 
 
 
 
 
 
 
142
 
143
+ logging.info(system_message)
144
+
145
+ chain_rag, chain_translate = setup_llm(system_message)
146
+
147
+ message_transalated = chain_translate.invoke(f"Translate this query to English if it is in German otherwise return original content: {message}")
148
 
149
  history_langchain_format = []
150
  partial_message = ""
 
153
  history_langchain_format.append(HumanMessage(content=human))
154
  history_langchain_format.append(AIMessage(content=ai))
155
  history_langchain_format.append(HumanMessage(content=message))
156
+ for response in chain_rag.stream({"question": message_transalated, "original_msg": message, "history": history_langchain_format}):
157
  partial_message += response
158
  yield partial_message
159
 
 
186
  retry_btn="🔄 Wiederholen",
187
  undo_btn="⏪ Rückgängig",
188
  clear_btn="🗑️ Löschen",
189
+ additional_inputs=[
190
+ gr.Textbox("You are an auditor with many years of professional experience and are to develop a questionnaire on the topic of home office in the form of a self-assessment for me. As a basis for the questionnaire, you use standards and best practices (for example, from ISO 27001 and COBIT). The questionnaire should not exceed 20 questions.", label="System Prompt")
 
 
191
  ],
192
+ cache_examples=False,
193
  fill_height=True,
194
  css=css,
195
  ).launch(show_api=False)