not-lain committed on
Commit 331be52
Parents (2): 11abe35, 448b5d7

Merge branch 'main' of https://huggingface.co/spaces/TeamTonic/MultiMed

Files changed (1)
  1. app.py +38 -4
app.py CHANGED
@@ -230,6 +230,32 @@ def convert_to_markdown(vectara_response_json):
     return "No data found in the response."
 # Main function to handle the Gradio interface logic
 
+def process_summary_with_openai(summary):
+    """
+    This function takes a summary text as input and processes it with OpenAI's GPT model.
+    """
+    try:
+        # Ensure that the OpenAI client is properly initialized
+        client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))
+
+        # Create the prompt for OpenAI's completion
+        prompt = "You are a clinical consultant discussing training cases with students at TonicUniversity. You will receive a summary assessment. Assess and describe the proper options in minute detail. Propose a course of action based on your assessment. Exclude any other commentary:"
+
+        # Call the OpenAI API with the prompt and the summary
+        completion = client.chat.completions.create(
+            model="gpt-4-1106-preview",  # Make sure to use the correct model name
+            messages=[
+                {"role": "system", "content": prompt},
+                {"role": "user", "content": summary}
+            ]
+        )
+
+        # Extract the content from the completion
+        final_summary = completion.choices[0].message.content
+        return final_summary
+    except Exception as e:
+        return str(e)
+
 
 def process_and_query(text=None):
     try:
@@ -245,12 +271,20 @@ def process_and_query(text=None):
         # text = "the user asks the following to his health adviser " + text
 
 
-        # Now, use the text (either provided by the user or obtained from OpenAI) to query Vectara
+
+        # Use the text to query Vectara
         vectara_response_json = query_vectara(text)
+
+        # Convert the Vectara response to Markdown
         markdown_output = convert_to_markdown(vectara_response_json)
-        client = OpenAI()
-        prompt ="Answer in the same language, write it better,remove the sources, more understandable and make it 2 line answer:"
-        markdown_output_final = markdown_output
+
+        # Process the summary with OpenAI
+        final_response = process_summary_with_openai(markdown_output)
+
+        # Return the processed summary along with the full output
+        return f"**Summary**: {final_response}\n\n**Full output**:\n{markdown_output}"
+    except Exception as e:
+        return str(e)
 
         completion = client.chat.completions.create(
             model="gpt-3.5-turbo",
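
Not part of the commit: a minimal standalone sketch of the same chat-completion call that the new process_summary_with_openai helper makes, useful for smoke-testing the prompt and model outside Gradio. It assumes the openai>=1.0 Python client is installed and OPENAI_API_KEY is set in the environment; the summarize wrapper name and the sample summary string are hypothetical, not taken from app.py.

# Standalone sketch mirroring the chat-completion call added in this commit.
# Assumption: openai>=1.0 client installed and OPENAI_API_KEY exported.
import os
from openai import OpenAI

def summarize(summary: str) -> str:
    # Hypothetical wrapper; the committed helper is process_summary_with_openai in app.py.
    client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
    prompt = (
        "You are a clinical consultant discussing training cases with students "
        "at TonicUniversity. You will receive a summary assessment. Assess and "
        "describe the proper options in minute detail. Propose a course of "
        "action based on your assessment. Exclude any other commentary:"
    )
    completion = client.chat.completions.create(
        model="gpt-4-1106-preview",
        messages=[
            {"role": "system", "content": prompt},
            {"role": "user", "content": summary},
        ],
    )
    return completion.choices[0].message.content

if __name__ == "__main__":
    # Hypothetical sample input used only to exercise the call.
    sample = "Patient reports mild headache and fatigue; vitals within normal limits."
    print(summarize(sample))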