omlakhani committed on
Commit
db12bd7
1 Parent(s): a203390

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +98 -101
app.py CHANGED
@@ -8,11 +8,17 @@ from langchain import OpenAI, LLMChain
8
  from cachetools import cached, TTLCache
9
  import openai
10
 
 
11
  s3 = boto3.resource('s3')
12
  bucket_name = "notesinendocrinology"
13
- bucket = s3.Bucket(bucket_name)
14
- for obj in bucket.objects.filter(Prefix="comboindex.json"):
15
- combo_index_path = obj.key
 
 
 
 
 
16
 
17
  @cached(cache=TTLCache(maxsize=1, ttl=3600)) # cache for 1 hour
18
  def get_combo_index():
@@ -21,96 +27,69 @@ def get_combo_index():
21
  index = GPTSimpleVectorIndex.load_from_disk('comboindex.json')
22
  return index
23
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
  def generate_variations(question):
25
- def send_message(message_log):
26
- # Use OpenAI's ChatCompletion API to get the chatbot's response
27
- response = openai.ChatCompletion.create(
28
- model="gpt-3.5-turbo", # The name of the OpenAI chatbot model to use
29
- messages=message_log, # The conversation history up to this point, as a list of dictionaries
30
- max_tokens=1000, # The maximum number of tokens (words or subwords) in the generated response
31
- stop=None, # The stopping sequence for the generated response, if any (not used here)
32
- temperature=0.7, # The "creativity" of the generated response (higher temperature = more creative)
33
- )
34
-
35
- # Find the first response from the chatbot that has text in it (some responses may not have text)
36
- for choice in response.choices:
37
- if "text" in choice:
38
- return choice.text
39
-
40
- # If no response with text is found, return the first response's content (which may be empty)
41
- return response.choices[0].message.content
42
-
43
  def extract(input):
44
-
45
  message_log = [{"role": "system", "content": input}]
46
- user_input = f"Generate more questions from the following question: {input}. Give two more questions only. The questions are intended for knowledgeable doctors"
47
  message_log.append({"role": "user", "content": user_input})
48
  response = send_message(message_log)
49
  message_log.append({"role": "assistant", "content": response})
50
  text = str(response)
51
  print(response)
52
  return response
53
-
54
  input2 = question
55
-
56
-
57
-
58
  my_string = "0. " + question
59
  output = extract(input2)
60
  output_list = output.split("\n")
61
  final_list = [my_string] + output_list
62
  print(final_list)
63
-
64
 
65
-
66
  return final_list
67
 
68
- def consolidated_answer(question, oginput):
69
- def send_message(message_log):
70
- # Use OpenAI's ChatCompletion API to get the chatbot's response
71
- response = openai.ChatCompletion.create(
72
- model="gpt-3.5-turbo", # The name of the OpenAI chatbot model to use
73
- messages=message_log, # The conversation history up to this point, as a list of dictionaries
74
- max_tokens=1000, # The maximum number of tokens (words or subwords) in the generated response
75
- stop=None, # The stopping sequence for the generated response, if any (not used here)
76
- temperature=0.7, # The "creativity" of the generated response (higher temperature = more creative)
77
- )
78
-
79
- # Find the first response from the chatbot that has text in it (some responses may not have text)
80
- for choice in response.choices:
81
- if "text" in choice:
82
- return choice.text
83
-
84
- # If no response with text is found, return the first response's content (which may be empty)
85
- return response.choices[0].message.content
86
-
87
- def extract(input):
88
-
89
- message_log = [{"role": "system", "content": input}]
90
- user_input = f"Give a consolidated answer from this: {input}. It should answer the original question {oginput}. The answer is for knowledgeable doctors so use medical terms."
91
- message_log.append({"role": "user", "content": user_input})
92
- response = send_message(message_log)
93
- message_log.append({"role": "assistant", "content": response})
94
- text = str(response)
95
- print(response)
96
- return response
97
-
98
- input2 = question
99
-
100
-
101
- output = extract(input2)
102
-
103
- print(output)
104
- return output
105
 
106
-
107
-
108
 
109
  def querying_db(query: str):
110
- index = get_combo_index()
111
- response = index.query(query)
112
- return response
113
 
 
114
 
115
 
116
  tools = [
@@ -147,49 +126,68 @@ def get_answer(query_string):
147
  return result
148
 
149
 
150
- def get_answer2(list_thing, answer_type):
151
  responses = []
152
- if answer_type == "Detailed Answer":
153
- for question in list_thing:
154
- answer = get_answer(question)
155
- response = f"{question}\n{answer}"
156
- responses.append(response)
157
-
158
- return "\n\n".join(responses)
159
- elif answer_type == "Consolidated answer":
160
- for question in list_thing:
161
- answer = get_answer(question)
162
- response = f"{question}\n{answer}"
163
- responses.append(response)
164
-
165
- return "\n\n".join(responses)
166
-
167
- else:
168
- question = list_thing[0]
169
  answer = get_answer(question)
170
  response = f"{question}\n{answer}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
171
  return response
172
 
 
 
 
 
 
 
173
 
174
 
 
 
 
175
 
176
-
 
 
 
 
177
 
 
178
 
 
 
 
 
 
 
 
 
179
 
180
- def qa_app(query, answer_type):
181
- question_list = generate_variations(query)
182
- big_answer = get_answer2(question_list, answer_type)
183
- if answer_type == "Consolidated answer":
184
- final_answer = consolidated_answer(big_answer, query)
185
- return final_answer
186
  else:
187
- return big_answer
188
-
189
-
 
 
 
 
 
190
  inputs = [
191
  gr.inputs.Textbox(label="Enter your question:"),
192
- gr.inputs.Dropdown(choices=["Quick Answer", "Detailed Answer", "Consolidated answer"], label="Choose answer type:")
193
  ]
194
 
195
  output = gr.outputs.Textbox(label="Answer:")
@@ -202,4 +200,3 @@ iface = gr.Interface(
202
  )
203
 
204
  iface.launch()
205
-
 
8
  from cachetools import cached, TTLCache
9
  import openai
10
 
11
+
12
# Download the vector-index files from S3 once at startup so query-time
# code can load them straight from local disk.
s3 = boto3.resource('s3')
bucket_name = "notesinendocrinology"
prefixes = ["comboindex.json", "osteoporosis.json", "nafld.json"]

# Reuse a single Bucket handle instead of constructing s3.Bucket(bucket_name)
# twice per object as before.
bucket = s3.Bucket(bucket_name)
for prefix in prefixes:
    for obj in bucket.objects.filter(Prefix=prefix):
        file_path = obj.key
        # Strip any S3 "directory" prefix so the file lands in the CWD
        # under its bare name (matching what the loaders expect).
        bucket.download_file(file_path, file_path.split("/")[-1])
        print(f"Downloaded {file_path}")
21
+
22
 
23
@cached(cache=TTLCache(maxsize=1, ttl=3600))  # re-read from disk at most once per hour
def get_combo_index():
    """Load the combined vector index from local disk (result cached for 1 hour)."""
    return GPTSimpleVectorIndex.load_from_disk('comboindex.json')
29
 
30
def keywords(query2):
    """Select and load the vector index whose topic matches *query2*.

    The index files are downloaded from S3 once at module import, so this
    only loads the right file from local disk.  Falls back to the combined
    index when no topic keyword is found.

    Fixes vs. the previous version:
    - it referenced ``bucket`` and ``combo_index_path``, both removed in
      this commit, so every call raised NameError;
    - it downloaded the combo index's S3 key into ``nafld.json`` /
      ``osteoporosis.json``, clobbering the topic files with wrong content;
    - the keyword "Non-alcoholic fatty live disease" ("live" → "liver")
      could never match a correctly spelled query.
    """
    # NOTE(review): matching is case-sensitive — a lowercase "nafld" falls
    # through to the combined index; confirm that is intended.
    if "NASH" in query2 or "NAFLD" in query2 or "Non-alcoholic fatty liver disease" in query2:
        index_file = 'nafld.json'
    elif ("osteoporosis" in query2 or 'osteopenia' in query2
          or 'low bone mass' in query2 or 'DEXA-BMD' in query2 or 'BMD' in query2):
        index_file = 'osteoporosis.json'
    else:
        index_file = 'comboindex.json'
    return GPTSimpleVectorIndex.load_from_disk(index_file)
43
+
44
def send_message(message_log):
    """Send the conversation history to the OpenAI chat API and return the reply text.

    *message_log* is a list of ``{"role": ..., "content": ...}`` dicts.
    """
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",    # chat model to use
        messages=message_log,     # full conversation history so far
        max_tokens=512,           # cap on the generated reply length
        stop=None,                # no custom stop sequence
        temperature=0.5,          # moderate creativity
    )

    # Prefer any choice exposing plain `text`; if none does, fall back to
    # the first choice's message content (the usual shape for chat models).
    for candidate in completion.choices:
        if "text" in candidate:
            return candidate.text
    return completion.choices[0].message.content
61
+
62
+
63
def generate_variations(question):
    """Return the original question plus one LLM-generated follow-up.

    The result is a list whose first element is the original question
    prefixed with "0. ", followed by the lines of the model's reply.

    Cleanups vs. the previous version: removed the dead local ``text``,
    the pointless ``input2`` alias, and renamed the nested helper's
    parameter so it no longer shadows the builtin ``input``.
    """
    def extract(prompt):
        # Ask the chat model for exactly one follow-up question.
        message_log = [{"role": "system", "content": prompt}]
        user_input = f"Generate one follow-up question from the following question: {prompt}. Give one more question only. The question is intended for knowledgeable doctors"
        message_log.append({"role": "user", "content": user_input})
        response = send_message(message_log)
        message_log.append({"role": "assistant", "content": response})
        print(response)
        return response

    my_string = "0. " + question
    output = extract(question)
    final_list = [my_string] + output.split("\n")
    print(final_list)
    return final_list
83
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
84
 
85
+
86
+
87
 
88
def querying_db(query: str):
    """Answer *query* against the topic-appropriate vector index."""
    index = keywords(query)  # picks nafld / osteoporosis / combo index by keyword
    return index.query(query, response_mode="default")
93
 
94
 
95
  tools = [
 
126
  return result
127
 
128
 
129
def get_answer2(list_thing):
    """Answer every question in *list_thing* and join the Q/A pairs with blank lines."""
    blocks = [f"{q}\n{get_answer(q)}" for q in list_thing]
    return "\n\n".join(blocks)
137
+
138
+
139
def consolidated_answer(question, oginput):
    """Condense *question* (a bundle of Q/A text) into one answer to *oginput*.

    Cleanups vs. the previous version: removed the dead local ``text`` and
    the ``input2`` alias, and renamed the nested helper's parameter so it
    no longer shadows the builtin ``input``.
    """
    def extract(prompt):
        # Ask the chat model to merge the per-question answers into one.
        message_log = [{"role": "system", "content": prompt}]
        user_input = f"Give a consolidated answer from this: {prompt}. It should answer the original question {oginput}. The answer is for knowledgeable doctors so use medical terms."
        message_log.append({"role": "user", "content": user_input})
        response = send_message(message_log)
        message_log.append({"role": "assistant", "content": response})
        print(response)
        return response

    output = extract(question)
    print(output)
    return output
156
 
157
 
158
def qa_app(query1):
    """End-to-end QA pipeline: generate question variations, answer them,
    consolidate, and wrap the result with a topic-appropriate citation.

    BUG FIX: the previous code compared the index *object* returned by
    ``keywords(query1)`` against the filename strings ``'nafld.json'`` /
    ``'osteoporosis_new.json'`` — never equal, so the topic citations were
    unreachable (and ``osteoporosis_new.json`` matched no file anywhere).
    Detect the topic from the query text instead, mirroring the keyword
    sets used for index selection.
    """
    question_variations_list = generate_variations(query1)

    if ("NASH" in query1 or "NAFLD" in query1
            or "Non-alcoholic fatty liver disease" in query1):
        prefix_answer = "Here is the answer based on Notes in Endocrinology and American Association of Clinical Endocrinology Clinical Practice Guideline for the Diagnosis and Management of Nonalcoholic Fatty Liver Disease in Primary Care and Endocrinology Clinical Settings:"
        suffix_answer = """Citation: \n

1. Cusi, Kenneth, Scott Isaacs, Diana Barb, Rita Basu, Sonia Caprio, W. Timothy Garvey, Sangeeta Kashyap et al. "American Association of Clinical Endocrinology clinical practice guideline for the diagnosis and management of nonalcoholic fatty liver disease in primary care and endocrinology clinical settings: co-sponsored by the American Association for the Study of Liver Diseases (AASLD)." Endocrine Practice 28, no. 5 (2022): 528-562.

"""
    elif ("osteoporosis" in query1 or "osteopenia" in query1
            or "low bone mass" in query1 or "DEXA-BMD" in query1 or "BMD" in query1):
        prefix_answer = "According to : Pharmacological Management of Osteoporosis in Postmenopausal Women: An Endocrine Society* Clinical Practice Guideline & Notes in Endocrinology"
        suffix_answer = """Citation: \n

1. Eastell R, Rosen CJ, Black DM, Cheung AM, Murad MH, Shoback D. Pharmacological management of osteoporosis in postmenopausal women: an Endocrine Society clinical practice guideline. The Journal of Clinical Endocrinology & Metabolism. 2019 May;104(5):1595-622.
"""
    else:
        prefix_answer = "According to NIE:"
        suffix_answer = "Citation: NIE"

    big_answer = get_answer2(question_variations_list)
    final_answer = consolidated_answer(big_answer, query1)
    return prefix_answer + "\n\n" + final_answer + "\n\n" + suffix_answer
186
+
187
+
188
# Gradio interface widgets: a single free-text question in, plain text out.
inputs = [
    gr.inputs.Textbox(label="Enter your question:"),
]

output = gr.outputs.Textbox(label="Answer:")
 
200
  )
201
 
202
  iface.launch()