neuralleap committed on
Commit
38c4b76
1 Parent(s): 2de5bf4

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +5 -4
main.py CHANGED
@@ -23,7 +23,7 @@ DEVICE = "cuda:0" if torch.cuda.is_available() else "cpu"
23
  GOOGLE_API_KEY = os.environ['GOOGLE_API_KEY']
24
 
25
  genai.configure(api_key=GOOGLE_API_KEY)
26
- model = genai.GenerativeModel('gemini-pro')
27
 
28
 
29
  embeddings = HuggingFaceInstructEmbeddings(
@@ -174,11 +174,12 @@ async def llm_response(chain,id,mode):
174
 
175
  try:
176
  print("\n google gemini===================\n")
177
- chat = model.start_chat(history=[])
178
 
179
- response = chat.send_message('give next small response for this conversation like a doctor. '+str(chain))
180
- return response.text
181
  except:
 
182
  result_ex = qa_chain(sys+chain+"""\n\n\nalways give small and single response based on the patient
183
  response. don't ask any question give simple response""")
184
  if "Patient:" in str(result_ex['result']) or "Patient response:" in str(result_ex['result']) or "Patient Response" in str(result_ex['result']):
 
23
  GOOGLE_API_KEY = os.environ['GOOGLE_API_KEY']
24
 
25
  genai.configure(api_key=GOOGLE_API_KEY)
26
+ gemini_model = genai.GenerativeModel('gemini-pro')
27
 
28
 
29
  embeddings = HuggingFaceInstructEmbeddings(
 
174
 
175
  try:
176
  print("\n google gemini===================\n")
177
+ gemini_chat = gemini_model.start_chat(history=[])
178
 
179
+ gemini_response = gemini_chat.send_message('give next small response for laste patient response like a doctor. '+str(chain))
180
+ return gemini_response.text
181
  except:
182
+ print("\n llmmaa ===================\n")
183
  result_ex = qa_chain(sys+chain+"""\n\n\nalways give small and single response based on the patient
184
  response. don't ask any question give simple response""")
185
  if "Patient:" in str(result_ex['result']) or "Patient response:" in str(result_ex['result']) or "Patient Response" in str(result_ex['result']):