Yew Chong committed on
Commit aa1d498
Parent(s): 5ab5660

add llm for gpt

LLM for Grading (test).ipynb CHANGED
@@ -322,6 +322,7 @@
   "\n",
   " # prompt # | \n",
   " LLMChain(llm=llm, prompt=prompt, memory=memory, verbose=True) #|\n",
+  " # LLMChain(llm=llm_gpt4, prompt=prompt, memory=memory, verbose=True) #| ## GPT4\n",
   " \n",
   " # StrOutputParser()\n",
   ")"
LLM for Patient (test).ipynb CHANGED
@@ -150,7 +150,8 @@
   " \"question\": RunnablePassthrough()\n",
   " } | \n",
   " # prompt | \n",
-  " LLMChain(llm=llm_gpt4, prompt=prompt, memory=memory, verbose=True) #| \n",
+  " LLMChain(llm=llm, prompt=prompt, memory=memory, verbose=True) #| \n",
+  " # LLMChain(llm=llm_gpt4, prompt=prompt, memory=memory, verbose=True) #| ## GPT4\n",
   " # StrOutputParser()\n",
   ")"
   ]
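For context, this hunk switches the active model from llm_gpt4 back to llm and keeps the GPT-4 variant as a commented alternative inside an LLMChain that combines a prompt taking a question variable with conversation memory. A minimal standalone sketch of that chain, with an assumed prompt template and memory key (not taken from the notebook):

# Hedged sketch of the kind of chain this hunk edits: LLMChain + conversation memory.
# The prompt wording, memory_key, and model names are illustrative assumptions.
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI

prompt = PromptTemplate.from_template(
    "Chat history:\n{chat_history}\n\nPatient question: {question}\nAnswer:"
)
memory = ConversationBufferMemory(memory_key="chat_history")

llm = ChatOpenAI(model="gpt-3.5-turbo")   # assumed default model
# llm_gpt4 = ChatOpenAI(model="gpt-4")    # GPT-4 alternative, as in the diff

chain = LLMChain(llm=llm, prompt=prompt, memory=memory, verbose=True)
print(chain.invoke({"question": "What should I ask the patient next?"})["text"])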
requirements.txt CHANGED
@@ -12,6 +12,7 @@ tiktoken
 faiss-cpu
 streamlit
 firebase-admin
+plotly
 
 --index-url https://download.pytorch.org/whl/cu113
 torch==2.1.2