pgleeson committed on
Commit
83083b0
·
1 Parent(s): e31a853

Adds ChatGoogleGenerativeAI

Browse files
Files changed (5) hide show
  1. .gitignore +1 -0
  2. app.py +2 -1
  3. requirements.txt +2 -1
  4. ring.py +17 -1
  5. test.py +9 -1
.gitignore CHANGED
@@ -4,3 +4,4 @@
4
  /models/LEMS_Sim_IClamp_GenericMuscleCell.xml
5
  /report.Sim_IClamp_GenericMuscleCell.txt
6
  /report.Sim_IClamp_GenericNeuronCell.txt
 
 
4
  /models/LEMS_Sim_IClamp_GenericMuscleCell.xml
5
  /report.Sim_IClamp_GenericMuscleCell.txt
6
  /report.Sim_IClamp_GenericNeuronCell.txt
7
+ *v.dat
app.py CHANGED
@@ -7,6 +7,7 @@ from ring import LLM_GPT35
7
  from ring import LLM_GPT4
8
  from ring import LLM_GPT4o
9
  from ring import LLM_LLAMA2
 
10
  from ring import PREF_ORDER_LLMS
11
 
12
 
@@ -87,7 +88,7 @@ with tab_panel:
87
  elif submitted:
88
  response = generate_panel_response(
89
  text,
90
- llm_panelists=[LLM_GPT35, LLM_GPT4],
91
  llm_panel_chair=LLM_GPT4o,
92
  temperature=temperature,
93
  )
 
7
  from ring import LLM_GPT4
8
  from ring import LLM_GPT4o
9
  from ring import LLM_LLAMA2
10
+ from ring import LLM_GEMINI
11
  from ring import PREF_ORDER_LLMS
12
 
13
 
 
88
  elif submitted:
89
  response = generate_panel_response(
90
  text,
91
+ llm_panelists=[LLM_GPT35, LLM_GPT4, LLM_GEMINI],
92
  llm_panel_chair=LLM_GPT4o,
93
  temperature=temperature,
94
  )
requirements.txt CHANGED
@@ -9,4 +9,5 @@ neuromllite
9
  matplotlib
10
  pylems
11
  pyneuroml
12
- paper-qa==4.0.0rc8
 
 
9
  matplotlib
10
  pylems
11
  pyneuroml
12
+ paper-qa==4.0.0rc8
13
+ langchain-google-genai
ring.py CHANGED
@@ -10,10 +10,11 @@ LLM_GPT35 = "GPT3.5"
10
  LLM_GPT4 = "GPT4"
11
  LLM_GPT4o = "GPT4o"
12
  LLM_LLAMA2 = "LLAMA2"
 
13
 
14
  OPENAI_LLMS = [LLM_GPT35, LLM_GPT4, LLM_GPT4o]
15
 
16
- PREF_ORDER_LLMS = (LLM_LLAMA2, LLM_GPT35, LLM_GPT4, LLM_GPT4o)
17
 
18
 
19
  def requires_openai_key(llm_ver):
@@ -40,6 +41,13 @@ def get_llamaapi_key():
40
  return llamaapi_key
41
 
42
 
 
 
 
 
 
 
 
43
  def get_llm(llm_ver, temperature):
44
 
45
  if llm_ver == LLM_GPT35:
@@ -78,6 +86,14 @@ def get_llm(llm_ver, temperature):
78
 
79
  llm = ChatLlamaAPI(client=llama)
80
 
 
 
 
 
 
 
 
 
81
  return llm
82
 
83
 
 
10
  LLM_GPT4 = "GPT4"
11
  LLM_GPT4o = "GPT4o"
12
  LLM_LLAMA2 = "LLAMA2"
13
+ LLM_GEMINI = "Gemini"
14
 
15
  OPENAI_LLMS = [LLM_GPT35, LLM_GPT4, LLM_GPT4o]
16
 
17
+ PREF_ORDER_LLMS = (LLM_GEMINI, LLM_LLAMA2, LLM_GPT35, LLM_GPT4, LLM_GPT4o)
18
 
19
 
20
  def requires_openai_key(llm_ver):
 
41
  return llamaapi_key
42
 
43
 
44
+ def get_gemini_api_key():
45
+
46
+ gemini_api_key = os.environ.get("GEMINIAPI_KEY")
47
+
48
+ return gemini_api_key
49
+
50
+
51
  def get_llm(llm_ver, temperature):
52
 
53
  if llm_ver == LLM_GPT35:
 
86
 
87
  llm = ChatLlamaAPI(client=llama)
88
 
89
+ elif llm_ver == LLM_GEMINI:
90
+
91
+ from langchain_google_genai import ChatGoogleGenerativeAI
92
+
93
+ llm = ChatGoogleGenerativeAI(
94
+ model="gemini-pro", google_api_key=get_gemini_api_key()
95
+ )
96
+
97
  return llm
98
 
99
 
test.py CHANGED
@@ -2,19 +2,27 @@ from ring import LLM_GPT35
2
  from ring import LLM_GPT4
3
  from ring import LLM_GPT4o
4
  from ring import LLM_LLAMA2
 
5
  from ring import PREF_ORDER_LLMS
6
 
7
-
8
  from ring import generate_response
9
 
 
 
10
  question = "What is the most common type of neuron in the brain?"
11
 
12
  llm_ver = LLM_GPT4o
13
 
 
 
 
 
14
  print("--------------------------------------------------------")
15
  print("Asking question:\n %s" % question)
16
  print("--------------------------------------------------------")
17
 
 
 
18
  response = generate_response(question, llm_ver, temperature=0, only_celegans=False)
19
 
20
 
 
2
  from ring import LLM_GPT4
3
  from ring import LLM_GPT4o
4
  from ring import LLM_LLAMA2
5
+ from ring import LLM_GEMINI
6
  from ring import PREF_ORDER_LLMS
7
 
 
8
  from ring import generate_response
9
 
10
+ import sys
11
+
12
  question = "What is the most common type of neuron in the brain?"
13
 
14
  llm_ver = LLM_GPT4o
15
 
16
+ if "-g" in sys.argv:
17
+ llm_ver = LLM_GEMINI
18
+
19
+
20
  print("--------------------------------------------------------")
21
  print("Asking question:\n %s" % question)
22
  print("--------------------------------------------------------")
23
 
24
+ print(" ... Connecting to: %s" % llm_ver)
25
+
26
  response = generate_response(question, llm_ver, temperature=0, only_celegans=False)
27
 
28