refactor: Update libs/llm.py to use ChatGoogleGenerativeAI instead of GoogleGenerativeAI
Browse files- libs/llm.py +2 -2
libs/llm.py
CHANGED
@@ -1,7 +1,7 @@
|
|
1 |
# llm.py
|
2 |
from langchain_openai import ChatOpenAI
|
3 |
from langchain_anthropic import ChatAnthropic
|
4 |
-
from langchain_google_genai import GoogleGenerativeAI
|
5 |
from langchain_groq import ChatGroq
|
6 |
from langchain_community.chat_models import ChatOllama
|
7 |
from langchain_core.runnables import ConfigurableField
|
@@ -35,7 +35,7 @@ def get_llm(streaming=True):
|
|
35 |
streaming=streaming,
|
36 |
callbacks=[StreamCallback()],
|
37 |
),
|
38 |
-
gemini_1_5_flash=GoogleGenerativeAI(
|
39 |
model="gemini-1.5-flash",
|
40 |
temperature=0,
|
41 |
streaming=streaming,
|
|
|
1 |
# llm.py
|
2 |
from langchain_openai import ChatOpenAI
|
3 |
from langchain_anthropic import ChatAnthropic
|
4 |
+
from langchain_google_genai import ChatGoogleGenerativeAI
|
5 |
from langchain_groq import ChatGroq
|
6 |
from langchain_community.chat_models import ChatOllama
|
7 |
from langchain_core.runnables import ConfigurableField
|
|
|
35 |
streaming=streaming,
|
36 |
callbacks=[StreamCallback()],
|
37 |
),
|
38 |
+
gemini_1_5_flash=ChatGoogleGenerativeAI(
|
39 |
model="gemini-1.5-flash",
|
40 |
temperature=0,
|
41 |
streaming=streaming,
|