# law-bot/libs/llm.py  (commit edd46dc, author: anpigon)
# refactor: Update libs/llm.py to use ChatGoogleGenerativeAI instead of GoogleGenerativeAI
# llm.py
from langchain_openai import ChatOpenAI
from langchain_anthropic import ChatAnthropic
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_groq import ChatGroq
from langchain_community.chat_models import ChatOllama
from langchain_core.runnables import ConfigurableField
from langchain.callbacks.base import BaseCallbackHandler
class StreamCallback(BaseCallbackHandler):
    """Per-token hook attached to every chat model built in this module.

    Currently a deliberate no-op: the echo-to-stdout line is kept commented
    out so token-by-token debugging can be re-enabled quickly.
    """

    def on_llm_new_token(self, token: str, **kwargs) -> None:
        """Receive each newly streamed token; intentionally does nothing."""
        # print(token, end="", flush=True)
def get_llm(streaming=True):
    """Build the default chat LLM with runtime-selectable alternatives.

    Returns a ``ChatOpenAI`` (gpt-4o) wrapped with
    ``configurable_alternatives`` so callers can switch models at invoke
    time via ``.with_config(configurable={"llm": <key>})``.

    Args:
        streaming: Whether each underlying model streams tokens (default True).

    Returns:
        A configurable Runnable chat model; ``default_key`` is ``"gpt_4o"``.
    """

    def _common():
        # Fresh kwargs per model: each constructor gets its own
        # StreamCallback instance, matching the original per-model handlers.
        return {
            "temperature": 0,
            "streaming": streaming,
            "callbacks": [StreamCallback()],
        }

    return ChatOpenAI(
        model="gpt-4o",
        **_common(),
    ).configurable_alternatives(
        ConfigurableField(id="llm"),
        default_key="gpt_4o",
        claude_3_5_sonnet=ChatAnthropic(
            model="claude-3-5-sonnet-20240620",
            **_common(),
        ),
        gpt_3_5_turbo=ChatOpenAI(
            model="gpt-3.5-turbo-0125",
            **_common(),
        ),
        gemini_1_5_flash=ChatGoogleGenerativeAI(
            model="gemini-1.5-flash",
            **_common(),
        ),
        llama3_70b=ChatGroq(
            model_name="llama3-70b-8192",
            **_common(),
        ),
        # NOTE(review): the two Ollama models were originally constructed
        # WITHOUT temperature=0; preserved as-is — confirm whether the
        # omission was intentional before aligning them with the others.
        eeve=ChatOllama(
            model="EEVE-Korean-10.8B",
            streaming=streaming,
            callbacks=[StreamCallback()],
        ),
        gemma2=ChatOllama(
            model="gemma2",
            streaming=streaming,
            callbacks=[StreamCallback()],
        ),
    )