# src/models/llm_wrapper.py
import logging
import os

from dotenv import load_dotenv
from langchain_google_genai import ChatGoogleGenerativeAI

# Pull GEMINI_API_KEY (and any other settings) from the project env file.
load_dotenv(dotenv_path="src/configs/.env")
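# The env file is expected to define the key read via os.getenv below, e.g.
# (sketch; the value is a placeholder):
#   GEMINI_API_KEY=your-google-ai-studio-key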


class GeminiWrapper:
    """Thin wrapper around LangChain's ChatGoogleGenerativeAI chat model."""

    def __init__(self, model_name="gemini-2.5-flash", temperature=0):
        self.model_name = model_name
        self.llm = ChatGoogleGenerativeAI(
            model=model_name,
            temperature=temperature,
            google_api_key=os.getenv("GEMINI_API_KEY"),
        )

    def generate(self, prompt, history=None):
        """Generate a response, optionally prepending prior turns for context."""
        if history:
            # Flatten (user, assistant) turns into a transcript the model can read.
            history_text = "\n".join(f"User: {u}\nAssistant: {a}" for u, a in history)
            prompt = f"{history_text}\nUser: {prompt}"
        return self._generate(prompt)

    def _generate(self, prompt):
        try:
            return self.llm.invoke(prompt).content
        except Exception as e:
            logging.error(f"Gemini generation failed: {e}")
            return "Error generating response."

    def bind_tools(self, tools):
        # Delegate to the underlying chat model; returns a LangChain Runnable
        # whose responses can carry structured tool calls.
        return self.llm.bind_tools(tools)
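

# Minimal usage sketch, assuming GEMINI_API_KEY is set in src/configs/.env and
# that history is a list of (user, assistant) tuples, as generate() expects.
# The `get_weather` tool is hypothetical, included only to illustrate
# bind_tools(); running this block makes live API calls.
if __name__ == "__main__":
    from langchain_core.tools import tool

    wrapper = GeminiWrapper()

    # Plain generation with prior turns prepended as context.
    history = [("What is RAG?", "Retrieval-Augmented Generation grounds an LLM in retrieved documents.")]
    print(wrapper.generate("How does that reduce hallucinations?", history=history))

    # Tool binding: the returned Runnable can emit structured tool calls.
    @tool
    def get_weather(city: str) -> str:
        """Return a canned weather report for `city`."""
        return f"It is sunny in {city}."

    llm_with_tools = wrapper.bind_tools([get_weather])
    response = llm_with_tools.invoke("What's the weather in Paris?")
    print(response.tool_calls)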