# LLM_Model/backup_gemini_llm.py

import os

from dotenv import load_dotenv
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel

import google.generativeai as genai

# Load variables from a .env file into the environment
load_dotenv()
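
# A minimal .env file for local development might look like this
# (the value below is a placeholder, not a real credential):
#
#   GOOGLE_API_KEY=your-api-key-here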

# ✅ Configure API key (set GOOGLE_API_KEY in environment variables)
genai.configure(api_key=os.environ["GOOGLE_API_KEY"])

app = FastAPI()

# ✅ Allow all origins
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
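
# Note: the wide-open CORS policy above is convenient for development. For
# production you would typically list specific origins instead; the URL below
# is a hypothetical example, not part of this project:
#   allow_origins=["https://your-frontend.example.com"]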

class ChatRequest(BaseModel):
    message: str

# ✅ Load the Gemini model (gemini-2.5-flash is lightweight & fast)
model = genai.GenerativeModel("gemini-2.5-flash")

@app.get("/")
def root():
    """Health-check endpoint."""
    return {"status": "ok"}

@app.post("/chat")
def chat(request: ChatRequest):
    """Chat endpoint using Gemini"""
    response = model.generate_content(request.message)
    return {"reply": response.text}