Spaces:
Running
Running
| import os | |
| import gradio as gr | |
| from dotenv import load_dotenv | |
| import json | |
| from deep_translator import GoogleTranslator | |
| import google.generativeai as genai | |
| import time | |
| import random | |
# Load environment variables from a local .env file (if present) so the
# API key can be kept out of source control.
load_dotenv()
# Configure the Gemini API with your API key.
# NOTE(review): no check that the key is actually set — if GEMINI_API_KEY is
# missing, os.getenv returns None and the first model call will fail later.
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
genai.configure(api_key=GEMINI_API_KEY)
def make_call(data):
    """Answer one chat query via Gemini and translate the reply.

    Parameters
    ----------
    data : str
        A JSON (or JSON-ish, single-quoted) string with two keys:
        ``'lang'`` — target language code for deep-translator, and
        ``'text'`` — the user's query.

    Returns
    -------
    str
        A JSON string of the form
        ``{"message": <answer or error text>, "action": "nothing",
        "function": "nothing"}``.

    Raises
    ------
    json.JSONDecodeError
        If ``data`` cannot be parsed even after the quote fallback.
    KeyError
        If ``'lang'`` or ``'text'`` is missing from the payload.
    """
    print(data)
    # Parse the payload. Try strict JSON first; only fall back to the legacy
    # single-quote -> double-quote rewrite when that fails. The original
    # rewrote unconditionally, which corrupted any query containing an
    # apostrophe (e.g. "don't" -> "don"t" -> decode error).
    try:
        items = json.loads(data)
    except json.JSONDecodeError:
        items = json.loads(data.replace("'", '"'))
    language = items['lang']
    query = items['text'].lower()

    translated = None
    model = genai.GenerativeModel('gemini-2.5-flash-lite')
    retries = 0
    max_retries = 5  # You can adjust this number
    base_delay = 1  # Initial delay in seconds; doubled each retry (exponential backoff)
    while retries < max_retries:
        try:
            # NOTE: trailing spaces inside the fragments matter — the pieces
            # are concatenated directly. (Fixed: "answered"/"in context" used
            # to join without a space, producing "answeredin" in the prompt.)
            prompt_query = (
                f"Answer the given query in a very short message with wisdom, love, and compassion, answer the query as Krishna would have answered "
                f"in context to Bhagavad Gita, that feels like talking to God Krishna itself "
                f"provide references of shlokas from chapters of Bhagavad Gita which are "
                f"relevant to the query. Keep the answer short, precise, and simple. "
                f"Query: {query}"
            )
            response = model.generate_content(prompt_query)
            answer = response.text
            translated = GoogleTranslator(source='auto', target=language).translate(answer)
            break  # Exit the loop if the call is successful
        except Exception as e:
            # Retry with backoff on any rate-limit/quota error. The original
            # matched only the literal "429 Quota exceeded", which misses the
            # other phrasings the API uses for 429s.
            msg = str(e)
            if "429" in msg or "quota" in msg.lower():
                delay = base_delay * (2 ** retries) + random.uniform(0, 1)  # Exponential backoff with jitter
                print(f"Quota exceeded. Retrying in {delay:.2f} seconds... (Attempt {retries + 1}/{max_retries})")
                time.sleep(delay)
                retries += 1
            else:
                print(f"API call failed: {e}")
                translated = f"An error occurred while fetching the answer: {e}"
                break  # Exit the loop for other errors
    else:
        # This block executes if the loop completes without a successful
        # break (i.e., max_retries was reached).
        translated = "Maximum retry attempts reached. Please try again later."
    respo = {
        "message": translated,
        "action": "nothing",
        "function": "nothing",
    }
    print(translated)
    return json.dumps(respo)
# Wire the handler into a minimal text-in/text-out Gradio UI and start the
# server. launch() runs at import time — intended for a Hugging Face Space,
# where the file is executed directly as the app entry point.
gradio_interface = gr.Interface(fn=make_call, inputs="text", outputs="text")
gradio_interface.launch()
# ---------------------------------------------------------------------------
# NOTE(review): everything below is dead, commented-out legacy code — an
# earlier Groq/Gemini prototype and a Shap-E Streamlit experiment. It is kept
# here for reference only; consider moving it to version-control history and
# deleting it from this file.
# ---------------------------------------------------------------------------
# import os
# import gradio as gr
| # from groq import Groq | |
| # from dotenv import load_dotenv | |
| # import json | |
| # from deep_translator import GoogleTranslator | |
| # import google.generativeai as genai | |
| # load_dotenv() | |
| # api1 = os.getenv("GEMINI_API_KEY") | |
| # genai.configure(api_key=api1) | |
| # # api2 = os.getenv("Groq_key") | |
| # # api3 = os.getenv("GRoq_key") | |
| # # api2 = os.getenv("Groq_key") | |
| # # api2 = os.getenv("Groq_key") | |
| # # api2 = os.getenv("Groq_key") | |
| # # api2 = os.getenv("Groq_key") | |
| # # apis = [ | |
| # # api1 | |
| # # ] | |
| # # from google import genai | |
| # # client = genai.Client() | |
| # # response = client.models.generate_content( | |
| # # model="gemini-2.5-flash", | |
| # # contents="Explain how AI works in a few words", | |
| # # ) | |
| # # print(response.text) | |
| # def make_call(data): | |
| # print(data) | |
| # newdata = data.replace("'", '"') | |
| # items = json.loads(newdata) | |
| # language = items['lang'] | |
| # query = items['text'] | |
| # query = query.lower() | |
| # answer = None | |
| # while True: | |
| # for api in apis: | |
| # client = genai.Client( | |
| # api_key=api, | |
| # ) # Configure the model with the API key | |
| # # query = st.text_input("Enter your query") | |
| # prmptquery= f"Answer this query in a short message with wisdom, love and compassion, in context to bhagwat geeta, that feels like chatting to a person and provide references of shloks from chapters of bhagwat geeta which is relevant to the query. keep the answer short, precise and simple. Query= {query}" | |
| # try: | |
| # response = client.chat.completions.create( | |
| # messages=[ | |
| # { | |
| # "role": "user", | |
| # "content": prmptquery, | |
| # } | |
| # ], | |
| # model="mixtral-8x7b-32768", | |
| # ) | |
| # answer = response.choices[0].message.content | |
| # translated = GoogleTranslator(source='auto', target=language).translate(answer) | |
| # except Exception as e: | |
| # print(f"API call failed for: {e}") | |
| # if answer: | |
| # break | |
| # if answer: | |
| # break | |
| # respo = { | |
| # "message": translated, | |
| # "action": "nothing", | |
| # "function": "nothing", | |
| # } | |
| # print(translated) | |
| # return json.dumps(respo) | |
| # gradio_interface = gr.Interface(fn=make_call, inputs="text", outputs="text") | |
| # gradio_interface.launch() | |
| # # print(chat_completion) | |
| # # # Text to 3D | |
| # # import streamlit as st | |
| # # import torch | |
| # # from diffusers import ShapEPipeline | |
| # # from diffusers.utils import export_to_gif | |
| # # # Model loading (Ideally done once at the start for efficiency) | |
| # # ckpt_id = "openai/shap-e" | |
| # # @st.cache_resource # Caches the model for faster subsequent runs | |
| # # def load_model(): | |
| # # return ShapEPipeline.from_pretrained(ckpt_id).to("cuda") | |
| # # pipe = load_model() | |
| # # # App Title | |
| # # st.title("Shark 3D Image Generator") | |
| # # # User Inputs | |
| # # prompt = st.text_input("Enter your prompt:", "a shark") | |
| # # guidance_scale = st.slider("Guidance Scale", 0.0, 20.0, 15.0, step=0.5) | |
| # # # Generate and Display Images | |
| # # if st.button("Generate"): | |
| # # with st.spinner("Generating images..."): | |
| # # images = pipe( | |
| # # prompt, | |
| # # guidance_scale=guidance_scale, | |
| # # num_inference_steps=64, | |
| # # size=256, | |
| # # ).images | |
| # # gif_path = export_to_gif(images, "shark_3d.gif") | |
| # # st.image(images[0]) # Display the first image | |
| # # st.success("GIF saved as shark_3d.gif") |