endrishassen committed on
Commit
2b3daa2
·
verified ·
1 Parent(s): 53e6f75

Upload 4 files

Browse files
Files changed (4) hide show
  1. Dockerfile +1 -1
  2. ai_helper.py +20 -17
  3. bot.py +1 -29
  4. requirements.txt +0 -1
Dockerfile CHANGED
@@ -1,6 +1,6 @@
1
  FROM python:3.11-slim
2
 
3
- RUN apt-get update && apt-get install -y ffmpeg git && rm -rf /var/lib/apt/lists/*
4
 
5
  WORKDIR /app
6
 
 
1
  FROM python:3.11-slim
2
 
3
+ RUN apt-get update && apt-get install -y git && rm -rf /var/lib/apt/lists/*
4
 
5
  WORKDIR /app
6
 
ai_helper.py CHANGED
@@ -3,11 +3,12 @@ import os
3
  import time
4
 
5
  def generate_answer(question, context_list):
6
- api_key = os.getenv("GEMINI_API_KEY")
7
- if not api_key:
 
8
  return "❌ Gemini API key missing. Add GEMINI_API_KEY=your_key to .env"
9
 
10
- genai.configure(api_key=api_key)
11
 
12
  # Truncate chunks and limit to 5 to avoid Token Per Minute (TPM) limits
13
  context = "\n\n".join([c[:2000] for c in context_list[:5]])
@@ -30,19 +31,21 @@ Instructions:
30
  Answer:
31
  """
32
 
33
- # Using gemini-2.0-flash as gemini-1.5-flash returned 404
34
- model = genai.GenerativeModel('gemini-2.0-flash')
35
-
36
- # Simple retry logic for Rate Limits (RPM)
37
- for attempt in range(3):
38
- try:
39
- response = model.generate_content(prompt)
40
- return response.text
41
- except Exception as e:
42
- if "429" in str(e) and attempt < 2:
43
- wait_time = (attempt + 1) * 5
44
- time.sleep(wait_time)
45
- continue
46
- raise e
 
 
47
 
48
  return "❌ I'm currently receiving too many requests. Please try again in a minute."
 
3
  import time
4
 
5
  def generate_answer(question, context_list):
6
+ # Get keys from environment; supports single key or comma-separated list
7
+ raw_keys = os.getenv("GEMINI_API_KEY")
8
+ if not raw_keys:
9
  return "❌ Gemini API key missing. Add GEMINI_API_KEY=your_key to .env"
10
 
11
+ api_keys = [k.strip() for k in raw_keys.split(",") if k.strip()]
12
 
13
  # Truncate chunks and limit to 5 to avoid Token Per Minute (TPM) limits
14
  context = "\n\n".join([c[:2000] for c in context_list[:5]])
 
31
  Answer:
32
  """
33
 
34
+ # Iterate through available API keys to bypass daily/minute quotas
35
+ for api_key in api_keys:
36
+ genai.configure(api_key=api_key)
37
+ model = genai.GenerativeModel('gemini-2.0-flash')
38
+
39
+ for attempt in range(2): # Try twice per key
40
+ try:
41
+ response = model.generate_content(prompt)
42
+ return response.text
43
+ except Exception as e:
44
+ # If it's a 429 error, try the next key or wait briefly
45
+ if "429" in str(e):
46
+ time.sleep(2)
47
+ continue
48
+ logger.error(f"AI Error with key {api_key[:5]}...: {e}")
49
+ break # Try the next API key in the list
50
 
51
  return "❌ I'm currently receiving too many requests. Please try again in a minute."
bot.py CHANGED
@@ -10,7 +10,6 @@ from pdf_loader import load_all_pdfs
10
  from search_engine import SmartSearch
11
  from ai_helper import generate_answer
12
  from translator import process_language, translate_back
13
- from voice import voice_to_text
14
 
15
  # Enable logging
16
  logging.basicConfig(
@@ -42,36 +41,10 @@ async def handle_text(update: Update, context: ContextTypes.DEFAULT_TYPE):
42
  await update.message.reply_text(final_answer)
43
  except Exception as e:
44
  if "429" in str(e):
45
- await update.message.reply_text("πŸ•’ The AI is currently busy (Rate limit reached). Please try again in 30 seconds.")
46
  logger.error(f"Error in handle_text: {e}")
47
  await update.message.reply_text("Sorry, I encountered an error processing your request.")
48
 
49
- # VOICE HANDLER
50
- async def handle_voice(update: Update, context: ContextTypes.DEFAULT_TYPE):
51
- file = await update.message.voice.get_file()
52
- logger.info(f"User {update.effective_user.id} sent voice")
53
- file_path = f"voice_{update.effective_user.id}_{update.message.message_id}.ogg"
54
- await file.download_to_drive(file_path)
55
-
56
- try:
57
- text = voice_to_text(file_path)
58
-
59
- processed_text, lang = process_language(text)
60
-
61
- results = search.query(processed_text)
62
- answer = generate_answer(processed_text, results)
63
-
64
- final_answer = translate_back(answer, lang)
65
-
66
- await update.message.reply_text(final_answer)
67
- except Exception as e:
68
- if "429" in str(e):
69
- await update.message.reply_text("πŸ•’ The AI is currently busy. Please wait a moment.")
70
- logger.error(f"Error in handle_voice: {e}")
71
- finally:
72
- if os.path.exists(file_path):
73
- os.remove(file_path)
74
-
75
  # Added timeouts to help with slow connections or initial handshake delays
76
  # Increased timeouts and added max_retries for more robust startup in potentially unstable network conditions.
77
  app = (
@@ -83,6 +56,5 @@ app = (
83
  )
84
  app.add_handler(CommandHandler("start", start))
85
  app.add_handler(MessageHandler(filters.TEXT, handle_text))
86
- app.add_handler(MessageHandler(filters.VOICE, handle_voice))
87
 
88
  app.run_polling(poll_interval=5, timeout=60, bootstrap_retries=20) # Added explicit retries and increased timeout
 
10
  from search_engine import SmartSearch
11
  from ai_helper import generate_answer
12
  from translator import process_language, translate_back
 
13
 
14
  # Enable logging
15
  logging.basicConfig(
 
41
  await update.message.reply_text(final_answer)
42
  except Exception as e:
43
  if "429" in str(e):
44
+ await update.message.reply_text("πŸ•’ The system is currently busy (Rate limit reached). Please try again in 30 seconds.")
45
  logger.error(f"Error in handle_text: {e}")
46
  await update.message.reply_text("Sorry, I encountered an error processing your request.")
47
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
48
  # Added timeouts to help with slow connections or initial handshake delays
49
  # Increased timeouts and added max_retries for more robust startup in potentially unstable network conditions.
50
  app = (
 
56
  )
57
  app.add_handler(CommandHandler("start", start))
58
  app.add_handler(MessageHandler(filters.TEXT, handle_text))
 
59
 
60
  app.run_polling(poll_interval=5, timeout=60, bootstrap_retries=20) # Added explicit retries and increased timeout
requirements.txt CHANGED
@@ -5,6 +5,5 @@ numpy
5
  PyPDF2
6
  langdetect
7
  deep-translator
8
- openai-whisper
9
  python-dotenv
10
  google-generativeai
 
5
  PyPDF2
6
  langdetect
7
  deep-translator
 
8
  python-dotenv
9
  google-generativeai