Spaces: Upload 16 files

Files changed:

- .env +9 -0
- .gitignore +2 -0
- Dockerfile +29 -0
- README.md +10 -7
- answer_generation.py +169 -0
- app.py +179 -0
- compose.yaml +50 -0
- controller.py +30 -0
- db.py +80 -0
- llm_pipeline.py +13 -0
- prompts.py +79 -0
- question_generation.py +100 -0
- requirements.txt +21 -0
- run.sh +3 -0
- supervisord.conf +23 -0
- tempCodeRunnerFile.py +79 -0
.env
ADDED
@@ -0,0 +1,9 @@
+GOOGLE_API_KEY=<redacted-google-api-key>
+MONGO_URL=mongodb+srv://<user>:<password>@<cluster>.mongodb.net/?retryWrites=true&w=majority&appName=ReasoningData
+
+FASTAPI_URL=http://127.0.0.1:8000/generate
+
+LLM_MODEL=gemini-2.5-pro
+
+QUESTION_GENERATION_URL=http://question_service:8000
+ANSWER_GENERATION_URL=http://answer_service:9000
.gitignore
ADDED
@@ -0,0 +1,2 @@
+__pycache__/
+*.pyc
Dockerfile
ADDED
@@ -0,0 +1,29 @@
+# Base image
+FROM python:3.11-slim
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    nginx supervisor && \
+    rm -rf /var/lib/apt/lists/*
+
+# Set workdir
+WORKDIR /workspace
+
+# Install Python dependencies
+COPY requirements.txt /workspace/requirements.txt
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy application
+COPY . /workspace
+
+# Nginx and supervisor configs
+COPY nginx.conf /etc/nginx/nginx.conf
+COPY supervisord.conf /workspace/supervisord.conf
+COPY run.sh /workspace/run.sh
+RUN chmod +x /workspace/run.sh
+
+# Expose Space port
+EXPOSE 7860
+
+# Start supervisor
+CMD ["/workspace/run.sh"]
README.md
CHANGED
@@ -1,11 +1,14 @@
 ---
-title: Odia
-emoji: π
-colorFrom: pink
-colorTo: gray
+title: Odia QA Generator
 sdk: docker
-
-short_description: Odia QnA with reasoning generation app.
+app_port: 7860
 ---
 
-
+Multi-service Odia question/answer generator with Gradio UI and FastAPI backends.
+
+## Environment Variables
+
+Set the following variables at runtime for the services to work correctly:
+
+- `GOOGLE_API_KEY` – required Google Generative AI API key
+- `MONGO_URL` – optional MongoDB connection string
answer_generation.py
ADDED
@@ -0,0 +1,169 @@
+import os
+import json
+from dotenv import load_dotenv
+from pydantic import BaseModel
+import google.generativeai as genai
+from fastapi import FastAPI, HTTPException
+import uvicorn
+from prompts import PROMPTS
+from llm_pipeline import example_odia_answer_json, example_odia_question_json
+
+# Setup
+load_dotenv()
+
+# Check for required environment variables
+google_api_key = os.getenv("GOOGLE_API_KEY")
+if not google_api_key:
+    raise ValueError("GOOGLE_API_KEY not found in environment variables")
+
+genai.configure(api_key=google_api_key)
+model = genai.GenerativeModel(os.getenv("LLM_MODEL", "gemini-pro"))
+LANGUAGE = "Odia"
+
+# Models
+class QuestionRequest(BaseModel):
+    question: str
+
+class LLMResponseModel(BaseModel):
+    question_content: str
+    answer_language: str = LANGUAGE
+    reasoning_content: str
+    answer_content: str
+
+def create_prompt(user_odia_question: str) -> str:
+    SIMPLE_PROMPT = PROMPTS["odia_reasoning_generation_prompt"]
+    prompt = SIMPLE_PROMPT.format(
+        user_odia_question=user_odia_question,
+        example_odia_question_json=example_odia_question_json,
+        example_answer_json=example_odia_answer_json
+    )
+
+    return prompt
+
+# Functions
+def chat_with_model(prompt: str) -> str:
+    try:
+        response = model.generate_content(prompt)
+        return response.text if response.text else "Error: Empty response"
+    except Exception as e:
+        return f"Error: {str(e)}"
+
+def clean_json_text(text: str) -> str:
+    if text.startswith("Error:"):
+        return text
+
+    # Remove markdown code blocks
+    text = text.strip()
+    if text.startswith("```"):
+        lines = text.split('\n')
+        if len(lines) > 2:
+            text = '\n'.join(lines[1:-1])
+        else:
+            text = text.strip("`").replace("json", "", 1).strip()
+
+    # Extract JSON content
+    first = text.find("{")
+    last = text.rfind("}")
+    if first != -1 and last != -1:
+        return text[first:last+1]
+
+    return text
+
+def validate_output(raw_output: str, original_question: str):
+    cleaned = clean_json_text(raw_output)
+
+    if cleaned.startswith("Error:"):
+        return {
+            "question_content": original_question,
+            "answer_language": LANGUAGE,
+            "reasoning_content": f"Error occurred: {cleaned}",
+            "answer_content": "Unable to generate answer due to error",
+            "error": cleaned
+        }
+
+    try:
+        # Try to parse and validate JSON
+        parsed_data = json.loads(cleaned)
+        validated = LLMResponseModel(**parsed_data)
+        return validated.model_dump()
+    except json.JSONDecodeError as je:
+        return {
+            "question_content": original_question,
+            "answer_language": LANGUAGE,
+            "reasoning_content": f"JSON parsing failed: {str(je)}",
+            "answer_content": "Unable to parse model response",
+            "error": f"JSON Error: {str(je)}"
+        }
+    except Exception as e:
+        return {
+            "question_content": original_question,
+            "answer_language": LANGUAGE,
+            "reasoning_content": f"Validation failed: {str(e)}",
+            "answer_content": "Unable to validate model response",
+            "error": f"Validation Error: {str(e)}"
+        }
+
+def run_pipeline(question: str):
+    try:
+        # Build the prompt, query the model, and validate the response
+        prompt = create_prompt(user_odia_question=question)
+        raw_output = chat_with_model(prompt)
+        return validate_output(raw_output, question)
+    except Exception as e:
+        return {
+            "question_content": question,
+            "answer_language": LANGUAGE,
+            "reasoning_content": f"Pipeline error: {str(e)}",
+            "answer_content": "Unable to process question",
+            "error": f"Pipeline Error: {str(e)}"
+        }
+
+# API
+app = FastAPI(title="Odia Question Answering API", version="0.1.0")
+
+@app.get("/")
+async def root():
+    return {"message": "Odia Question Answering API is running", "status": "healthy"}
+
+@app.get("/health")
+async def health_check():
+    try:
+        # Test model connectivity
+        test_response = model.generate_content("Test")
+        return {
+            "status": "healthy",
+            "model": os.getenv("LLM_MODEL", "gemini-pro"),
+            "api_configured": bool(google_api_key)
+        }
+    except Exception as e:
+        return {
+            "status": "unhealthy",
+            "error": str(e),
+            "api_configured": bool(google_api_key)
+        }
+
+@app.post("/generate")
+async def generate_answer(request: QuestionRequest):
+    try:
+        if not request.question.strip():
+            raise HTTPException(status_code=400, detail="Question cannot be empty")
+
+        result = run_pipeline(request.question.strip())
+
+        # Check for critical errors that should return 500
+        if "error" in result and any(err_type in result["error"] for err_type in ["Error: ", "Pipeline Error:"]):
+            raise HTTPException(status_code=500, detail=f"Processing failed: {result['error']}")
+
+        return {"success": True, "data": result}
+
+    except HTTPException:
+        raise
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=f"Unexpected error: {str(e)}")
+
+if __name__ == "__main__":
+    print("Starting Odia Question Answering API...")
+    print(f"Google API Key configured: {'Yes' if google_api_key else 'No'}")
+    print(f"Model: {os.getenv('LLM_MODEL', 'gemini-pro')}")
+    host = os.getenv("ANSWER_SERVICE_HOST", "0.0.0.0")
+    port = int(os.getenv("ANSWER_SERVICE_PORT", "9000"))
+    uvicorn.run(app, host=host, port=port)
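Note: a quick way to exercise this service once it is running (a hedged sketch; assumes the service was started locally with its default port 9000 and that `requests` is installed):

    import requests

    # POST a question to the answer service; on success the endpoint wraps
    # the validated result as {"success": True, "data": {...}}.
    resp = requests.post(
        "http://127.0.0.1:9000/generate",
        json={"question": "ଏକ ଦୃଷ୍ଟାନ୍ତ ପ୍ରଶ୍ନ"},  # "an example question"
        timeout=120,
    )
    resp.raise_for_status()
    print(resp.json()["data"]["answer_content"])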
app.py
ADDED
@@ -0,0 +1,179 @@
+import os
+import gradio as gr
+from controller import generate_questions  # returns question_list (or a dict containing it)
+from controller import generate_answer  # returns a dict with question_content, answer_content, reasoning_content
+from db import save_in_db  # saves parsed dict into mongo
+
+MAX_QUESTIONS = 20
+
+# ---------------- Helpers ---------------- #
+def _extract_question_list(result):
+    """Safely extract the question list from a controller response."""
+    if not result:
+        return []
+    if isinstance(result, dict):
+        if "success" in result and isinstance(result.get("data"), dict):
+            return result["data"].get("question_list", [])
+        if "question_list" in result:
+            return result["question_list"] or []
+        if "data" in result and isinstance(result["data"], list):
+            return result["data"]
+    if isinstance(result, list):
+        return result
+    return []
+
+def _extract_qa(result, original_q):
+    """Return a dict with question_content, answer_content, reasoning_content."""
+    default = {
+        "question_content": original_q,
+        "answer_content": "No answer returned",
+        "reasoning_content": ""
+    }
+    if not result:
+        return default
+    if isinstance(result, dict):
+        d = result.get("data") if result.get("success") and isinstance(result.get("data"), dict) else result
+        return {
+            "question_content": d.get("question_content", original_q),
+            "answer_content": d.get("answer_content", d.get("answer", "")) or "",
+            "reasoning_content": d.get("reasoning_content", d.get("reasoning", "")) or ""
+        }
+    return default
+
+# ---------------- Handlers ---------------- #
+def generate_questions_ui(topic: str, num_questions: int):
+    """Stream Q -> A -> R one by one."""
+    result_values = []
+    for _ in range(MAX_QUESTIONS):
+        result_values.extend([
+            "", "", "",  # Q, A, R
+            gr.update(visible=False, interactive=False),  # accept
+            gr.update(visible=False, interactive=False),  # reject
+            gr.update(visible=False)  # group hidden
+        ])
+    yield result_values
+
+    try:
+        qres = generate_questions(topic.strip(), int(num_questions))
+    except Exception as e:
+        result_values[0] = f"Error generating questions: {e}"
+        yield result_values
+        return
+
+    question_list = _extract_question_list(qres)
+    if not question_list:
+        result_values[0] = "No questions returned."
+        yield result_values
+        return
+
+    for i, q in enumerate(question_list[:MAX_QUESTIONS]):
+        base = i * 6
+        result_values[base + 0] = q
+        result_values[base + 1] = "Generating answer..."
+        result_values[base + 2] = "Generating reasoning..."
+        result_values[base + 5] = gr.update(visible=True)
+        yield result_values
+
+        try:
+            ans_res = generate_answer(q)
+            qa = _extract_qa(ans_res, q)
+            result_values[base + 1] = qa["answer_content"]
+            result_values[base + 2] = qa["reasoning_content"]
+            result_values[base + 3] = gr.update(visible=True, interactive=True)
+            result_values[base + 4] = gr.update(visible=True, interactive=True)
+        except Exception as e:
+            result_values[base + 1] = f"Error: {e}"
+            result_values[base + 2] = ""
+            result_values[base + 4] = gr.update(visible=True, interactive=True)
+
+        yield result_values
+
+    yield result_values
+
+def accept_question(question, answer, reasoning):
+    """Save in DB and hide card."""
+    parsed = {
+        "question_content": question,
+        "answer_language": "Odia",
+        "reasoning_content": reasoning,
+        "answer_content": answer,
+    }
+    try:
+        save_in_db(parsed)
+        return (
+            gr.update(visible=False),  # accept_btn
+            gr.update(visible=False),  # reject_btn
+            gr.update(visible=False)   # group hidden
+        )
+    except Exception as e:
+        return (
+            gr.update(interactive=False, value=f"Error: {e}"),
+            gr.update(visible=True),
+            gr.update(visible=True)
+        )
+
+def reject_card():
+    """Hide rejected card."""
+    return gr.update(visible=False)
+
+# ---------------- UI Layout ---------------- #
+custom_css = """
+.gradio-container { background-color: #121212 !important; color: #E0E0E0 !important; }
+.question-card {
+    border: 1px solid #333;
+    box-shadow: 0 4px 12px rgba(0,0,0,0.4);
+    border-radius: 12px;
+    padding: 20px !important;
+    margin-bottom: 20px !important;
+    background-color: #1E1E1E;
+    transition: 0.3s ease-in-out;
+}
+.question-card:hover {
+    transform: translateY(-3px);
+    box-shadow: 0 6px 16px rgba(0,0,0,0.6);
+}
+textarea { background-color: #2A2A2A !important; color: #E0E0E0 !important; border: 1px solid #444 !important; border-radius: 8px !important; }
+button { border-radius: 8px !important; padding: 8px 12px !important; }
+"""
+
+with gr.Blocks(theme=gr.themes.Base(), css=custom_css) as demo:
+    gr.Markdown("<h2 style='color:#90CAF9;'>Odia Q&A: Generate, then Answer (streaming)</h2>")
+
+    with gr.Row():
+        topic_input = gr.Textbox(label="Topic", placeholder="Enter a topic, e.g., 'Photosynthesis'")
+        num_questions_input = gr.Dropdown(label="Number of Questions", choices=[5, 10, 15, 20], value=5)
+        generate_btn = gr.Button("⚡ Generate", variant="primary")
+
+    output_components = []
+    for i in range(MAX_QUESTIONS):
+        with gr.Group(visible=False, elem_classes=["question-card"]) as output_group:
+            with gr.Row():
+                with gr.Column(scale=4):
+                    q_text = gr.Textbox(label="❓ Question", interactive=False)
+                    a_text = gr.Textbox(label="✅ Answer", interactive=False)
+                    r_text = gr.Textbox(label="🧠 Reasoning", interactive=False)
+                with gr.Column(scale=1, min_width=150):
+                    accept_btn = gr.Button("Accept", variant="primary")
+                    reject_btn = gr.Button("Reject", variant="stop")
+
+        # Bind buttons
+        accept_btn.click(
+            fn=accept_question,
+            inputs=[q_text, a_text, r_text],  # only inputs
+            outputs=[accept_btn, reject_btn, output_group]  # update group visibility
+        )
+        reject_btn.click(fn=reject_card, outputs=[output_group])
+
+        output_components.extend([q_text, a_text, r_text, accept_btn, reject_btn, output_group])
+
+    generate_btn.click(
+        fn=generate_questions_ui,
+        inputs=[topic_input, num_questions_input],
+        outputs=output_components
+    )
+
+demo.queue()
+
+if __name__ == "__main__":
+    port = int(os.getenv("UI_PORT", "7860"))
+    demo.launch(server_name="0.0.0.0", server_port=port, share=True)
compose.yaml
ADDED
@@ -0,0 +1,50 @@
+# This file is for local development.
+# It uses "build: ." to create an image from your local source code.
+
+services:
+  # Service 1: The Answer Generation FastAPI app
+  answer_service:
+    build: .
+    container_name: answer_service
+    command: ["uvicorn", "answer_generation:app", "--host", "0.0.0.0", "--port", "9000"]
+    ports:
+      - "9000:9000"
+    env_file:
+      - .env
+    volumes:
+      - .:/app
+    environment:
+      - PYTHONPATH=/app
+
+  # Service 2: The Question Generation FastAPI app
+  question_service:
+    build: .
+    container_name: question_service
+    command: ["uvicorn", "question_generation:app", "--host", "0.0.0.0", "--port", "8000"]
+    ports:
+      - "8000:8000"
+    env_file:
+      - .env
+    volumes:
+      - .:/app
+    environment:
+      - PYTHONPATH=/app
+
+  # Service 3: The Gradio UI app
+  gradio_app:
+    build: .
+    container_name: gradio_app
+    command: ["python", "app.py"]
+    ports:
+      - "7860:7860"
+    env_file:
+      - .env
+    volumes:
+      - .:/app
+    depends_on:
+      - answer_service
+      - question_service
+    environment:
+      - PYTHONPATH=/app
+      - ANSWER_SERVICE_URL=http://answer_service:9000
+      - QUESTION_SERVICE_URL=http://question_service:8000
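Note: for local development the stack comes up with `docker compose up --build`. A minimal smoke test against the published ports (a sketch, assuming the containers are healthy):

    import requests

    # Each FastAPI service exposes a /health endpoint; the Gradio UI is on 7860.
    for name, url in [
        ("question_service", "http://localhost:8000/health"),
        ("answer_service", "http://localhost:9000/health"),
    ]:
        print(name, requests.get(url, timeout=30).json().get("status"))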
controller.py
ADDED
@@ -0,0 +1,30 @@
+import requests
+import os
+
+
+QUESTION_GENERATION_URL = os.getenv("QUESTION_SERVICE_URL", "http://127.0.0.1:8000")
+ANSWER_GENERATION_URL = os.getenv("ANSWER_SERVICE_URL", "http://127.0.0.1:9000")
+
+def generate_questions(topic: str, num_questions: int = 10):
+    url = f"{QUESTION_GENERATION_URL}/generate-questions"
+    payload = {
+        "topic": topic,
+        "num_questions": num_questions
+    }
+    try:
+        response = requests.post(url, json=payload)
+        response.raise_for_status()  # Raise on non-2xx responses
+        return response.json()
+    except requests.exceptions.RequestException as e:
+        return {"error": str(e)}
+
+def generate_answer(question: str):
+    url = f"{ANSWER_GENERATION_URL}/generate"
+    payload = {"question": question}
+
+    try:
+        response = requests.post(url, json=payload)
+        response.raise_for_status()
+        return response.json()
+    except requests.exceptions.RequestException as e:
+        return {"error": str(e)}
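Note: the controller is the only seam between the UI and the two services, so it can be exercised directly (a sketch; assumes both services are reachable at the URLs above):

    from controller import generate_questions, generate_answer

    qres = generate_questions("Algebra", num_questions=5)
    if "error" not in qres:
        questions = qres.get("data", {}).get("question_list", [])
        if questions:
            print(generate_answer(questions[0]))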
db.py
ADDED
@@ -0,0 +1,80 @@
+from pymongo.mongo_client import MongoClient
+from pymongo.server_api import ServerApi
+import datetime
+import os
+import uuid
+from dotenv import load_dotenv
+
+# Load environment variables
+load_dotenv()
+uri = os.getenv("MONGO_URL")
+
+# Create Mongo client
+client = MongoClient(uri, server_api=ServerApi('1'))
+
+# Ping test
+try:
+    client.admin.command('ping')
+    print("✅ Connected to MongoDB!")
+except Exception as e:
+    print("❌ MongoDB connection failed:", e)
+
+# Database & collection
+db = client["ReasoningData"]
+collection = db["formatted_data"]
+
+# Global trackers
+global_question_list = []
+counter = 0
+
+
+def generate_unique_id():
+    """
+    Generates a unique ID for each question.
+    Example: ODR_20250822123456_00042
+    """
+    global counter
+    prefix = "ODR"
+    timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
+    unique_id = f"{prefix}_{timestamp}_{counter:05d}"
+    counter = (counter + 1) % 100000
+    return unique_id
+
+
+def convert_into_mongo_document(parsed_json):
+    """
+    Converts a parsed Pydantic object (or dict) into a Mongo document.
+    """
+    if hasattr(parsed_json, "dict"):  # if it's a Pydantic model
+        parsed_json = parsed_json.dict()
+
+    data = {
+        "question_id": generate_unique_id(),
+        "question_content": parsed_json.get("question_content"),
+        "answer_language": parsed_json.get("answer_language"),
+        "reasoning_content": parsed_json.get("reasoning_content"),
+        "answer_content": parsed_json.get("answer_content"),
+    }
+    return data
+
+
+def insert_into_mongo(data):
+    """
+    Inserts a document into MongoDB.
+    """
+    try:
+        data["_id"] = data["question_id"]
+        result = collection.insert_one(data)
+        global_question_list.append(result.inserted_id)
+        print("✅ Inserted document ID:", result.inserted_id)
+    except Exception as e:
+        print("❌ Error inserting document:", e)
+
+
+def save_in_db(parsed_json):
+    """
+    Full pipeline: convert -> insert.
+    """
+    data = convert_into_mongo_document(parsed_json)
+    insert_into_mongo(data)
+
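Note: `save_in_db` expects the four-key dict produced by the answer service (a minimal sketch; requires a valid MONGO_URL in the environment; the "..." values are placeholders, not real data):

    from db import save_in_db

    save_in_db({
        "question_content": "...",   # real values come from /generate
        "answer_language": "Odia",
        "reasoning_content": "...",
        "answer_content": "...",
    })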
llm_pipeline.py
ADDED
@@ -0,0 +1,13 @@
+example_odia_question_json = {
+    "question_content": "ଏକ ଦୃଷ୍ଟାନ୍ତ ପ୍ରଶ୍ନ",  # "an example question"
+    "answer_language": "Odia",
+    "reasoning_content": "ପ୍ରଶ୍ନର ତର୍କ ଏବଂ ଧାରଣା ବିବେଚନା",  # "consideration of the question's logic and concepts"
+    "answer_content": "ଫଳାଫଳ"  # "result"
+}
+
+example_odia_answer_json = {
+    "question_content": "ଏକ ଦୃଷ୍ଟାନ୍ତ ପ୍ରଶ୍ନ",
+    "answer_language": "Odia",
+    "reasoning_content": "ଧାରଣା ବିବେଚନା",  # "consideration of the concepts"
+    "answer_content": "ଫଳାଫଳ"
+}
prompts.py
ADDED
@@ -0,0 +1,79 @@
+# prompts.py
+
+PROMPTS = {
+    "questions_only": """
+You are a meticulous question-setter who writes clear, solvable, advanced-level reasoning-based mathematics questions in {language}.
+The topic is: "{topic}".
+
+Each question should be advanced, at undergraduate standard, and should draw on multiple subtopics of the main topic.
+
+Each question should be worth 10 marks and take at least 10 minutes to solve.
+
+It should force the solver to think and work through multiple steps.
+
+Each question should be conclusive: it must have a single final answer, although multiple approaches may exist.
+
+### Requirements:
+- Generate exactly {num_questions} unique questions (±1 if absolutely necessary).
+- Language: {language} only (do not use English unless part of notation like symbols, variables, or numbers).
+- Each question must be reasoning-based and solvable with pen and paper.
+- Do NOT provide answers, solutions, hints, or commentary.
+- Keep the questions self-contained and unambiguous.
+- Use realistic and moderate numbers (avoid very large, complex, or impractical values).
+- Ensure variety: mix easy, medium, and at most 2 hard-level questions.
+- Stay strictly within the topic.
+
+### Output Format:
+Return ONLY valid JSON, following this schema exactly:
+
+{{
+  "question_language": "{language}",
+  "question_list": [
+    "<Q1 in {language}>",
+    "<Q2 in {language}>",
+    "<Q3 in {language}>",
+    ...
+  ]
+}}
+
+### Rules:
+- Output must be valid JSON (no markdown formatting, no ``` fences).
+- Do NOT include extra keys or metadata.
+- Do NOT repeat questions.
+- Ensure the length of "question_list" is approximately {num_questions}.
+""",
+
+    "odia_reasoning_generation_prompt": """
+You are an intelligent Odia language reasoning teacher, highly experienced in teaching mental reasoning questions.
+You are known for always giving correct answers and explaining the step-by-step reasoning clearly in Odia.
+You will be provided with a query in JSON format like this: {example_odia_question_json} (use this only as an example).
+
+⚠️ IMPORTANT RULES:
+- Output must be ONLY valid JSON (no markdown, no text outside JSON).
+- Use the exact keys: question_content, answer_language, reasoning_content, answer_content.
+- The key "answer_content" MUST contain the final concise answer in Odia; do NOT leave it empty.
+- All fields must be filled.
+- Always answer in the Odia language.
+- Do not invent new keys, do not add comments.
+- reasoning_content must show a proper step-by-step process, leading from the question to the final answer.
+
+The JSON structure you must return is:
+{{
+  "question_content": "string (the question in Odia)",
+  "answer_language": "Odia",
+  "reasoning_content": "string (detailed reasoning in Odia, step by step)",
+  "answer_content": "string (final concise answer in Odia)"
+}}
+
+Use this answer as an example: {example_answer_json}
+
+Now, process the following query (already wrapped in JSON) and return the result strictly in the required JSON structure:
+
+{{
+  "question_content": "{user_odia_question}",
+  "answer_language": "Odia",
+  "reasoning_content": "",
+  "answer_content": ""
+}}
+"""
+}
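Note: the literal JSON braces in both templates are doubled (`{{ }}`) so that `str.format` substitutes only the named placeholders. Rendering the question prompt is then a one-liner (sketch):

    from prompts import PROMPTS

    prompt = PROMPTS["questions_only"].format(
        language="Odia", topic="Algebra", num_questions=5
    )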
question_generation.py
ADDED
@@ -0,0 +1,100 @@
+import os
+import json
+from dotenv import load_dotenv
+from pydantic import BaseModel, ValidationError
+from typing import List
+from prompts import PROMPTS
+import google.generativeai as genai
+from fastapi import FastAPI, HTTPException
+import uvicorn
+
+# Setup
+load_dotenv()
+google_api_key = os.getenv("GOOGLE_API_KEY")
+genai.configure(api_key=google_api_key)
+model = genai.GenerativeModel(os.getenv("LLM_MODEL", "gemini-pro"))
+
+# Models
+class TopicRequest(BaseModel):
+    topic: str
+    num_questions: int = 10
+
+class GeneratedQuestionModel(BaseModel):
+    question_language: str
+    question_list: List[str]
+
+# Functions
+def chat_with_model(prompt: str) -> str:
+    try:
+        response = model.generate_content(prompt)
+        return response.text if response.text else "Error: Empty response"
+    except Exception as e:
+        return f"Error: {e}"
+
+def clean_json_text(text: str) -> str:
+    if text.startswith("Error:"):
+        return text
+    if text.startswith("```"):
+        lines = text.split('\n')
+        text = '\n'.join(lines[1:-1]) if len(lines) > 2 else text.strip("`").replace("json", "", 1).strip()
+    first, last = text.find("{"), text.rfind("}")
+    return text[first:last+1] if first != -1 and last != -1 else text
+
+def validate_answer(raw_output: str):
+    cleaned = clean_json_text(raw_output)
+    if cleaned.startswith("Error:"):
+        return {"error": cleaned, "question_language": "Odia", "question_list": []}
+    try:
+        return GeneratedQuestionModel.model_validate_json(cleaned).model_dump()
+    except ValidationError:
+        try:
+            return GeneratedQuestionModel(**json.loads(cleaned)).model_dump()
+        except Exception:
+            return {"error": "Invalid JSON", "question_language": "Odia", "question_list": []}
+
+def final_pipeline(user_input: str, num_questions: int = 10):
+    prompt = PROMPTS["questions_only"].format(language="Odia", topic=user_input, num_questions=num_questions)
+    return validate_answer(chat_with_model(prompt))
+
+# API
+app = FastAPI()
+
+@app.get("/health")
+async def health_check():
+    try:
+        # Test model connectivity
+        test_response = model.generate_content("Test")
+        return {
+            "status": "healthy",
+            "model": os.getenv("LLM_MODEL", "gemini-pro"),
+            "api_configured": bool(google_api_key)
+        }
+    except Exception as e:
+        return {
+            "status": "unhealthy",
+            "error": str(e),
+            "api_configured": bool(google_api_key)
+        }
+@app.get("/")
+async def root():
+    return {"message": "Odia Question Generating API is running", "status": "healthy"}
+
+
+@app.post("/generate-questions")
+async def generate_questions(request: TopicRequest):
+    if not request.topic.strip():
+        raise HTTPException(status_code=400, detail="Topic cannot be empty")
+    if not 1 <= request.num_questions <= 50:
+        raise HTTPException(status_code=400, detail="num_questions must be between 1 and 50")
+
+    result = final_pipeline(request.topic.strip(), request.num_questions)
+
+    if "error" in result and "Error:" in result["error"]:
+        raise HTTPException(status_code=500, detail=result["error"])
+
+    return {"success": True, "data": result}
+
+if __name__ == "__main__":
+    host = os.getenv("QUESTION_SERVICE_HOST", "0.0.0.0")
+    port = int(os.getenv("QUESTION_SERVICE_PORT", "8000"))
+    uvicorn.run(app, host=host, port=port)
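Note: a hedged request against a locally running question service (default port 8000); the response nests the validated model dump under "data":

    import requests

    r = requests.post(
        "http://127.0.0.1:8000/generate-questions",
        json={"topic": "Probability", "num_questions": 5},
        timeout=300,
    )
    r.raise_for_status()
    print(r.json()["data"]["question_list"])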
requirements.txt
ADDED
@@ -0,0 +1,21 @@
+# FastAPI and Server
+fastapi==0.115.9
+uvicorn==0.31.0
+
+# Google Generative AI
+google-generativeai==0.8.5
+
+# Gradio UI
+gradio==5.31.0
+
+# Database
+pymongo==4.11.3
+
+# Data Validation
+pydantic==2.11.5
+
+# HTTP Requests
+requests==2.32.3
+
+# Environment Variables
+python-dotenv==1.0.1
run.sh
ADDED
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+set -e
+exec /usr/bin/supervisord -c /workspace/supervisord.conf
supervisord.conf
ADDED
@@ -0,0 +1,23 @@
+[supervisord]
+nodaemon=true
+
+[program:answer_service]
+command=/usr/local/bin/python -m answer_generation
+autostart=true
+autorestart=true
+
+[program:question_service]
+command=/usr/local/bin/python -m question_generation
+autostart=true
+autorestart=true
+
+[program:ui]
+command=/usr/local/bin/python app.py
+autostart=true
+autorestart=true
+environment=UI_PORT=7861
+
+[program:nginx]
+command=/usr/sbin/nginx -g 'daemon off;'
+autostart=true
+autorestart=true
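Note: both this file and the Dockerfile reference an nginx.conf that is not among the 16 uploaded files, so the image build will fail at that COPY step. A minimal sketch of the config these files appear to assume (nginx listening on the Space port 7860 and proxying the Gradio UI, which UI_PORT above moves to 7861; the upgrade headers are needed for Gradio's websocket queue):

    events {}
    http {
        server {
            listen 7860;
            location / {
                proxy_pass http://127.0.0.1:7861;
                proxy_http_version 1.1;
                proxy_set_header Upgrade $http_upgrade;
                proxy_set_header Connection "upgrade";
                proxy_set_header Host $host;
            }
        }
    }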
tempCodeRunnerFile.py
ADDED
@@ -0,0 +1,79 @@
+from pymongo.mongo_client import MongoClient
+from pymongo.server_api import ServerApi
+import datetime
+import os
+import uuid
+from dotenv import load_dotenv
+
+# Load environment variables
+load_dotenv()
+uri = os.getenv("MONGO_URL")
+
+# Create Mongo client
+client = MongoClient(uri, server_api=ServerApi('1'))
+
+# Ping test
+try:
+    client.admin.command('ping')
+    print("✅ Connected to MongoDB!")
+except Exception as e:
+    print("❌ MongoDB connection failed:", e)
+
+# Database & collection
+db = client["ReasoningData"]
+collection = db["formatted_data"]
+
+# Global trackers
+global_question_list = []
+counter = 0
+
+
+def generate_unique_id():
+    """
+    Generates a unique ID for each question.
+    Example: ODR_20250822123456_00042
+    """
+    global counter
+    prefix = "ODR"
+    timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
+    unique_id = f"{prefix}_{timestamp}_{counter:05d}"
+    counter = (counter + 1) % 100000
+    return unique_id
+
+
+def convert_into_mongo_document(parsed_json):
+    """
+    Converts a parsed Pydantic object (or dict) into a Mongo document.
+    """
+    if hasattr(parsed_json, "dict"):  # if it's a Pydantic model
+        parsed_json = parsed_json.dict()
+
+    data = {
+        "question_id": generate_unique_id(),
+        "question_content": parsed_json.get("question_content"),
+        "answer_language": parsed_json.get("answer_language"),
+        "reasoning_content": parsed_json.get("reasoning_content"),
+        "answer_content": parsed_json.get("answer_content"),
+    }
+    return data
+
+
+def insert_into_mongo(data):
+    """
+    Inserts a document into MongoDB.
+    """
+    try:
+        data["_id"] = data["question_id"]
+        result = collection.insert_one(data)
+        global_question_list.append(result.inserted_id)
+        print("✅ Inserted document ID:", result.inserted_id)
+    except Exception as e:
+        print("❌ Error inserting document:", e)
+
+
+def db_pipelining(parsed_json):
+    """
+    Full pipeline: convert -> insert.
+    """
+    data = convert_into_mongo_document(parsed_json)
+    insert_into_mongo(data)