# MetaSearch/pipeline/meta_review.py
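"""Meta-review generation stage of the MetaSearch pipeline.

Aggregates the per-disagreement resolutions, builds a combined prompt, and
asks an OpenRouter-hosted model (with fallbacks) to write the final
meta-review. This module uses OpenRouter exclusively; the earlier
google-genai path was removed.
"""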
import asyncio
import json
import os
from typing import Dict, List, Tuple

from dotenv import load_dotenv
from openai import OpenAI
from pydantic import BaseModel
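
# Load environment variables (e.g. OPENROUTER_API_KEY) from a local .env file, if present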
load_dotenv()
# Initialize OpenRouter client
client = OpenAI(
base_url="https://openrouter.ai/api/v1",
api_key=os.getenv("OPENROUTER_API_KEY"),
)
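
# All requests go through OpenRouter's OpenAI-compatible endpoint, so the
# standard OpenAI client can be reused unchanged.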
# Model priority list, tried in order (kept consistent with the other pipeline files)
MODELS = [
"deepseek/deepseek-r1",
"deepseek/deepseek-r1-distill-llama-70b",
"google/gemini-2.5-flash-lite"
]
class MetaReviewResult(BaseModel):
meta_review: str
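
# NOTE: MetaReviewResult is currently unused; the prompt below requests plain
# text rather than JSON. It is kept in case structured output is reintroduced.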
def construct_meta_review_prompt(
paper_title: str,
paper_abstract: str,
resolutions: List[Dict],
search_results: Dict
) -> Tuple[str, str]:
    """
    Construct the (system_prompt, user_prompt) pair for meta-review generation.
    """
# Aggregate all resolutions
all_accepted = {}
all_rejected = {}
resolution_summaries = []
    for resolution in resolutions:
        details = resolution.get('resolution_details', {})

        # Merge accepted points, keyed by critique category
        accepted = details.get('accepted_critique_points', {})
        for category, points in accepted.items():
            all_accepted.setdefault(category, []).extend(points)

        # Merge rejected points the same way
        rejected = details.get('rejected_critique_points', {})
        for category, points in rejected.items():
            all_rejected.setdefault(category, []).extend(points)

        # Collect per-disagreement resolution summaries
        summary = details.get('final_resolution_summary', '')
        if summary:
            resolution_summaries.append(summary)
system_prompt = """
You are an expert meta-reviewer. Your task is to generate a structured, comprehensive
meta-review based on reviewer critiques, disagreements, and resolutions.
Your review should be clear, concise, well-structured, and provide actionable feedback.
Respond with ONLY the meta-review text (no JSON, no preamble).
"""
user_prompt = f"""
### **Paper Details**
**Title:** {paper_title}
**Abstract:** {paper_abstract}
### **Disagreement Resolution Summaries**
{chr(10).join(f"- {summary}" for summary in resolution_summaries)}
### **Accepted Critique Points (Valid Feedback)**
{json.dumps(all_accepted, indent=2)}
### **Rejected Critique Points (Unjustified Criticism)**
{json.dumps(all_rejected, indent=2)}
### **State-of-the-Art (SoTA) Findings**
{search_results.get('SoTA_Results', '')[:2000]}
### **Retrieved Evidence for Validation**
{json.dumps(search_results.get('Retrieved_Evidence', {}), indent=2)[:2000]}
### **Meta-Review Task**
Generate a comprehensive meta-review that:
1. Summarizes the paper's main contribution and approach
2. Discusses the strengths of the paper (based on accepted critiques and evidence)
3. Discusses the weaknesses and concerns (based on valid accepted critiques)
4. Addresses key disagreements among reviewers and how they were resolved
5. Compares the paper's claims with state-of-the-art research
6. Provides a final verdict on the paper's quality, novelty, significance, and clarity
7. Offers constructive recommendations for improvement
Format the meta-review professionally with clear sections.
"""
return system_prompt, user_prompt
async def generate_meta_review(
    paper_title: str,
    paper_abstract: str,
    resolutions: List[Dict],
    search_results: Dict,
    retries: int = 3,  # Attempts per model before falling back to the next one
) -> str:
    """
    Generate a meta-review via OpenRouter, trying each model in MODELS in
    priority order with per-model retries.
    """
if not resolutions:
return "Unable to generate meta-review: No disagreement resolutions available."
system_prompt, user_prompt = construct_meta_review_prompt(
paper_title,
paper_abstract,
resolutions,
search_results
)
messages = [
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_prompt},
]
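
    # Try each model in priority order; within a model, retry with exponential
    # backoff, and fall through to the next model immediately on payment errors.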
last_exception = None
for model in MODELS:
print(f"Generating Meta-Review with model: {model}")
for attempt in range(retries):
try:
response = await asyncio.to_thread(
client.chat.completions.create,
model=model,
messages=messages,
                    # Cap max_tokens: 8192 is ample for a meta-review (~6,000
                    # words) and far cheaper than a 64k cap, which can trigger
                    # 402 (insufficient credits) errors
                    max_tokens=8192,
)
                content = response.choices[0].message.content if response.choices else None
                if not content or not content.strip():
                    raise ValueError("Empty response from model")
                meta_review_text = content.strip()
                # Strip accidental Markdown code fences from the output
                if meta_review_text.startswith("```"):
                    lines = meta_review_text.split("\n")[1:]
                    if lines and lines[-1].startswith("```"):
                        lines = lines[:-1]
                    meta_review_text = "\n".join(lines)
return meta_review_text
            except Exception as e:
                last_exception = e
                error_msg = str(e)
                print(f"Model {model} - Attempt {attempt + 1} failed: {error_msg}")

                # Payment/quota errors won't resolve on retry: skip straight
                # to the next (cheaper) model
                if "402" in error_msg or "insufficient_quota" in error_msg:
                    print("Insufficient credits detected. Switching to cheaper model...")
                    break

                # Exponential backoff before the next attempt
                if attempt < retries - 1:
                    await asyncio.sleep(2 ** attempt)
return f"Error generating meta-review after trying all models: {str(last_exception)}"