from dataclasses import asdict
import json
from pathlib import Path
from loguru import logger

from src.models.article import Article
from src.utils import prompts
from src.services.llm_client import send_async_requests


def filter_articles(articles: list[Article], batch_size: int, model: str = "openai/gpt-4.1") -> list[Article]:
    """
    Filter articles using a language model.
    NOTE: It utilizes the Structured Outputs feature of OpenRouter's API to return a JSON object.
        Not all models support this feature. OpenAI models (GPT-4o and later versions) are highly recommended.
        Check https://openrouter.ai/docs/features/structured-outputs for details.

    Args:
        articles: List of Article objects to filter
        batch_size: Number of articles to send in each request. 5 is a good default value.
            Must be >= 1.
        model: Model to use for filtering
    Returns:
        List of Article objects that match the filtering criteria
    Raises:
        ValueError: If batch_size is less than 1.
    """
    if not articles:
        logger.error("No articles provided for filtering")
        return []
    # Guard: range(0, n, 0) raises an opaque ValueError and a negative step
    # would silently produce zero batches — fail loudly and clearly instead.
    if batch_size < 1:
        raise ValueError(f"batch_size must be >= 1, got {batch_size}")

    num_batches = (len(articles) + batch_size - 1) // batch_size
    logger.info(f"Filtering articles related to AI-based weather and climate research in {num_batches} batches...")
    messages_batches = []
    # Only include the id, title and summary to reduce cost (loop-invariant, hoisted)
    keys = ["id", "title", "summary"]
    # Split articles into batches to reduce the hallucination of the LLM
    for idx in range(0, len(articles), batch_size):
        batch = articles[idx: idx + batch_size]
        articles_json = json.dumps(
            [{key: asdict(article).get(key) for key in keys} for article in batch],
            indent=2,
        )
        messages = [
            {"role": "system", "content": prompts.system_prompt_json},
            {"role": "user", "content": prompts.user_prompt_filter.format(articles_json=articles_json)},
        ]
        messages_batches.append(messages)

    # Load the JSON schema for the response format
    schema_path = Path(__file__).parent.parent.parent / "config" / "article_list_schema.json"
    with open(schema_path, "r") as f:
        response_format = json.load(f)
    # Send the messages to the LLM in batches
    responses = send_async_requests(messages_batches, model=model, response_format=response_format)
    # Each response is expected to hold a list of "http://arxiv.org/abs/<id>" strings
    try:
        selected_ids = [aid for res in responses if res is not None for aid in json.loads(res)["articles"]]
    except Exception as e:
        logger.error(f"Error processing responses: {e}.\nPlease check the prompts or try again later")
        return []
    if len(set(selected_ids)) != len(selected_ids):
        logger.warning("LLM returned duplicate IDs. Will remove duplicates")
    # Collect well-formed IDs into a set: this deduplicates for real and avoids
    # the remove-while-iterating bug of mutating selected_ids during the loop.
    valid_ids: set[str] = set()
    for aid in selected_ids:
        if not aid.startswith("http://arxiv.org/abs/"):
            logger.warning(f"Found invalid article ID format returned: {aid}. Will ignore")
        else:
            valid_ids.add(aid)
    # Set membership keeps this pass O(len(articles)) instead of O(n*m)
    selected_articles = [article for article in articles if article.id in valid_ids]
    # Compare against the deduplicated count so duplicates alone don't trigger
    # a spurious "non-existent IDs" warning.
    if len(valid_ids) != len(selected_articles):
        logger.warning("LLM returned some IDs that do not exist in the original set. Will ignore")
    logger.info(f"{len(selected_articles)} articles selected")
    return selected_articles
