from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime
from pathlib import Path
import re
import requests

import feedparser
from loguru import logger
from typing import Any
from src.models.article import Article


def _fetch_arxiv(start_date: datetime, end_date: datetime, category: str, max_results: int = 1000) -> Any:
    """
    Fetch arXiv articles from the API within a date range and category. Keyword search is not supported.

    Args:
        start_date: Start date as datetime object
        end_date: End date as datetime object
        category: arXiv category (e.g., 'physics.ao-ph')
        max_results: Maximum number of results to fetch (default: 1000)

    Returns:
        Parsed feed object from feedparser

    Raises:
        requests.RequestException: If the HTTP request fails, times out,
            or returns a non-2xx status.
    """
    # arXiv suggests a maximum of 1000 results per request
    if max_results <= 0 or max_results > 1000:
        max_results = 1000
        logger.warning("max_results should be between 1 and 1000. Defaulting to 1000")

    # arXiv expects YYYYMMDDHHMM; widen to the full day at both ends
    start_str = start_date.strftime("%Y%m%d0000")
    end_str = end_date.strftime("%Y%m%d2359")

    # "+" stands in for spaces in the arXiv query syntax, so the URL is
    # assembled by hand rather than via params=/urlencode.
    base_url = "http://export.arxiv.org/api/query?"
    submitted_date = f"[{start_str}+TO+{end_str}]"
    query = f"search_query=cat:{category}+AND+submittedDate:{submitted_date}"
    sort = "&sortBy=submittedDate&sortOrder=descending"
    url = f"{base_url}{query}&max_results={max_results}{sort}"

    # Without a timeout, a stalled arXiv endpoint would hang the caller
    # indefinitely; 30 s is generous for a feed of up to 1000 entries.
    response = requests.get(url, timeout=30)
    response.raise_for_status()

    feed = feedparser.parse(response.content)
    return feed


def _parse_feed_entry(entry: Any) -> Article:
    """
    Convert one arXiv feed entry into a structured Article.

    Args:
        entry: A single entry from the arXiv feed

    Returns:
        Article object with parsed metadata

    Raises:
        ValueError: If the entry carries no link of type application/pdf.
    """
    # Newlines (plus any trailing indentation) in titles/abstracts are
    # collapsed into single spaces.
    flatten = re.compile(r"\n\s*")

    fields = {
        "id": entry.id,
        "title": flatten.sub(" ", entry.title).strip(),
        "time": entry.published,
        "authors": [author.name for author in entry.authors],
        "summary": flatten.sub(" ", entry.summary).strip(),
    }

    # An entry without a PDF link is unusable downstream.
    pdf_link = next((link.href for link in entry.links if link.type == "application/pdf"), None)
    if pdf_link is None:
        raise ValueError("No PDF link found in entry")
    fields["pdf_link"] = pdf_link

    return Article(**fields)


def _parse_arxiv_feed(feed: Any) -> list[Article]:
    """
    Parse every entry of an arXiv feed, skipping entries that fail to parse.

    Args:
        feed: Parsed feed object from feedparser

    Returns:
        List of Article objects

    Raises:
        ValueError: If the feed is empty, reports an API error, or yields
            no valid articles at all.
    """
    entries = feed.entries
    if not entries:
        raise ValueError("No entries found in the feed")

    # arXiv reports API-level failures as a single entry titled "Error".
    if len(entries) == 1 and entries[0].title == "Error":
        raise ValueError(f"ArXiv API error: {entries[0].summary}")

    articles: list[Article] = []
    skipped = 0
    for entry in entries:
        # Best-effort parse: one malformed entry must not abort the feed.
        try:
            articles.append(_parse_feed_entry(entry))
        except Exception as exc:
            logger.warning(f"Skipping entry due to error: {exc}")
            skipped += 1

    if not articles:
        raise ValueError(f"No valid articles found after parsing {len(entries)} entries")

    logger.info(f"Successfully parsed {len(articles)} articles, skipped {skipped}")

    return articles


def search_arxiv(start_date: str, end_date: str, max_results: int = 1000) -> list[Article]:
    """
    Search for atmospheric and oceanic physics articles on arXiv within a date range.

    Args:
        start_date: Start date in YYYY-MM-DD format
        end_date: End date in YYYY-MM-DD format
        max_results: Maximum number of results to return (default: 1000)

    Returns:
        List of Article objects

    Raises:
        ValueError: If a date is malformed, the end date precedes the start
            date, the range exceeds 31 days, or the feed cannot be parsed.
        requests.RequestException: If the arXiv request fails.
    """
    logger.info(f"Searching arXiv for physics.ao-ph articles from {start_date} to {end_date}")

    # Validate date range (max 31 days); strptime raises ValueError on bad input
    start_dt = datetime.strptime(start_date, "%Y-%m-%d")
    end_dt = datetime.strptime(end_date, "%Y-%m-%d")

    if end_dt < start_dt:
        raise ValueError("End date must be after start date")

    date_diff = (end_dt - start_dt).days
    if date_diff > 31:
        raise ValueError("Because of the current design, the maximum permitted date range is 31 days.")

    try:
        feed = _fetch_arxiv(start_dt, end_dt, "physics.ao-ph", max_results=max_results)
        articles = _parse_arxiv_feed(feed)
    except requests.RequestException as e:
        logger.error(f"Failed to retrieve data from arXiv due to a network issue: {e}")
        raise
    except ValueError as e:
        logger.error(f"Failed to retrieve data from arXiv: {e}")
        raise

    # _fetch_arxiv clamps out-of-range max_results to 1000, so the truncation
    # warning must compare against the cap that was actually applied —
    # otherwise it could never fire for max_results > 1000.
    effective_cap = max_results if 0 < max_results <= 1000 else 1000
    if len(articles) == effective_cap:
        logger.warning("Maximum number of results reached. Some articles may not be included")

    return articles


def download_pdfs(articles: list[Article], output_dir: Path, max_workers: int = 4) -> None:
    """
    Download the PDFs for a batch of articles concurrently.

    Failures are logged per article; the function never raises on a failed
    download and always processes the whole batch.

    Args:
        articles: List of Article objects containing PDF URLs.
        output_dir: The local folder to save the downloaded files.
        max_workers: The maximum number of worker threads to use for downloading.
    """
    logger.info("Downloading PDFs...")
    failed = 0
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        # Map each future back to its article so errors can name the URL.
        pending = {pool.submit(item.download_pdf, output_dir): item for item in articles}
        for done in as_completed(pending):
            try:
                done.result()
            except Exception as err:
                logger.error(f"Failed to download {pending[done].pdf_link}: {err}")
                failed += 1
    total = len(articles)
    if failed > 0:
        logger.warning(f"Failed to download {failed} out of {total} PDFs to {output_dir.resolve()}")
    else:
        logger.info(f"Successfully downloaded all {total} PDFs to {output_dir.resolve()}")
