#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Extract abstracts from AMOS papers and compile them into an abstract book.

Usage:
  python src/extract_abstracts.py --csv data/metadata_2025.csv --output data/abstracts_2025.pdf
"""

from __future__ import annotations

import argparse
import csv
import os
import re
import time
from dataclasses import dataclass
from typing import List, Optional

import requests
from bs4 import BeautifulSoup, NavigableString, Tag
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from tqdm import tqdm

from reportlab.lib.pagesizes import A4
from reportlab.pdfgen import canvas
from reportlab.lib.units import mm
from reportlab.lib import colors
from reportlab.platypus import Paragraph, Frame
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.enums import TA_LEFT, TA_CENTER, TA_JUSTIFY
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont

from pypdf import PdfReader, PdfWriter
from io import BytesIO


@dataclass
class AbstractEntry:
    """One paper's metadata plus its extracted abstract text.

    ``pages`` and ``start_page`` default to 0 and are filled in later during
    PDF layout by ``_measure_abstract_pages`` and ``_assign_start_pages``.
    """
    title: str        # paper title from the metadata CSV
    authors: str      # author list string as found in the CSV
    keywords: str     # keyword string as found in the CSV
    abstract: str     # extracted abstract text; paragraphs separated by blank lines
    paper_url: str    # source page URL the abstract was scraped from
    pages: int = 0        # number of content-PDF pages this entry occupies
    start_page: int = 0   # 1-based page within the content section where this entry starts


def get_session(timeout: float = 30.0) -> requests.Session:
    """Build a requests Session with retries, connection pooling, and a default timeout."""
    sess = requests.Session()

    # Retry transient failures (rate limits and 5xx) with exponential backoff.
    retry_policy = Retry(
        total=5,
        backoff_factor=0.5,
        status_forcelist=(429, 500, 502, 503, 504),
        allowed_methods=("GET", "HEAD"),
        raise_on_status=False,
    )
    pooled_adapter = HTTPAdapter(max_retries=retry_policy, pool_connections=50, pool_maxsize=50)
    for scheme in ("http://", "https://"):
        sess.mount(scheme, pooled_adapter)

    sess.headers.update({
        "User-Agent": "Mozilla/5.0 (compatible; AMOS-Scraper/1.0; +https://amostech.space/)",
        "Accept-Language": "en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7",
    })

    # Ensure every request through this session carries a timeout by default.
    sess.request = _wrap_timeout(sess.request, timeout)
    return sess


def _wrap_timeout(request_func, timeout_default: float):
    def _wrapped(method, url, **kwargs):
        if "timeout" not in kwargs:
            kwargs["timeout"] = timeout_default
        return request_func(method, url, **kwargs)
    return _wrapped


def extract_abstract_from_url(session: requests.Session, url: str, delay: float = 0.5) -> Optional[str]:
    """Extract abstract text from a paper page URL.

    Preference: use <p> block segmentation inside the Abstract section; ignore <br/> inside a <p>.
    Fallback: text slice between 'Abstract:' and 'Date of Conference:'.

    A politeness ``time.sleep(delay)`` runs on every return path, including
    errors. Returns None when no abstract could be found or the request
    failed; exceptions are printed rather than raised, so one bad page does
    not abort a batch run.
    """
    try:
        r = session.get(url)
        r.raise_for_status()
        soup = BeautifulSoup(r.text, "lxml")

        # Try DOM-driven extraction starting from the 'Abstract:' label
        label = soup.find(string=re.compile(r"^\s*Abstract:\s*$", re.IGNORECASE))
        paragraphs: List[str] = []
        if label:
            # Iterate through subsequent elements until we hit 'Date of Conference:' marker
            for el in label.parent.next_elements:
                if isinstance(el, NavigableString):
                    txt = str(el)
                    if re.search(r"Date of Conference:\s*", txt, re.IGNORECASE):
                        break
                    continue
                if isinstance(el, Tag):
                    # Stop if we've reached another section header containing 'Date of Conference:'
                    if el.get_text(" ", strip=True) and re.search(r"Date of Conference:\s*", el.get_text(" ", strip=True), re.IGNORECASE):
                        break
                    # Only collect <p> blocks; replace <br> with spaces
                    if el.name == 'p':
                        txt = el.get_text(" ", strip=True)
                        if txt:
                            paragraphs.append(txt)
                    # If we have left the abstract container (heuristic): stop when we hit a <h> tag
                    if el.name and el.name.startswith('h'):
                        # Heading indicates next section; stop
                        break
            if paragraphs:
                # Join <p> blocks with blank lines so downstream paragraph
                # splitting can recover them (see _split_into_paragraphs).
                abstract = "\n\n".join(paragraphs).strip()
                time.sleep(delay)
                return abstract if abstract else None

        # Fallback: slice text between markers
        page_text = soup.get_text("\n", strip=False)
        abstract_match = re.search(r'Abstract:\s*(.*?)\s*Date of Conference:', page_text, re.IGNORECASE | re.DOTALL)
        if abstract_match:
            abstract = abstract_match.group(1)
            abstract = abstract.replace('\r\n', '\n').replace('\r', '\n')
            # Collapse excessive blank lines
            abstract = re.sub(r"\n\s*\n\s*\n+", "\n\n", abstract)
            abstract = abstract.strip()
            time.sleep(delay)
            return abstract if abstract else None

        time.sleep(delay)
        return None

    except Exception as e:
        # Best-effort scraping: log the failure and keep going.
        print(f"  [Error extracting abstract from {url}]: {e}")
        time.sleep(delay)
        return None


def read_metadata(csv_path: str) -> List[dict]:
    """Load the metadata CSV, keeping only rows that have both a title and a paper URL."""
    with open(csv_path, newline='', encoding='utf-8') as fh:
        return [row for row in csv.DictReader(fh)
                if row.get('title') and row.get('paper_url')]


def collect_abstracts(csv_path: str, delay: float = 0.5, cache_file: Optional[str] = None, refresh: bool = False) -> List[AbstractEntry]:
    """Collect abstracts for all papers listed in the metadata CSV.

    Results are cached as JSON when ``cache_file`` is given; a warm cache is
    reused unless ``refresh`` is True, skipping all network work.

    Args:
        csv_path: Path to the metadata CSV ('title' and 'paper_url' columns required).
        delay: Politeness delay between HTTP requests, in seconds.
        cache_file: Optional JSON cache path (read before fetching, written after).
            Fixed annotation: was ``str = None``, now ``Optional[str]`` per PEP 484.
        refresh: When True, ignore any existing cache and re-fetch everything.

    Returns:
        List of AbstractEntry, one per paper whose abstract could be extracted.
    """
    import json

    # Warm-cache fast path: no session, no CSV parse, no network.
    if not refresh and cache_file and os.path.exists(cache_file):
        print(f"Loading abstracts from cache: {cache_file}")
        with open(cache_file, 'r', encoding='utf-8') as f:
            cached = json.load(f)
        abstracts = [AbstractEntry(**item) for item in cached]
        print(f"Loaded {len(abstracts)} abstracts from cache")
        return abstracts

    session = get_session()
    metadata = read_metadata(csv_path)

    print(f"Found {len(metadata)} papers in {csv_path}")
    print("Extracting abstracts...")

    abstracts = []
    for row in tqdm(metadata, desc="Fetching abstracts"):
        title = row.get('title', '').strip()
        paper_url = row.get('paper_url', '').strip()

        abstract = extract_abstract_from_url(session, paper_url, delay=delay)

        if abstract:
            abstracts.append(AbstractEntry(
                title=title,
                authors=row.get('authors', '').strip(),
                keywords=row.get('keywords', '').strip(),
                abstract=abstract,
                paper_url=paper_url,
            ))
        else:
            print(f"  [Warning] No abstract found for: {title}")

    print(f"\nSuccessfully extracted {len(abstracts)} abstracts")

    # Persist to cache so subsequent runs can take the fast path above.
    if cache_file:
        print(f"Saving abstracts to cache: {cache_file}")
        os.makedirs(os.path.dirname(cache_file) or '.', exist_ok=True)
        with open(cache_file, 'w', encoding='utf-8') as f:
            cached = [{'title': e.title, 'authors': e.authors, 'keywords': e.keywords,
                       'abstract': e.abstract, 'paper_url': e.paper_url} for e in abstracts]
            json.dump(cached, f, ensure_ascii=False, indent=2)

    return abstracts


def generate_abstract_book(abstracts: List[AbstractEntry], output_path: str, year: int) -> None:
    """Generate a PDF book of abstracts with cover, TOC, page numbers, and formatted entries.

    Final layout: cover | blank | TOC [| blank so content starts on an odd sheet] | content.

    Args:
        abstracts: Entries to include; their ``pages``/``start_page`` fields are mutated.
        output_path: Destination PDF path (parent directories are created).
        year: Conference year shown on the cover.
    """
    import tempfile

    def _blank_page():
        # Render a single empty A4 page in memory, for use as a spacer.
        packet = BytesIO()
        blank = canvas.Canvas(packet, pagesize=A4)
        blank.showPage()
        blank.save()
        packet.seek(0)
        return PdfReader(packet).pages[0]

    with tempfile.TemporaryDirectory() as tmpdir:
        cover_path = os.path.join(tmpdir, 'cover.pdf')
        toc_path = os.path.join(tmpdir, 'toc.pdf')
        content_path = os.path.join(tmpdir, 'content.pdf')

        # Cover (1 page) and the main content section.
        cover_pages = _generate_cover(cover_path, year)
        _generate_content(content_path, abstracts)

        # Measure per-abstract page counts so the TOC can show start pages.
        content_reader = PdfReader(content_path)
        _measure_abstract_pages(abstracts, content_reader)

        # Two-pass TOC: its own page count shifts the content start pages,
        # so generate with an assumed length first, then regenerate.
        toc_pages = 1
        _assign_start_pages(abstracts, cover_pages, toc_pages)
        toc_pages = _generate_toc(toc_path, abstracts)
        _assign_start_pages(abstracts, cover_pages, toc_pages)
        toc_pages = _generate_toc(toc_path, abstracts)

        # Merge cover + blank + TOC (+ optional blank) + content.
        writer = PdfWriter()
        for page in PdfReader(cover_path).pages:
            writer.add_page(page)
        writer.add_page(_blank_page())  # blank page after the cover
        for page in PdfReader(toc_path).pages:
            writer.add_page(page)

        # Ensure content starts on an odd (recto) sheet.
        total_before_content = cover_pages + 1 + toc_pages  # cover + blank + toc
        if total_before_content % 2 == 1:
            writer.add_page(_blank_page())
            total_before_content += 1

        content_reader = PdfReader(content_path)
        for page in content_reader.pages:
            writer.add_page(page)

        # Bookmarks: start_page is 1-based within the content section, so the
        # absolute writer index must include the front matter offset.
        # (Bug fix: the offset was previously computed but never applied,
        # leaving bookmarks pointing into the cover/TOC pages.)
        for entry in abstracts:
            writer.add_outline_item(entry.title, total_before_content + entry.start_page - 1)

        # Stamp page numbers on content pages only (front matter stays clean).
        _overlay_page_numbers(writer, total_before_content, len(content_reader.pages))

        os.makedirs(os.path.dirname(output_path) or '.', exist_ok=True)
        with open(output_path, 'wb') as f:
            writer.write(f)

        print(f"\n✅ Abstract book generated: {output_path}")


def _generate_cover(cover_path: str, year: int) -> int:
    """Draw the one-page cover (blue background, centered white text). Returns 1."""
    c = canvas.Canvas(cover_path, pagesize=A4)
    width, height = A4

    # Solid blue backdrop covering the full page.
    c.setFillColor(colors.HexColor('#0b3d91'))
    c.rect(0, 0, width, height, fill=1, stroke=0)

    # All cover text is white and horizontally centered; each tuple is
    # (font name, size, vertical position as a fraction of page height, text).
    c.setFillColor(colors.white)
    lines = (
        ('Helvetica-Bold', 42, 0.65, f"AMOS {year}"),
        ('Helvetica-Bold', 32, 0.58, "Technical Papers"),
        ('Helvetica-Bold', 28, 0.52, "Abstract Collection"),
        ('Helvetica', 14, 0.44, "Advanced Maui Optical and"),
        ('Helvetica', 14, 0.41, "Space Surveillance Technologies Conference"),
        ('Helvetica', 10, 0.08, "Compiled from AMOS Technical Library"),
    )
    for font_name, font_size, y_fraction, text in lines:
        c.setFont(font_name, font_size)
        c.drawCentredString(width / 2, height * y_fraction, text)

    c.showPage()
    c.save()
    return 1


def _generate_content(content_path: str, abstracts: List[AbstractEntry]) -> None:
    """Generate content pages with all abstracts using proper frame flow.

    Each entry is laid out as title / authors / keywords / abstract
    paragraphs, with the header kept on the same page as the first abstract
    paragraph. Fixes from the previous version: the margins were assigned
    twice, and the abstract-paragraph construction was accidentally nested
    under the keywords branch, so an entry without keywords either raised
    NameError (first entry) or silently reused the previous entry's text.
    """
    from reportlab.platypus import SimpleDocTemplate, Spacer, KeepTogether

    margin = 25 * mm  # uniform page margin

    # Create document
    doc = SimpleDocTemplate(
        content_path,
        pagesize=A4,
        leftMargin=margin,
        rightMargin=margin,
        topMargin=margin,
        bottomMargin=margin
    )

    # Fonts: try to use a Unicode TTF if available, fall back to Helvetica
    body_font, bold_font, italic_font = _setup_fonts()

    # Styles
    styles = getSampleStyleSheet()
    title_style = ParagraphStyle(
        'CustomTitle',
        parent=styles['Heading2'],
        fontSize=12,
        textColor=colors.HexColor('#0b3d91'),
        spaceAfter=8,
        spaceBefore=12,
        leading=14,
        alignment=TA_LEFT,
        fontName=bold_font,
    )

    author_style = ParagraphStyle(
        'CustomAuthor',
        parent=styles['Normal'],
        fontSize=10,
        textColor=colors.HexColor('#555555'),
        spaceAfter=6,
        leading=12,
        fontName=italic_font,
    )

    keyword_style = ParagraphStyle(
        'CustomKeyword',
        parent=styles['Normal'],
        fontSize=9,
        textColor=colors.HexColor('#666666'),
        spaceAfter=8,
        leading=11,
        fontName=body_font,
    )

    abstract_style = ParagraphStyle(
        'CustomAbstract',
        parent=styles['Normal'],
        fontSize=10,
        spaceAfter=20,
        leading=13,
        alignment=TA_JUSTIFY,
        fontName=body_font,
    )

    # Build story (flowable elements)
    story = []

    for entry in abstracts:
        # Normalize strings to avoid unsupported glyphs (e.g., okina in Hawai'i)
        title_txt = _normalize_text(entry.title)
        authors_txt = _normalize_text(entry.authors)
        keywords_txt = _normalize_text(entry.keywords)
        abstract_txt = _normalize_text(entry.abstract)

        # Header: title, then optional authors and keywords lines.
        header_elements = [Paragraph(f"<b>{_escape_xml(title_txt)}</b>", title_style)]
        if authors_txt:
            header_elements.append(Paragraph(f"<i>{_escape_xml(authors_txt)}</i>", author_style))
        if keywords_txt:
            header_elements.append(
                Paragraph(f"<b>Keywords:</b> {_escape_xml(keywords_txt)}", keyword_style))

        # Abstract paragraphs: split into true paragraphs; do not use <br/>.
        # (Bug fix: always computed now, regardless of the keywords branch.)
        paragraphs = _split_into_paragraphs(abstract_txt)
        para_flowables = [Paragraph(_escape_xml(p), abstract_style) for p in paragraphs]

        if para_flowables:
            # Keep header and the first paragraph together so the abstract
            # starts on the same page as its title.
            story.append(KeepTogether(header_elements + [para_flowables[0]]))
            story.extend(para_flowables[1:])
        else:
            story.append(KeepTogether(header_elements))

        # Thin separator rule between entries.
        story.append(Spacer(1, 6))
        story.append(Paragraph("_" * 80, ParagraphStyle('Sep', fontSize=1, textColor=colors.grey)))
        story.append(Spacer(1, 12))

    # Build PDF
    doc.build(story)


def _escape_xml(text: str) -> str:
    """Escape XML/HTML special characters for ReportLab."""
    text = text.replace('&', '&amp;')
    text = text.replace('<', '&lt;')
    text = text.replace('>', '&gt;')
    return text


def _normalize_text(text: str) -> str:
    """Replace unsupported Unicode punctuation with ASCII fallbacks and tidy whitespace.
    Helps avoid black squares when Unicode fonts are unavailable (e.g., okina in Hawaiʻi).
    """
    if not text:
        return text
    subs = {
        '\u2018': "'",  # left single quote
        '\u2019': "'",  # right single quote
        '\u201C': '"',   # left double quote
        '\u201D': '"',   # right double quote
        '\u2013': '-',   # en dash
        '\u2014': '--',  # em dash
        '\u2212': '-',   # minus sign
        '\u00A0': ' ',   # no-break space
        '\u2022': '-',   # bullet
        '\u02BB': "'",  # okina (modifier letter turned comma)
        '\u02BC': "'",  # modifier letter apostrophe
    }
    for k, v in subs.items():
        text = text.replace(k, v)
    return text


def _split_into_paragraphs(text: str) -> List[str]:
    """Split text into paragraphs by blank lines only (align with <p> segmentation)."""
    if not text:
        return []
    text = text.replace('\r\n', '\n').replace('\r', '\n')
    # Replace 3+ blank lines with a single blank line
    text = re.sub(r"\n\s*\n\s*\n+", "\n\n", text)
    paras = [p.strip() for p in text.split('\n\n')]
    return [p for p in paras if p]


def _measure_abstract_pages(abstracts: List[AbstractEntry], reader: PdfReader) -> None:
    """Measure how many PDF pages each abstract occupies by tracking separator positions."""
    # We'll use a heuristic: count pages and divide by number of abstracts evenly
    # For now, assume 1 page per abstract as placeholder; real logic would parse content PDF
    total_pages = len(reader.pages)
    if not abstracts:
        return
    # Simple: divide pages evenly among abstracts
    avg = max(1, total_pages // len(abstracts))
    for e in abstracts:
        e.pages = avg


def _assign_start_pages(abstracts: List[AbstractEntry], cover_pages: int, toc_pages: int) -> None:
    """Assign start_page for each abstract. Page numbers start at 1 for first content page."""
    cur = 1  # Content pages start at 1
    for e in abstracts:
        e.start_page = cur
        cur += max(1, e.pages)


def _generate_toc(toc_path: str, abstracts: List[AbstractEntry]) -> int:
    """Generate table of contents for abstracts. Returns number of TOC pages.

    Layout per entry: a title line (truncated to fit, with a dotted leader to
    a right-aligned page number) followed by an optional greyed author line.
    Flows onto 'Table of Contents (cont.)' pages when the cursor reaches the
    bottom margin.
    """
    c = canvas.Canvas(toc_path, pagesize=A4)
    width, height = A4
    left = 20 * mm
    right = width - 20 * mm
    top = height - 20 * mm
    bottom = 20 * mm
    
    # (font name, size) pairs, unpacked with * into setFont/stringWidth.
    header_style = ('Helvetica-Bold', 20)
    title_style = ('Helvetica-Bold', 11)
    author_style = ('Helvetica', 10)
    
    line_height = 13
    max_title_chars = 75  # coarse character cap before width-based trimming below
    
    y = top            # vertical cursor, moves downward
    pages_used = 1
    
    def new_page():
        # Flush the current page and reset the cursor to the top margin.
        nonlocal y, pages_used
        c.showPage()
        y = top
        pages_used += 1
    
    # Header
    c.setFont(*header_style)
    c.drawString(left, y, 'Table of Contents')
    y -= 24
    
    for e in abstracts:
        # Title
        c.setFont(*title_style)
        title_text = e.title
        if len(title_text) > max_title_chars:
            title_text = title_text[:max_title_chars - 3] + '...'
        
        # Break to a continuation page if the title line would not fit.
        if y - line_height < bottom:
            new_page()
            c.setFont(*header_style)
            c.drawString(left, y, 'Table of Contents (cont.)')
            y -= 24
            c.setFont(*title_style)
        
        # Draw title with page number
        page_num = str(e.start_page)
        page_area_width = 40  # points reserved on the right for the page number
        title_max_width = right - left - page_area_width - 10
        
        # Trim further until the rendered title fits the available width.
        while c.stringWidth(title_text, *title_style) > title_max_width and len(title_text) > 10:
            title_text = title_text[:-4] + '...'
        
        c.drawString(left, y, title_text)
        title_end_x = left + c.stringWidth(title_text, *title_style)
        dots_start = title_end_x + 5
        dots_end = right - page_area_width
        # Dotted leader between title and page number, when there is room.
        if dots_start < dots_end - 10:
            c.setDash(1, 3)
            c.line(dots_start, y + 3, dots_end, y + 3)
            c.setDash()
        c.drawRightString(right, y, page_num)
        y -= line_height
        
        # Authors
        if e.authors:
            c.setFont(*author_style)
            authors_line = e.authors
            # Compress long author lists to a "First Author et al." style line.
            if ';' in authors_line:
                authors_line = authors_line.split(';')[0].strip() + ' et al.'
            elif ',' in authors_line and authors_line.count(',') > 1:
                parts = authors_line.split(',')
                if len(parts) >= 2:
                    authors_line = f"{parts[0].strip()}, {parts[1].strip()} et al."
                else:
                    authors_line = parts[0].strip() + ' et al.'
            
            max_author_width = right - left - 10
            while c.stringWidth(authors_line, *author_style) > max_author_width and len(authors_line) > 20:
                authors_line = authors_line[:len(authors_line)-4] + '...'
            
            # Break to a continuation page if the author line would not fit.
            if y - line_height < bottom:
                new_page()
                c.setFont(*header_style)
                c.drawString(left, y, 'Table of Contents (cont.)')
                y -= 24
                c.setFont(*author_style)
            
            c.setFillColor(colors.gray)
            c.drawString(left, y, authors_line)
            c.setFillColor(colors.black)
            y -= line_height
        
        # Extra spacing between TOC entries.
        y -= 4
    
    c.showPage()
    c.save()
    return pages_used


def _overlay_page_numbers(writer: PdfWriter, skip_pages: int, content_pages: int) -> None:
    """Overlay page numbers on content pages (numbered from 1).

    Args:
        writer: Merged document; its first ``skip_pages`` pages are front matter.
        skip_pages: Number of leading pages (cover/blank/TOC) left unnumbered.
        content_pages: Number of content pages to stamp, starting at number 1.
    """
    # Hoisted out of the loop: page geometry is constant for every overlay.
    # (Also removed a redundant local `from io import BytesIO`; it is already
    # imported at module level.)
    width, height = A4

    for i in range(content_pages):
        page_num = i + 1

        # Build a one-page in-memory overlay containing just the number.
        packet = BytesIO()
        can = canvas.Canvas(packet, pagesize=A4)
        can.setFont('Helvetica', 10)
        if page_num % 2 == 1:
            # Odd (recto) pages: number at bottom-right.
            can.drawRightString(width - 15*mm, 10*mm, str(page_num))
        else:
            # Even (verso) pages: number at bottom-left.
            can.drawString(15*mm, 10*mm, str(page_num))
        can.save()
        packet.seek(0)

        # Stamp the overlay onto the corresponding content page.
        writer.pages[skip_pages + i].merge_page(PdfReader(packet).pages[0])


def _setup_fonts(font_dir: Optional[str] = None, force: bool = False):
    """Register Unicode-capable fonts if available. Returns (body, bold, italic) font names.

    If no TTF fonts are found, fall back to the base-14 Helvetica family.
    If force=True, only the provided font_dir is searched (common system
    locations are skipped).

    Args:
        font_dir: Optional directory to search first (or exclusively, with force).
        force: Restrict the search to font_dir.

    Returns:
        Tuple of (body, bold, italic) font names usable in ReportLab styles.
    """
    # Base-14 fallbacks, always available in ReportLab without registration.
    default_body = 'Helvetica'
    default_bold = 'Helvetica-Bold'
    default_italic = 'Helvetica-Oblique'

    # If our aliases were registered earlier in this run, reuse them.
    try:
        pdfmetrics.getFont('ABody')
        return 'ABody', 'ABold', 'AItalic'
    except Exception:
        pass

    # Preferred families with expected filenames: (regular, bold, italic).
    families = [
        ('DejaVuSans', ['DejaVuSans.ttf', 'DejaVuSans-Bold.ttf', 'DejaVuSans-Oblique.ttf']),
        ('NotoSans', ['NotoSans-Regular.ttf', 'NotoSans-Bold.ttf', 'NotoSans-Italic.ttf']),
    ]

    # Build the directory search list. (Removed a dead `if force: pass`
    # branch and an unused `candidates` list from the previous version.)
    search_dirs = []
    if font_dir:
        search_dirs.append(font_dir)
    if not force:
        # Common locations
        search_dirs.extend([
            os.path.join(os.getcwd(), 'assets', 'fonts'),
            os.path.join(os.getcwd(), 'fonts'),
            '/Library/Fonts',
            '/System/Library/Fonts',
            os.path.expanduser('~/Library/Fonts'),
        ])

    # Pick the first family whose full file set exists in a single directory.
    chosen = None
    for base, files in families:
        for d in search_dirs:
            paths = [os.path.join(d, f) for f in files]
            if all(os.path.exists(p) for p in paths):
                chosen = (base, paths)
                break
        if chosen:
            break

    if chosen:
        _, (body_path, bold_path, italic_path) = chosen
        try:
            pdfmetrics.registerFont(TTFont('ABody', body_path))
            pdfmetrics.registerFont(TTFont('ABold', bold_path))
            pdfmetrics.registerFont(TTFont('AItalic', italic_path))
            return 'ABody', 'ABold', 'AItalic'
        except Exception as e:
            # Registration can fail on malformed TTFs; degrade gracefully.
            print(f"[WARN] Failed to register Unicode fonts: {e}. Falling back to Helvetica.")
            return default_body, default_bold, default_italic

    # No suitable TTF family found anywhere — use the base-14 fallbacks.
    return default_body, default_bold, default_italic


def main(argv=None) -> int:
    """CLI entry point: parse arguments, collect abstracts, build the PDF book."""
    parser = argparse.ArgumentParser(description='Extract abstracts and compile into a book')
    parser.add_argument('--csv', required=True, help='Path to metadata CSV')
    parser.add_argument('--output', default='data/abstracts_2025.pdf', help='Output PDF path')
    parser.add_argument('--delay', type=float, default=0.5, help='Delay between requests (seconds)')
    parser.add_argument('--year', type=int, default=None, help='Conference year (auto-detected from CSV if not set)')
    parser.add_argument('--cache', default=None, help='Cache file for abstracts (JSON). Default: <output_dir>/abstracts_cache.json')
    parser.add_argument('--font-dir', default=None, help='Directory containing Unicode TTF fonts (DejaVuSans*.ttf or NotoSans*.ttf) to embed')
    parser.add_argument('--refresh', action='store_true', help='Ignore cache and re-fetch/re-parse all abstracts')
    opts = parser.parse_args(argv)

    # Auto-detect the conference year from the CSV's first row when not given.
    year = opts.year
    if not year:
        try:
            with open(opts.csv, newline='', encoding='utf-8') as f:
                first_row = next(csv.DictReader(f))
                year = int(first_row.get('year', 2025))
        except Exception:
            year = 2025

    # Default the cache file to live alongside the output PDF.
    cache_file = opts.cache
    if cache_file is None:
        cache_file = os.path.join(os.path.dirname(opts.output) or 'data', 'abstracts_cache.json')

    abstracts = collect_abstracts(opts.csv, delay=opts.delay, cache_file=cache_file, refresh=opts.refresh)

    if not abstracts:
        print("[ERROR] No abstracts collected")
        return 1

    # Pre-register user-provided fonts once so PDF generation picks them up.
    if opts.font_dir:
        _setup_fonts(opts.font_dir, force=True)
    generate_abstract_book(abstracts, opts.output, year)

    return 0


# Script entry point: exit the process with main()'s integer return code.
if __name__ == '__main__':
    raise SystemExit(main())
