import argparse
import random
import re
import time
from datetime import datetime
from pathlib import Path

import requests
from bs4 import BeautifulSoup
from loguru import logger
from requests.adapters import HTTPAdapter, Retry
from tqdm import tqdm

class GCNCrawler:
    """Scraper for GCN (General Coordinates Network) circulars.

    Pages through the JSON list endpoint, downloads each circular's HTML
    detail page, parses the header fields and body, and accumulates the
    parsed dicts in ``self.results``.
    """

    # JSON listing endpoint and per-circular detail page URL template.
    LIST_API   = "https://gcn.nasa.gov/circulars"
    DETAIL_URL = "https://gcn.nasa.gov/circulars/{cid}"

    def __init__(self, page_limit, sleep_min, sleep_max, event_filter=None):
        """
        Args:
            page_limit: number of items requested per listing page.
            sleep_min: lower bound (seconds) of the random anti-ban delay
                inserted after each detail-page fetch.
            sleep_max: upper bound (seconds) of that delay.
            event_filter: if given, only circulars whose subject contains
                this substring are fetched.
        """
        # HTTP session with automatic retries on transient 5xx responses.
        self.session = requests.Session()
        retries = Retry(total=5, backoff_factor=0.5,
                        status_forcelist=[500, 502, 503, 504])
        self.session.mount('https://', HTTPAdapter(max_retries=retries))
        # Browser-like User-Agent to avoid naive bot blocking.
        self.session.headers.update({
            'User-Agent': (
                'Mozilla/5.0 (X11; Linux x86_64) '
                'AppleWebKit/537.36 (KHTML, like Gecko) '
                'Chrome/114.0.0.0 Safari/537.36'
            )
        })

        self.page_limit   = page_limit
        self.sleep_min    = sleep_min
        self.sleep_max    = sleep_max
        self.event_filter = event_filter

        # All parsed circulars (dicts produced by parse_html) in crawl order.
        self.results = []

    def fetch_page(self, page):
        """Fetch one listing page and return the decoded JSON payload.

        The '_data' parameter asks the Remix-based GCN site for the raw
        route data (JSON) instead of rendered HTML.

        Raises:
            requests.HTTPError: on a non-2xx response (after retries).
        """
        r = self.session.get(
            self.LIST_API,
            params={
                'page': page,
                'limit': self.page_limit,
                'view': 'index',
                '_data': 'routes/circulars._archive._index'
            }, timeout=10
        )
        r.raise_for_status()
        return r.json()

    def fetch_html(self, cid):
        """Fetch the HTML detail page for circular *cid* and return its text.

        Raises:
            requests.HTTPError: on a non-2xx response (after retries).
        """
        url = self.DETAIL_URL.format(cid=cid)
        r = self.session.get(url, timeout=10)
        r.raise_for_status()
        return r.text

    def parse_html(self, html):
        """Parse a circular detail page into a dict.

        Returned keys (any of which may be absent if the markup differs):
        'circular_id' (int), 'subject', 'date' (datetime when parseable,
        else the raw string), 'sender', 'via', and 'body' (always present,
        '' when no <pre> element is found).
        """
        soup = BeautifulSoup(html, 'html.parser')
        data = {}

        # 1) The circular id is the first run of digits in the <h1> heading.
        h1 = soup.find('h1')
        if h1:
            m = re.search(r'(\d+)', h1.get_text(strip=True))
            if m:
                data['circular_id'] = int(m.group(1))

        # 2) Header metadata is laid out as key/value column pairs in grid rows.
        for row in soup.select('main .grid-row'):
            cols = row.find_all('div', recursive=False)
            if len(cols) < 2:
                continue
            key = cols[0].get_text(strip=True).rstrip(':')
            val = cols[1].get_text(strip=True)
            if key == 'Subject':
                data['subject'] = val
            elif key == 'Date':
                # Site emits ISO-8601 with a trailing 'Z'; fromisoformat
                # (pre-3.11) needs an explicit '+00:00' offset.
                try:
                    data['date'] = datetime.fromisoformat(val.replace('Z', '+00:00'))
                except ValueError:
                    # BUG FIX: was a bare `except:` — catch only the parse
                    # failure and keep the raw string as a fallback.
                    data['date'] = val
            elif key == 'From':
                data['sender'] = val
            elif key == 'Via':
                data['via'] = val

        # 3) The message body is rendered inside a <pre> element.
        pre = soup.find('pre')
        data['body'] = pre.get_text() if pre else ''

        return data

    def process_circular(self, cid, subj):
        """Fetch + parse circular *cid* and append the result to self.results.

        Best-effort: any fetch/parse failure is logged and the circular is
        skipped so a single bad page does not abort the whole crawl.
        """
        try:
            html = self.fetch_html(cid)
            info = self.parse_html(html)
        except Exception as e:
            logger.warning(f"Failed parse {cid}: {e}")
            return

        # Fall back to the list-endpoint subject / configured event name
        # when the detail page did not yield them.
        info.setdefault('subject', subj)
        info.setdefault('event', self.event_filter or '')

        self.results.append(info)

    def crawl(self, start_page, end_page=None):
        """Crawl listing pages from *start_page* through *end_page* (inclusive).

        Stops when the server reports no more items, when the reported
        total page count is exceeded, or when *end_page* is passed.

        Returns:
            The accumulated list of parsed circular dicts (self.results).
        """
        page = start_page
        total_pages = None

        while True:
            logger.info(f"Fetching list page {page}/{total_pages or '?'}")
            js = self.fetch_page(page)
            if total_pages is None:
                total_pages = js.get('totalPages', 1)

            items = js.get('items', [])
            if not items:
                break

            for it in tqdm(items, desc=f"Page {page}", unit="it"):
                cid  = int(it['circularId'])
                subj = it.get('subject', '').strip()

                # Apply the optional subject filter before fetching details.
                if self.event_filter and self.event_filter not in subj:
                    continue

                self.process_circular(cid, subj)

                # Random anti-ban delay between detail requests.
                time.sleep(random.uniform(self.sleep_min, self.sleep_max))

            page += 1
            # BUG FIX: `end_page and page > end_page` treated end_page=0 as
            # "no limit" (the CLI computes end_page = start + max_pages - 1,
            # so --max-pages 0 crawled everything). Test for None explicitly.
            if page > total_pages or (end_page is not None and page > end_page):
                break
            # Gentler pacing between listing pages.
            time.sleep(random.uniform(1.0, 2.0))

        logger.success("Crawling complete.")
        return self.results


if __name__ == "__main__":
    p = argparse.ArgumentParser(description="GCN Circular Scraper")
    p.add_argument("--start-page", type=int, default=1, help="起始页码")
    p.add_argument("--max-pages",  type=int, default=1,
                   help="最多抓取多少页（从 start-page 开始计数）")
    p.add_argument("--limit",      type=int, default=100, help="每页条目数")
    p.add_argument("--sleep-min",  type=float, default=0.5, help="最小延迟（秒）")
    p.add_argument("--sleep-max",  type=float, default=1.5, help="最大延迟（秒）")
    p.add_argument("--event",      default="GRB 250516A",
                   help="只抓取 subject 中包含该字符串的 Circular")
    args = p.parse_args()

    crawler = GCNCrawler(
        page_limit   = args.limit,
        sleep_min    = args.sleep_min,
        sleep_max    = args.sleep_max,
        event_filter = args.event
    )
    # end_page is inclusive: start_page + max_pages - 1
    results = crawler.crawl(
        start_page = args.start_page,
        end_page   = args.start_page + args.max_pages - 1
    )

    # Write all collected circulars to a single text file.
    # BUG FIX: open() does not create missing directories, so the script
    # crashed with FileNotFoundError when result/ did not exist; also write
    # with an explicit UTF-8 encoding so non-ASCII bodies can't fail on a
    # platform-dependent default codec.
    out_dir = Path("result")
    out_dir.mkdir(parents=True, exist_ok=True)
    with (out_dir / f"{args.event}.txt").open("w", encoding="utf-8") as file:
        for info in results:
            file.write(f"\n=== GCN {info.get('circular_id')} ===\n")
            file.write(f"Subject: {info.get('subject')}\n")
            file.write(f"Date:    {info.get('date')}\n")
            file.write(f"From:    {info.get('sender')}\n")
            file.write(f"Via:     {info.get('via')}\n")
            # Default to '' so a result missing 'body' can't raise on rstrip().
            file.write("Body:\n" + info.get('body', '').rstrip() + "...\n")