#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
文件名: nature_multi_journal_final.py
功能:
  - 静默运行，无中间打印
  - 最后输出 JSON 结果供 Node.js 解析
  - 包含统计信息与错误
"""

import requests
from bs4 import BeautifulSoup
import pymysql
import os
import time
import uuid
from urllib.parse import urljoin
from typing import List, Dict, Any
import json
import sys

# ==================== Configuration ====================

# Journals to crawl. "name" is the display name stored in the database;
# "key" is Nature's short journal code used in the search URL's
# ?journal= parameter.
JOURNALS = [
    # {"name": "NATURE COMMUNICATIONS", "key": "ncomms"},
    # {"name": "NATURE IMMUNOLOGY", "key": "ni"},
     {"name": "NATURE MEDICINE", "key": "nm"},
    {"name": "NATURE NEUROSCIENCE", "key": "neuro"},
    {"name": "NATURE REVIEWS CANCER", "key": "nrc"},
    {"name": "NATURE REVIEWS CARDIOLOGY", "key": "nrcardio"},
    {"name": "NATURE REVIEWS CLINICAL ONCOLOGY", "key": "nrclinonc"},
    {"name": "NATURE REVIEWS DISEASE PRIMERS", "key": "nrdp"},
    {"name": "NATURE REVIEWS DRUG DISCOVERY", "key": "nrd"},
    {"name": "NATURE REVIEWS ENDOCRINOLOGY", "key": "nrendo"},
    {"name": "NATURE REVIEWS GASTROENTEROLOGY & HEPATOLOGY", "key": "nrgastro"},
    {"name": "NATURE REVIEWS IMMUNOLOGY", "key": "nri"},
    {"name": "NATURE REVIEWS NEPHROLOGY", "key": "nrneph"},
    {"name": "NATURE REVIEWS NEUROLOGY", "key": "nrneurol"},
    {"name": "NATURE REVIEWS NEUROSCIENCE", "key": "nrn"},
    {"name": "NATURE REVIEWS RHEUMATOLOGY", "key": "nrrheum"},
    {"name": "NATURE REVIEWS UROLOGY", "key": "nrurol"},
    {"name": "NATURE HUMAN BEHAVIOUR", "key": "nathumbehav"},
    {"name": "NATURE ASTRONOMY", "key": "natastron"},
    {"name": "NATURE PHOTONICS", "key": "nphoton"},
    {"name": "NATURE PHYSICS", "key": "nphys"},
    {"name": "NATURE CELL BIOLOGY", "key": "ncb"},
    {"name": "NATURE CHEMICAL BIOLOGY", "key": "nchembio"},
    {"name": "NATURE ECOLOGY & EVOLUTION", "key": "natecolevol"},
    {"name": "NATURE GENETICS", "key": "ng"},
    {"name": "NATURE METHODS", "key": "nmeth"},
    {"name": "NATURE MICROBIOLOGY", "key": "nmicrobiol"},
    {"name": "NATURE PLANTS", "key": "nplants"},
    {"name": "NATURE PROTOCOLS", "key": "nprot"},
    {"name": "NATURE REVIEWS GENETICS", "key": "nrg"},
    {"name": "NATURE REVIEWS MICROBIOLOGY", "key": "nrmicro"},
    {"name": "NATURE REVIEWS MOLECULAR CELL BIOLOGY", "key": "nrm"},
    {"name": "NATURE STRUCTURAL & MOLECULAR BIOLOGY", "key": "nsmb"},
    {"name": "NATURE CHEMISTRY", "key": "nchem"},
    {"name": "NATURE REVIEWS CHEMISTRY", "key": "natrevchem"},
    {"name": "NATURE BIOMEDICAL ENGINEERING", "key": "natbiomedeng"},
    {"name": "NATURE BIOTECHNOLOGY", "key": "nbt"},
    {"name": "NATURE CLIMATE CHANGE", "key": "nclimate"},
    {"name": "NATURE GEOSCIENCE", "key": "ngeo"},
    {"name": "NATURE ENERGY", "key": "nenergy"},
    {"name": "NATURE MATERIALS", "key": "nmat"},
    {"name": "NATURE NANOTECHNOLOGY", "key": "nnano"},
    {"name": "NATURE REVIEWS MATERIALS", "key": "natrevmats"},
    {"name": "NATURE CATALYSIS", "key": "natcatal"}
]

# MySQL connection settings (autocommit off: writes are committed explicitly).
# NOTE(review): credentials are hard-coded in source — consider moving them
# to environment variables or a config file outside version control.
DB_CONFIG = {
    'host': 'localhost',
    'user': 'root',
    'password': '123456',
    'database': 'superbackend',
    'charset': 'utf8mb4',
    'autocommit': False,
    'cursorclass': pymysql.cursors.DictCursor,
}

# Browser-like User-Agent so nature.com serves the normal HTML pages.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                  "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0 Safari/537.36"
}

DELAY_BETWEEN_JOURNALS = 2  # seconds to pause between journals
CONSECUTIVE_DUPLICATE_LIMIT = 2  # stop a journal after this many known articles in a row
MAX_ARTICLES_PER_JOURNAL = 50  # cap on new articles processed per journal per run
# ==================== Utility functions ====================

def get_script_dir() -> str:
    """Return the absolute path of the directory containing this script."""
    script_path = os.path.abspath(__file__)
    return os.path.dirname(script_path)

# Resolved once at import time so all paths are stable regardless of CWD.
SCRIPT_DIR = get_script_dir()
# Downloaded PDFs are stored under ../uploads/nature_pdfs relative to this file.
PDF_DIR = os.path.join(SCRIPT_DIR, "../uploads/nature_pdfs")

def sanitize_filename(name: str) -> str:
    """Keep only alphanumerics, spaces, underscores and hyphens, strip
    trailing whitespace, and cap the result at 100 characters."""
    kept = [ch for ch in name if ch.isalnum() or ch in " _-"]
    return "".join(kept).rstrip()[:100]

def is_access_restricted(pdf_url: str) -> bool:
    """Best-effort check whether *pdf_url* sits behind Nature's paywall.

    Fetches the URL and looks for the institutional-access banner text in
    the response body. Returns False for non-200 responses and on any
    network error — the caller then attempts the actual download, which
    performs its own status/Content-Type validation.
    """
    try:
        response = requests.get(pdf_url, headers=HEADERS, timeout=10)
        # A 200 response containing the banner means the PDF is paywalled.
        return (response.status_code == 200
                and 'Access through your institution' in response.text)
    except Exception:
        # Deliberately silent: the script's only output must be the final
        # JSON line, and a failed probe is treated as "not restricted".
        return False

def generate_pdf_filename(journal_name: str, publish_date: str) -> str:
    """Build a collision-resistant PDF filename.

    Combines the sanitized journal name, the article's date (YYYYMMDD, or
    "unknown" when absent), a short random UUID fragment, and the current
    timestamp.
    """
    timestamp = time.strftime("%Y%m%d_%H%M%S")
    short_uid = str(uuid.uuid4())[:10]
    journal_part = sanitize_filename(journal_name.replace("/", "_"))
    date_part = publish_date.replace("-", "")[:8] if publish_date else "unknown"
    return f"{journal_part}_{date_part}_{short_uid}_{timestamp}.pdf"

# ==================== Database operations ====================

def create_table_if_not_exists():
    """Create the nature_pdf table if it does not exist (idempotent DDL).

    Raises:
        RuntimeError: if the DDL fails; the underlying pymysql error is
        chained via ``from e`` so the original traceback is preserved.
    """
    conn = pymysql.connect(**DB_CONFIG)
    try:
        with conn.cursor() as cursor:
            sql = """
            CREATE TABLE IF NOT EXISTS nature_pdf (
                id INT AUTO_INCREMENT PRIMARY KEY,
                journal_name VARCHAR(255) NOT NULL,
                title VARCHAR(500) NOT NULL,
                author VARCHAR(255),
                pdf_url VARCHAR(1000) NOT NULL,
                pdf_path VARCHAR(1000),
                upload_date DATE,
                content TEXT,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                
                UNIQUE KEY uk_pdf_url (pdf_url(191)),
                INDEX idx_journal (journal_name),
                INDEX idx_created (created_at)
            ) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
            """
            cursor.execute(sql)
        conn.commit()
    except Exception as e:
        # Chain the cause so callers see the real database error, not just text.
        raise RuntimeError(f"Failed to create table: {str(e)}") from e
    finally:
        conn.close()

def batch_check_urls_exist(pdf_urls: List[str]) -> set:
    """Return the subset of *pdf_urls* already stored in nature_pdf.

    The IN-clause is built from ``%s`` placeholders only; the URL values
    themselves are passed as query parameters (no SQL injection risk).

    Raises:
        RuntimeError: if the query fails; the pymysql error is chained.
    """
    if not pdf_urls:
        return set()
    conn = pymysql.connect(**DB_CONFIG)
    try:
        placeholders = ','.join(['%s'] * len(pdf_urls))
        query = f"SELECT pdf_url FROM nature_pdf WHERE pdf_url IN ({placeholders})"
        with conn.cursor() as cursor:
            cursor.execute(query, pdf_urls)
            result = {row['pdf_url'] for row in cursor.fetchall()}
        return result
    except Exception as e:
        # Chain the cause so the original database error remains visible.
        raise RuntimeError(f"Database query failed: {str(e)}") from e
    finally:
        conn.close()

def batch_insert_articles(articles: List[Dict[str, Any]]) -> bool:
    """Insert article metadata rows into nature_pdf in one batch.

    Each dict must contain the keys named in the VALUES clause. Returns
    True on success (also for an empty list, which is a no-op). Rolls the
    transaction back on failure.

    Raises:
        RuntimeError: if the insert fails; the pymysql error is chained.
    """
    if not articles:
        return True
    conn = pymysql.connect(**DB_CONFIG)
    try:
        with conn.cursor() as cursor:
            sql = """
            INSERT INTO nature_pdf 
            (journal_name, title, author, pdf_url, pdf_path, upload_date, content)
            VALUES (%(journal_name)s, %(title)s, %(author)s, %(pdf_url)s, %(pdf_path)s, %(upload_date)s, %(content)s)
            """
            cursor.executemany(sql, articles)
        conn.commit()
        return True
    except Exception as e:
        conn.rollback()
        # Chain the cause so the original database error remains visible.
        raise RuntimeError(f"Batch insert failed: {str(e)}") from e
    finally:
        conn.close()

# ==================== Crawler core ====================

def crawl_journal(journal: Dict[str, str], stats: Dict):
    """Crawl one journal's newest articles, download accessible PDFs, and
    insert their metadata into the database.

    Walks the Nature search results (newest first) page by page until one
    of the stop conditions is reached:
      - MAX_ARTICLES_PER_JOURNAL new articles processed;
      - CONSECUTIVE_DUPLICATE_LIMIT already-stored articles seen in a row
        (results are date-ordered, so everything beyond is old too);
      - three consecutive pages yielded no new articles;
      - a request, parse, or database error occurred.

    Mutates *stats* in place: bumps the aggregate download counters and
    appends a per-journal summary (name, new-article count, error list).
    No output is printed; errors are collected into the summary instead.
    """
    name = journal["name"]
    key = journal["key"]
    base_search_url = "https://www.nature.com/search"
    params = {'journal': key, 'order': 'date_desc'}

    consecutive_duplicates = 0
    no_new_articles_pages = 0
    page = 1
    processed_count = 0
    journal_errors = []
    duplicate_limit_hit = False  # set once a run of known articles is seen

    journal_dir = os.path.join(PDF_DIR, name.replace("/", "_"))
    os.makedirs(journal_dir, exist_ok=True)

    while processed_count < MAX_ARTICLES_PER_JOURNAL and not duplicate_limit_hit:
        params['page'] = page
        try:
            response = requests.get(base_search_url, params=params, headers=HEADERS, timeout=10)
            response.raise_for_status()
            soup = BeautifulSoup(response.text, 'html.parser')
        except Exception as e:
            journal_errors.append(f"Request failed on page {page}: {str(e)}")
            break

        items = soup.select("li.app-article-list-row__item")
        if not items:
            break  # past the last results page

        # Collect all candidate PDF URLs first so existence in the DB can
        # be checked with a single query for the whole page.
        all_pdf_urls = []
        item_data_list = []

        for item in items:
            link_tag = item.select_one("h3.c-card__title a")
            if not link_tag:
                continue
            article_path = link_tag['href']
            if not article_path.startswith('/'):
                article_path = '/' + article_path
            pdf_url = f"https://www.nature.com{article_path}.pdf"
            all_pdf_urls.append(pdf_url)
            item_data_list.append((item, pdf_url, article_path))

        try:
            existing_urls = batch_check_urls_exist(all_pdf_urls)
        except Exception as e:
            journal_errors.append(f"URL check failed: {str(e)}")
            break

        new_articles_this_page = []

        for item, pdf_url, article_path in item_data_list:
            if processed_count >= MAX_ARTICLES_PER_JOURNAL:
                break

            if pdf_url in existing_urls:
                consecutive_duplicates += 1
                if consecutive_duplicates >= CONSECUTIVE_DUPLICATE_LIMIT:
                    # Results are newest-first: a run of known articles means
                    # the rest of this journal is already in the database, so
                    # stop instead of fetching further pages uselessly.
                    duplicate_limit_hit = True
                    break
                continue
            consecutive_duplicates = 0

            title_tag = item.select_one("h3.c-card__title a")
            title = title_tag.get_text(strip=True) if title_tag else "Unknown"

            # Use the <time datetime="..."> value when it parses as
            # YYYY-MM-DD; otherwise keep a fixed fallback date.
            time_tag = item.select_one("time[datetime]")
            upload_date = "2025-01-01"
            if time_tag and time_tag.get('datetime'):
                dt_str = time_tag['datetime'].strip()
                try:
                    time.strptime(dt_str, "%Y-%m-%d")
                    upload_date = dt_str
                except ValueError:
                    pass

            author_spans = item.select("ul.app-author-list span[itemprop='name']")
            authors = [span.get_text(strip=True) for span in author_spans]
            author = ", ".join(authors)[:255] if authors else "Unknown"  # column is VARCHAR(255)

            abstract_tag = item.select_one("div.c-card__summary p")
            content = abstract_tag.get_text(strip=True) if abstract_tag else ""

            if is_access_restricted(pdf_url):
                continue  # paywalled — skip without counting as a failure

            filename = generate_pdf_filename(name, upload_date)
            pdf_path = os.path.join(journal_dir, filename)

            try:
                # Close the streamed connection deterministically via `with`.
                with requests.get(pdf_url, headers=HEADERS, timeout=15, stream=True) as pdf_response:
                    if (pdf_response.status_code == 200
                            and 'application/pdf' in pdf_response.headers.get('Content-Type', '')):
                        with open(pdf_path, 'wb') as f:
                            for chunk in pdf_response.iter_content(chunk_size=8192):
                                f.write(chunk)
                        new_articles_this_page.append({
                            "journal_name": name,
                            "title": title,
                            "author": author,
                            "pdf_url": pdf_url,
                            "pdf_path": pdf_path,
                            "upload_date": upload_date,
                            "content": content[:65535]
                        })
                        processed_count += 1
                    else:
                        # Non-200 or non-PDF response: count it so the final
                        # JSON's failed_downloads reflects reality.
                        stats['failed_downloads'] += 1
            except Exception as e:
                stats['failed_downloads'] += 1
                journal_errors.append(f"Download failed: {pdf_url}, error: {str(e)}")

        if new_articles_this_page:
            try:
                if batch_insert_articles(new_articles_this_page):
                    stats['success_downloads'] += len(new_articles_this_page)
                    stats['total_new_articles'] += len(new_articles_this_page)
                    no_new_articles_pages = 0
                else:
                    journal_errors.append(f"Batch insert failed for {len(new_articles_this_page)} articles.")
            except Exception as e:
                journal_errors.append(f"Insert failed: {str(e)}")
        else:
            no_new_articles_pages += 1

        if no_new_articles_pages >= 3:
            break

        page += 1
        time.sleep(1)  # be polite between result-page requests

    stats['journal_summaries'].append({
        "journal_name": name,
        "new_articles": processed_count,
        "errors": journal_errors
    })

# ==================== Main program ====================

def main():
    """Run the full crawl and print exactly one JSON summary line.

    The summary carries overall status, counters, timing, and a
    per-journal breakdown; the Node.js caller parses that single line.
    """
    started_at = time.time()
    os.makedirs(PDF_DIR, exist_ok=True)

    # Aggregate statistics, updated in place by crawl_journal().
    stats = {
        "status": "success",
        "error": None,
        "total_journals": len(JOURNALS),
        "total_new_articles": 0,
        "success_downloads": 0,
        "failed_downloads": 0,
        "start_time": time.strftime("%Y-%m-%d %H:%M:%S"),
        "end_time": None,
        "duration_seconds": None,
        "pdf_storage_path": PDF_DIR,
        "journal_summaries": []  # one entry per crawled journal
    }

    try:
        create_table_if_not_exists()
        for journal in JOURNALS:
            crawl_journal(journal, stats)
            time.sleep(DELAY_BETWEEN_JOURNALS)
    except Exception as exc:
        stats['status'] = 'error'
        stats['error'] = str(exc)
    finally:
        stats['end_time'] = time.strftime("%Y-%m-%d %H:%M:%S")
        stats['duration_seconds'] = round(time.time() - started_at, 2)

        # The script's single line of output, consumed by the Node.js caller.
        print(json.dumps(stats, ensure_ascii=False))
        sys.stdout.flush()  # make sure the line is delivered immediately

if __name__ == "__main__":
    main()