import os
import time
import random
import json
import csv
from playwright.sync_api import sync_playwright
from urllib.parse import urljoin
import logging
from datetime import datetime

# Configure logging: INFO level, mirrored to both a UTF-8 log file and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('cell_scraper.log', encoding='utf-8'),  # persistent log file
        logging.StreamHandler()  # also echo to stderr for live monitoring
    ]
)
logger = logging.getLogger(__name__)

# ==================== Configuration ====================
KEYWORD = "scrna"           # Search keyword
PAGE_SIZE = 100            # Number of articles per results page
START_PAGE = 6             # First page number to scrape (inclusive)
END_PAGE = 10              # Last page number to scrape (inclusive)
OUTPUT_DIR = "cell_data"   # Output directory name
DELAY_MIN = 5              # Minimum delay between pages (seconds)
DELAY_MAX = 10             # Maximum delay between pages (seconds)
MAX_RETRIES = 3            # Maximum retries per page load
BASE_URL = "https://www.cell.com"  # Site root; used to resolve relative PDF links

class SearchLimitReached(Exception):
    """Raised when the site reports that the search request limit was hit."""

# ==================== 辅助函数 ====================
def check_search_limit(page):
    """Raise SearchLimitReached if the page shows the search-limit banner.

    Bug fix: the original caught its own SearchLimitReached in the broad
    ``except Exception`` clause and never re-raised it, so callers that
    rely on this exception to stop the crawl were never notified.
    Probe errors (e.g. Playwright timeouts) are still only debug-logged.
    """
    try:
        if page.is_visible("text=You've reached the search limit.", timeout=3000):
            logger.error("已达到搜索限制，停止爬取")
            raise SearchLimitReached("You've reached the search limit.")
    except SearchLimitReached:
        raise  # propagate to the caller — this is the whole point of the check
    except Exception as e:
        logger.debug(f"检查搜索限制时出错: {str(e)}")

def ensure_output_dir():
    """Create OUTPUT_DIR if it does not exist.

    Uses ``exist_ok=True`` to avoid the check-then-create race (TOCTOU)
    present in the original exists()/makedirs() pair; the creation log
    message is still emitted only when the directory was actually missing.
    """
    was_missing = not os.path.exists(OUTPUT_DIR)
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    if was_missing:
        logger.info(f"创建输出目录: {OUTPUT_DIR}")

def load_existing_pdf_links(filename):
    """Load PDF links already stored in OUTPUT_DIR/filename for de-duplication.

    Supports the two formats this scraper writes (.json list of dicts and
    .csv with a 'pdf_link' column). Returns an empty set when the file is
    missing or unreadable.

    Bug fix: the log messages contained the literal text "(unknown)" where
    the file name belongs — the f-strings had lost their {filename}
    placeholder. Also opens the CSV with newline='' as the csv docs require.
    """
    filepath = os.path.join(OUTPUT_DIR, filename)
    if not os.path.exists(filepath):
        return set()

    existing_pdf_links = set()
    try:
        if filename.endswith('.json'):
            with open(filepath, 'r', encoding='utf-8') as f:
                try:
                    data = json.load(f)
                    if isinstance(data, list):
                        for article in data:
                            if isinstance(article, dict) and 'pdf_link' in article:
                                existing_pdf_links.add(article['pdf_link'])
                except json.JSONDecodeError:
                    logger.warning(f"JSON文件 {filename} 解析失败，将忽略已有数据")

        elif filename.endswith('.csv'):
            # newline='' is the documented way to open CSV files for the csv module
            with open(filepath, 'r', encoding='utf-8', newline='') as f:
                reader = csv.DictReader(f)
                for row in reader:
                    if 'pdf_link' in row and row['pdf_link']:
                        existing_pdf_links.add(row['pdf_link'])
    except Exception as e:
        logger.error(f"加载 {filename} 时出错: {str(e)}")

    logger.info(f"从 {filename} 加载了 {len(existing_pdf_links)} 条已有PDF链接")
    return existing_pdf_links

def save_to_json(data, filename, existing_pdf_links):
    """Append new articles (deduplicated by pdf_link) to a JSON file.

    Reads any existing list from OUTPUT_DIR/filename, appends the articles
    whose 'pdf_link' is not in ``existing_pdf_links``, and rewrites the file.
    Returns True when anything was written, False otherwise.

    Bug fix: log messages contained the literal "(unknown)" where the file
    name belongs — restored the {filename} placeholder.
    """
    filepath = os.path.join(OUTPUT_DIR, filename)

    # Drop articles whose pdf_link we have already stored
    new_data = [article for article in data if article.get('pdf_link') not in existing_pdf_links]
    if not new_data:
        logger.info(f"没有新数据需要添加到JSON文件 {filename}")
        return False

    try:
        # Read existing data; tolerate a missing, corrupt, or non-list file
        existing_data = []
        if os.path.exists(filepath):
            with open(filepath, 'r', encoding='utf-8') as f:
                try:
                    existing_data = json.load(f)
                    if not isinstance(existing_data, list):
                        existing_data = []
                except json.JSONDecodeError:
                    existing_data = []

        # Merge and rewrite the whole file
        combined_data = existing_data + new_data
        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(combined_data, f, indent=2, ensure_ascii=False)

        logger.info(f"已更新JSON文件: {filepath}, 新增 {len(new_data)} 篇文章")
        return True
    except Exception as e:
        logger.error(f"保存JSON文件 {filename} 时出错: {str(e)}")
        return False

def save_to_csv(data, filename, existing_pdf_links):
    """Append new articles (deduplicated by pdf_link) to a CSV file.

    Merges the header fields of the existing file with those of the new
    rows, then rewrites the whole file; DictWriter fills missing columns
    with its default restval (''). Returns True when anything was written.

    Bug fix: log messages contained the literal "(unknown)" where the file
    name belongs — restored the {filename} placeholder. Also opens the CSV
    for reading with newline='' as the csv docs require.
    """
    filepath = os.path.join(OUTPUT_DIR, filename)

    # Drop articles whose pdf_link we have already stored
    new_data = [article for article in data if article.get('pdf_link') not in existing_pdf_links]
    if not new_data:
        logger.info(f"没有新数据需要添加到CSV文件 {filename}")
        return False

    try:
        # Union of all keys appearing in the new rows
        all_fields = set()
        for item in new_data:
            all_fields.update(item.keys())

        # Read existing rows and their header, if the file exists
        existing_data = []
        existing_fields = set()
        if os.path.exists(filepath):
            with open(filepath, 'r', encoding='utf-8', newline='') as f:
                reader = csv.DictReader(f)
                existing_fields = set(reader.fieldnames or [])
                existing_data = list(reader)

        # Final header covers old and new columns alike
        all_fields.update(existing_fields)

        # Merge and rewrite the whole file
        combined_data = existing_data + new_data
        with open(filepath, 'w', encoding='utf-8', newline='') as f:
            writer = csv.DictWriter(f, fieldnames=sorted(all_fields))
            writer.writeheader()
            writer.writerows(combined_data)

        logger.info(f"已更新CSV文件: {filepath}, 新增 {len(new_data)} 篇文章")
        return True
    except Exception as e:
        logger.error(f"保存CSV文件 {filename} 时出错: {str(e)}")
        return False

def handle_cookie_dialog(page):
    """Dismiss the cookie-consent banner when it is showing; never raises."""
    accept_selector = "text=Accept Cookies"
    try:
        if page.is_visible(accept_selector, timeout=5000):
            page.click(accept_selector)
            logger.debug("已接受Cookies")
    except Exception as exc:
        # Banner handling is best-effort; a failure here must not stop the crawl
        logger.debug(f"处理cookie弹窗时出错: {str(exc)}")

def scroll_page(page):
    """Scroll to the bottom of the page so lazily-loaded content renders.

    Runs an in-page script that scrolls 100px every 100ms until the total
    scrolled distance reaches the document height. Failures are logged as
    warnings and swallowed — scraping proceeds with whatever has loaded.
    """
    try:
        # Async JS: resolves once the accumulated scroll covers scrollHeight
        page.evaluate("""async () => {
            await new Promise(resolve => {
                let totalHeight = 0;
                const distance = 100;
                const timer = setInterval(() => {
                    const scrollHeight = document.body.scrollHeight;
                    window.scrollBy(0, distance);
                    totalHeight += distance;
                    if(totalHeight >= scrollHeight){
                        clearInterval(timer);
                        resolve();
                    }
                }, 100);
            });
        }""")
    except Exception as e:
        logger.warning(f"页面滚动时出错: {str(e)}")

def _meta_text(item, selector):
    """Return the stripped inner text of the first match in *item*, or ''."""
    element = item.query_selector(selector)
    return element.inner_text().strip() if element else ""


def get_page_articles(page, page_num):
    """Load one search-results page and return its articles as dicts.

    Navigates to the quicksearch URL for ``page_num`` (retrying up to
    MAX_RETRIES on load errors), then parses each result item into a dict
    with title, pdf_link, authors, date, journal and a scrape timestamp.
    Raises SearchLimitReached immediately if the site reports the limit.

    Fixes over the original: metadata fields queried the DOM twice per
    field (once in the condition, once for the value) — now queried once
    via _meta_text; a missing/empty PDF ``href`` no longer feeds None into
    urljoin (which raised TypeError and was mis-logged as a parse error).
    """
    search_url = f"{BASE_URL}/action/doSearch?type=quicksearch&text1={KEYWORD}&journalCode=cell&SeriesKey=cell&startPage={page_num}&pageSize={PAGE_SIZE}"

    for attempt in range(MAX_RETRIES):
        try:
            page.goto(search_url, timeout=60000)
            check_search_limit(page)  # abort early when the site blocks searching
            handle_cookie_dialog(page)
            page.wait_for_selector("div.search__item__body", timeout=15000)
            scroll_page(page)
            break
        except SearchLimitReached:
            raise  # not retryable — propagate to stop the crawl
        except Exception:
            if attempt == MAX_RETRIES - 1:
                raise
            time.sleep(3)

    articles = []
    for item in page.query_selector_all("div.search__item__body"):
        try:
            # Title is mandatory; skip items without one
            title = _meta_text(item, "h2.meta__title")
            if not title:
                continue

            # The PDF link doubles as the article's unique identifier
            pdf_element = item.query_selector("a.pdf-link")
            if not pdf_element:
                continue
            href = pdf_element.get_attribute("href")
            if not href:
                continue
            pdf_link = urljoin(BASE_URL, href)

            articles.append({
                'title': title,
                'pdf_link': pdf_link,
                'authors': _meta_text(item, "div.meta__authors"),
                'date': _meta_text(item, "div.meta__date"),
                'journal': _meta_text(item, "div.meta__journal"),
                'scraped_time': datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            })
        except Exception as e:
            logger.error(f"解析文章时出错: {str(e)}")
            continue

    logger.info(f"从第 {page_num} 页获取到 {len(articles)} 篇有效文章")
    return articles

def scrape_page(page_num, existing_pdf_links):
    """Scrape a single results page inside a fresh browser session.

    Launches Chromium, fetches page ``page_num``, persists any articles to
    the JSON and CSV files, and records newly saved pdf_links into
    ``existing_pdf_links`` (mutated in place). Returns True on success,
    False when the search limit is hit or any other error occurs.
    """
    with sync_playwright() as playwright:
        browser = playwright.chromium.launch(headless=False)
        context = browser.new_context(
            user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
        )
        tab = context.new_page()

        try:
            page_articles = get_page_articles(tab, page_num)
            if page_articles:
                wrote_json = save_to_json(page_articles, "cell_articles.json", existing_pdf_links)
                wrote_csv = save_to_csv(page_articles, "cell_articles.csv", existing_pdf_links)

                # Only remember links once at least one sink accepted them
                if wrote_json or wrote_csv:
                    existing_pdf_links.update(a['pdf_link'] for a in page_articles)

                logger.info(f"第 {page_num} 页数据已处理")
            return True
        except SearchLimitReached:
            # Signal the caller to stop crawling entirely
            return False
        except Exception as exc:
            logger.error(f"处理第 {page_num} 页时出错: {str(exc)}")
            return False
        finally:
            browser.close()

def main():
    """Entry point: crawl pages START_PAGE..END_PAGE and persist results.

    Fixes over the original: the ``if success:`` check after ``break`` was
    dead code (success is always True past the break); and the final
    "unique articles" total wrongly included pdf_links pre-loaded from disk
    — it now reports only articles gained during this run.
    """
    ensure_output_dir()
    logger.info(f"开始搜索: 关键词='{KEYWORD}', 页码范围={START_PAGE}-{END_PAGE}")

    # Seed the de-duplication set from both output files
    existing_pdf_links = set()
    existing_pdf_links.update(load_existing_pdf_links("cell_articles.json"))
    existing_pdf_links.update(load_existing_pdf_links("cell_articles.csv"))
    initial_count = len(existing_pdf_links)

    for page_num in range(START_PAGE, END_PAGE + 1):
        logger.info(f"正在处理第 {page_num} 页...")

        # False means search limit reached or an unrecoverable page error
        if not scrape_page(page_num, existing_pdf_links):
            break

        logger.info(f"当前累计文章数: {len(existing_pdf_links)}")

        # Polite randomized delay between pages (skip after the last one)
        if page_num < END_PAGE:
            delay = random.uniform(DELAY_MIN, DELAY_MAX)
            logger.info(f"等待 {delay:.1f} 秒后继续...")
            time.sleep(delay)

    total_articles = len(existing_pdf_links) - initial_count
    logger.info(f"爬取完成! 总共获取 {total_articles} 篇唯一文章")
    logger.info(f"数据已保存到 {OUTPUT_DIR} 目录")

if __name__ == "__main__":
    main()