import requests
from bs4 import BeautifulSoup
import os
import urllib.parse
import time
import random
import pandas as pd
from datetime import datetime
import threading

def debug_print(message, level=1):
    """Print *message* prefixed with two spaces of indentation per *level*."""
    print("{}{}".format("  " * level, message))

class SafeExcelWriter:
    """Thread-safe Excel writer that refuses to write duplicate rows.

    A single lock serializes every read-modify-write cycle on the target
    workbook; rows whose ``id`` is already present are skipped.
    """

    # Column layout used when the workbook has to be created from scratch.
    _COLUMNS = ['id', 'title', 'publish_date', 'doi', 'journal',
                'authors', 'url', 'pdf_filename', 'category', 'status']

    def __init__(self, file_path):
        self.file_path = file_path
        self.lock = threading.Lock()
        # Create an empty workbook with the expected header row if the
        # file does not exist yet.
        with self.lock:
            if not os.path.exists(self.file_path):
                empty = pd.DataFrame(columns=self._COLUMNS)
                empty.to_excel(self.file_path, index=False, engine='openpyxl')

    def append_data(self, data):
        """Append one record to the workbook unless its id already exists.

        Returns True on a successful write; False when the row was a
        duplicate or the write failed (failures are also appended to an
        ``error_log.txt`` next to the workbook).
        """
        with self.lock:
            try:
                current = pd.read_excel(self.file_path, engine='openpyxl')

                # Duplicate guard: compare ids as strings so Excel dtype
                # drift (int vs str) cannot defeat the check.
                if not current.empty and str(data['id']) in current['id'].astype(str).values:
                    debug_print(f"数据已存在，跳过写入: {data['id']}", 4)
                    return False

                merged = pd.concat([current, pd.DataFrame([data])], ignore_index=True)
                with pd.ExcelWriter(self.file_path, engine='openpyxl') as writer:
                    merged.to_excel(writer, index=False)

                debug_print(f"成功写入数据到Excel: {data.get('id', '')}", 4)
                return True

            except Exception as e:
                debug_print(f"写入Excel失败: {str(e)}", 4)
                # Best-effort failure audit trail next to the workbook.
                log_path = os.path.join(os.path.dirname(self.file_path), "error_log.txt")
                with open(log_path, 'a', encoding='utf-8') as f:
                    f.write(f"{datetime.now()} - 写入失败: {str(e)}\n")
                    f.write(f"失败数据: {str(data)}\n\n")
                return False

def download_nature_open_access_pdfs(keyword, max_pages=1, output_dir='nature_pdfs', start_id=1, start_page=1):
    """
    Download PDFs of Open Access articles from nature.com, writing each
    article's metadata to an Excel workbook as it is processed.

    :param keyword: search keyword
    :param max_pages: maximum number of result pages to crawl (counted
        from ``start_page``)
    :param output_dir: directory for PDFs (a per-keyword sub-directory is
        created inside it)
    :param start_id: first article ID to assign (resume point for an
        interrupted crawl)
    :param start_page: first search-result page to fetch (resume point)
    :return: ``(downloaded_pdf_paths, next_start_id, next_start_page)``
    """
    start_time = datetime.now()
    print(f"\n脚本开始时间: {start_time.strftime('%Y-%m-%d %H:%M:%S')}")

    # Per-keyword sub-directory keeps PDFs of different searches apart.
    keyword_dir = os.path.join(output_dir, keyword)
    os.makedirs(keyword_dir, exist_ok=True)

    base_url = "https://www.nature.com"
    search_url = f"{base_url}/search?q={urllib.parse.quote(keyword)}&order=date_desc"

    headers = {
        'User-Agent': 'Academic-Research-Crawler/1.0 (contact@example.edu)',
        'From': 'contact@example.edu',
        'Accept-Language': 'en-US,en;q=0.9',
    }

    downloaded_files = []
    current_id = start_id  # running article ID (returned so callers can resume)

    # FIX: compute the page window BEFORE the try block. The final return
    # uses actual_end_page, and binding it here guarantees it exists even
    # when an exception fires before the crawl loop starts (previously a
    # NameError in ``finally`` could mask the real error).
    actual_start_page = start_page
    actual_end_page = start_page + max_pages - 1

    # FIX: per-request timeout so a stalled connection cannot hang the
    # crawl forever (requests blocks indefinitely without one).
    request_timeout = 30

    try:
        # Metadata workbook lives next to the PDFs for this keyword.
        excel_path = os.path.join(keyword_dir, f'{keyword}_articles_metadata.xlsx')
        excel_writer = SafeExcelWriter(excel_path)

        # One session reuses connections and carries the headers.
        session = requests.Session()
        session.headers.update(headers)

        debug_print(f"开始搜索关键词: {keyword}", 0)
        debug_print(f"从ID {start_id} 开始爬取", 0)
        debug_print(f"从第 {start_page} 页开始爬取", 0)

        for page in range(actual_start_page, actual_end_page + 1):
            url = f"{search_url}&page={page}"
            debug_print(f"处理第 {page} 页: {url}", 1)

            try:
                # Randomized delay to stay polite to the server.
                time.sleep(random.uniform(5, 15))

                response = session.get(url, timeout=request_timeout)
                debug_print(f"搜索页面状态码: {response.status_code}", 2)
                response.raise_for_status()

                soup = BeautifulSoup(response.text, 'html.parser')

                # Result cards on the search page. NOTE(review): the class
                # selector is tied to nature.com's current markup and will
                # silently match nothing if the site changes.
                items = soup.find_all('article', class_='u-full-height c-card c-card--flush')
                debug_print(f"找到 {len(items)} 篇文章", 2)

                for i, item in enumerate(items, 1):
                    debug_print(f"处理第 {i} 篇文章", 3)

                    # Only Open Access articles have a freely downloadable PDF.
                    oa_tag = item.find('span', class_='u-color-open-access')
                    if not oa_tag:
                        debug_print("不是Open Access文章，跳过", 4)
                        continue

                    # Zero-padded 8-digit ID; incremented only after the
                    # article is fully handled so resume points stay exact.
                    article_id = f"{current_id:08d}"

                    title_elem = item.find('h3', class_='c-card__title')
                    title = title_elem.text.strip() if title_elem else "untitled"
                    # Keep only filesystem-safe characters for the filename.
                    clean_title = "".join(c for c in title if c.isalnum() or c in (' ', '_')).rstrip()

                    link_elem = item.find('a', href=True)
                    if not link_elem:
                        debug_print("未找到文章链接，跳过", 4)
                        continue

                    article_url = base_url + link_elem['href']
                    debug_print(f"文章页面URL: {article_url}", 4)

                    date_elem = item.find('time')
                    publish_date = date_elem['datetime'] if date_elem else ""
                    journal_elem = item.find('span', class_='c-meta__journal')
                    journal = journal_elem.text.strip() if journal_elem else ""

                    # Base metadata row. FIX: doi/authors/pdf_filename start
                    # empty so every written row carries the full column set
                    # even when the article page cannot be processed.
                    metadata = {
                        'id': article_id,
                        'title': title,
                        'publish_date': publish_date,
                        'doi': '',
                        'journal': journal,
                        'authors': '',
                        'url': article_url,
                        'pdf_filename': '',
                        'category': keyword,
                        'status': 'processing',
                        'timestamp': datetime.now().isoformat()
                    }

                    # Visit the article page for DOI, authors and the PDF link.
                    try:
                        time.sleep(random.uniform(8, 20))
                        article_response = session.get(article_url, timeout=request_timeout)
                        debug_print(f"文章页面状态码: {article_response.status_code}", 4)
                        article_response.raise_for_status()

                        article_soup = BeautifulSoup(article_response.text, 'html.parser')

                        doi_elem = article_soup.find('meta', {'name': 'citation_doi'})
                        metadata['doi'] = doi_elem['content'] if doi_elem else ""

                        author_elems = article_soup.find_all('meta', {'name': 'citation_author'})
                        metadata['authors'] = "; ".join(a['content'] for a in author_elems)

                        # Try progressively looser selectors for the PDF link.
                        pdf_link = article_soup.find('a', {'data-test': 'download-pdf'})
                        if not pdf_link:
                            pdf_link = article_soup.find('a', href=lambda x: x and 'pdf' in x.lower())
                        if not pdf_link:
                            pdf_link = article_soup.find('a', string=lambda x: x and 'download pdf' in x.lower())

                        if not pdf_link or not pdf_link.get('href'):
                            debug_print("未找到PDF下载链接", 5)
                            metadata['status'] = 'no_pdf_link'
                            excel_writer.append_data(metadata)
                            current_id += 1  # ID advances even without a PDF
                            continue

                        pdf_url = base_url + pdf_link['href'] if not pdf_link['href'].startswith('http') else pdf_link['href']
                        debug_print(f"PDF URL: {pdf_url}", 5)

                        # Download the PDF itself.
                        try:
                            time.sleep(random.uniform(5, 15))
                            pdf_response = session.get(pdf_url, stream=True, timeout=request_timeout)
                            debug_print(f"PDF响应状态码: {pdf_response.status_code}", 5)
                            pdf_response.raise_for_status()

                            # Reject HTML error pages served with 200 status.
                            content_type = pdf_response.headers.get('Content-Type', '')
                            if 'pdf' not in content_type.lower():
                                debug_print(f"响应不是PDF类型: {content_type}", 5)
                                metadata['status'] = 'invalid_content_type'
                                excel_writer.append_data(metadata)
                                current_id += 1
                                continue

                            # Filename embeds the ID and a truncated title.
                            filename = f"{article_id}_{clean_title[:80]}.pdf"
                            filepath = os.path.join(keyword_dir, filename)

                            # Stream to disk in chunks to bound memory use.
                            with open(filepath, 'wb') as f:
                                for chunk in pdf_response.iter_content(chunk_size=8192):
                                    if chunk:  # skip keep-alive chunks
                                        f.write(chunk)

                            downloaded_files.append(filepath)
                            metadata['pdf_filename'] = filename
                            metadata['status'] = 'success'
                            debug_print(f"成功下载: {filepath}", 5)

                        except Exception as e:
                            debug_print(f"PDF下载失败: {str(e)}", 5)
                            metadata['status'] = f'download_error: {str(e)}'

                        # Record the outcome (success or download error).
                        excel_writer.append_data(metadata)
                        current_id += 1

                    except Exception as e:
                        debug_print(f"文章页面处理失败: {str(e)}", 4)
                        metadata['status'] = f'article_page_error: {str(e)}'
                        excel_writer.append_data(metadata)
                        current_id += 1  # ID advances even on failure
                        continue

            except Exception as e:
                debug_print(f"页面爬取失败: {str(e)}", 1)
                continue

    except Exception as e:
        debug_print(f"爬虫发生严重错误: {str(e)}", 0)

    finally:
        # Summary only. FIX: the return statement now lives OUTSIDE this
        # ``finally`` block, so it can no longer swallow or overwrite
        # KeyboardInterrupt / SystemExit raised during the crawl.
        end_time = datetime.now()
        total_time = end_time - start_time
        print(f"\n脚本结束时间: {end_time.strftime('%Y-%m-%d %H:%M:%S')}")
        print(f"总耗时: {total_time}")
        debug_print("爬取过程结束", 0)
        debug_print(f"共下载 {len(downloaded_files)} 个PDF文件", 0)
        debug_print(f"下一个起始ID应为: {current_id}", 0)

    return downloaded_files, current_id, actual_end_page + 1

if __name__ == "__main__":
    # Crawl configuration. The resume values let an interrupted run be
    # restarted from where it stopped.
    search_keyword = "scrna"
    resume_id = 1      # starting article ID (resume point)
    resume_page = 1    # starting search-result page (resume point)
    page_limit = 190   # number of result pages to crawl

    debug_print("开始执行爬虫", 0)
    downloaded, next_id, next_page = download_nature_open_access_pdfs(
        keyword=search_keyword,
        max_pages=page_limit,
        start_id=resume_id,
        start_page=resume_page,
    )
    debug_print("爬取完成", 0)

    # Report what was fetched and where to resume next time.
    print(f"\n下载结果: {len(downloaded)} 个PDF文件")
    for pdf_path in downloaded:
        print(f"- {pdf_path}")
    print(f"\n下次可以从ID {next_id} 继续")