#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
重新爬取失败URL的脚本
读取urls_to_recrawl.txt文件，重新爬取失败的URL并保存成功的结果
"""

import csv
import logging
import os
import sys
import time
from typing import List, Dict, Tuple
from urllib.parse import urlparse
import traceback

# Make the sibling "crawl" directory importable so that the
# crawler_sciencedirect module below can be resolved. Must run before the
# import on the next statement, so the order here is deliberate.
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'crawl'))

from crawler_sciencedirect import ScienceDirectCrawler

# Configure logging: INFO level, mirrored to both a UTF-8 log file and the
# console so progress is visible live and preserved for later inspection.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('recrawl_urls.log', encoding='utf-8'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)


class URLRecrawler:
    """Re-crawls previously failed URLs and saves successful rows to CSV.

    Reads URLs (one per line) from ``urls_file``, fetches each page via
    ScienceDirectCrawler, extracts corresponding-author name/email pairs,
    and writes accumulated rows to ``output_file``, checkpointing the full
    file every 10 processed URLs.
    """

    # Maximum number of corresponding-author slots recorded per article.
    MAX_AUTHORS = 10

    def __init__(self):
        """Initialize crawler, counters, and fixed input/output paths."""
        self.sciencedirect_crawler = ScienceDirectCrawler()
        self.results = []          # successfully crawled rows (dicts keyed by CSV column)
        self.processed_count = 0   # URLs attempted
        self.success_count = 0     # URLs that yielded a result row
        self.error_count = 0       # URLs that failed
        # NOTE(review): hard-coded absolute Windows paths — consider making
        # these constructor parameters if the script is reused elsewhere.
        self.urls_file = r"C:\Users\byan\Desktop\crawl_by_trae\crawl_by_trae\result\urls_to_recrawl.txt"
        self.output_file = r"C:\Users\byan\Desktop\crawl_by_trae\crawl_by_trae\result\recrawled_urls_results.csv"

    @classmethod
    def _author_fields(cls):
        """Yield the CSV column names for all author slots, in output order.

        Single source of truth for the 10 name/email column pairs, so the
        row template and the CSV header cannot drift apart.
        """
        for num in range(1, cls.MAX_AUTHORS + 1):
            yield f'通讯作者{num}姓名'
            yield f'通讯作者{num}邮箱'

    def extract_article_title_from_url(self, url: str) -> str:
        """Derive a fallback title from the last URL path segment.

        Args:
            url: Article URL.

        Returns:
            str: The last non-empty path segment when the path has at least
            four segments (the typical /science/article/pii/<id> layout),
            otherwise "Unknown Title".
        """
        try:
            parsed_url = urlparse(url)
            path_parts = parsed_url.path.split('/')
            # e.g. /science/article/pii/<id> -> use <id> as the title stand-in
            if len(path_parts) >= 4 and path_parts[-1]:
                return path_parts[-1]
            return "Unknown Title"
        except Exception as e:
            logger.warning(f"无法从URL提取标题: {url}, 错误: {e}")
            return "Unknown Title"

    def crawl_single_url(self, url: str) -> tuple:
        """Crawl a single URL and build one CSV row.

        Args:
            url: URL to crawl.

        Returns:
            tuple: (success flag, row dict); the row dict is None on failure.
        """
        try:
            logger.info(f"开始爬取URL: {url}")

            driver = self.sciencedirect_crawler.fetch_article_html_enhanced(url)
            if not driver:
                # Page never loaded; nothing to parse.
                logger.warning(f"页面加载失败: {url}")
                return False, None

            try:
                html_content = driver.page_source
                # Prefer the live page title; fall back to a URL-derived one.
                article_title = driver.title or self.extract_article_title_from_url(url)
                # Extract corresponding-author info from the rendered HTML.
                authors = self.sciencedirect_crawler.parse_article(html_content)

                # Start from a full-schema row with every author slot blank,
                # so each CSV record carries all columns.
                article_data = {
                    '源文章标题': 'Recrawled Article',  # fixed marker for recrawled rows
                    'URL文章标题': article_title,
                    'url': url,
                }
                article_data.update({field: '' for field in self._author_fields()})

                # Fill in up to MAX_AUTHORS corresponding authors.
                if authors:
                    for i, author in enumerate(authors[:self.MAX_AUTHORS]):
                        author_num = i + 1
                        article_data[f'通讯作者{author_num}姓名'] = author.get('name', '')
                        article_data[f'通讯作者{author_num}邮箱'] = author.get('email', '')

                logger.info(f"成功爬取URL: {url}, 标题: {article_title}, 作者数量: {len(authors) if authors else 0}")
                return True, article_data

            finally:
                # Always release the WebDriver, even when parsing raises.
                driver.quit()

        except Exception as e:
            logger.error(f"爬取URL时发生错误: {url}, 错误: {e}")
            logger.error(traceback.format_exc())
            return False, None

    def read_urls_file(self) -> List[str]:
        """Read and filter URLs from ``self.urls_file``.

        Returns:
            List[str]: Non-blank lines starting with "http"; empty list on
            any read error.
        """
        urls = []
        try:
            with open(self.urls_file, 'r', encoding='utf-8') as f:
                for line in f:
                    url = line.strip()
                    # Keep only plausible URLs; skip blanks and comments.
                    if url and url.startswith('http'):
                        urls.append(url)
            logger.info(f"从文件读取到 {len(urls)} 个URL")
            return urls
        except Exception as e:
            logger.error(f"读取URLs文件失败: {e}")
            return []

    def save_results_to_csv(self):
        """Write all accumulated results to ``self.output_file``.

        Performs a full rewrite each call, so it doubles as an idempotent
        checkpoint: calling it repeatedly is safe.
        """
        if not self.results:
            logger.warning("没有结果需要保存")
            return

        try:
            # Column order: article info, then 10 author name/email pairs —
            # generated from the same helper used to build each row.
            fieldnames = ['源文章标题', 'URL文章标题', 'url'] + list(self._author_fields())

            with open(self.output_file, 'w', newline='', encoding='utf-8') as csvfile:
                writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                writer.writeheader()
                writer.writerows(self.results)

            logger.info(f"结果已保存到: {self.output_file}")
            logger.info(f"成功保存 {len(self.results)} 条记录")

        except Exception as e:
            logger.error(f"保存结果失败: {e}")
            logger.error(traceback.format_exc())

    def process_urls(self) -> bool:
        """Process every URL read from the input file.

        Returns:
            bool: True when processing ran to completion, False when no URLs
            were found or a fatal error occurred.
        """
        try:
            urls = self.read_urls_file()
            if not urls:
                logger.error("没有找到需要处理的URL")
                return False

            total_urls = len(urls)
            logger.info(f"开始处理 {total_urls} 个URL")

            for i, url in enumerate(urls, 1):
                logger.info(f"处理进度: {i}/{total_urls} - {url}")

                success, result_data = self.crawl_single_url(url)
                self.processed_count += 1

                if success and result_data:
                    self.results.append(result_data)
                    self.success_count += 1
                    logger.info(f"成功爬取第 {i} 个URL")
                else:
                    self.error_count += 1
                    logger.warning(f"第 {i} 个URL爬取失败")

                # Throttle requests to avoid hammering the site.
                time.sleep(2)

                # Checkpoint intermediate results every 10 URLs.
                if i % 10 == 0:
                    logger.info(f"已处理 {i} 个URL，保存中间结果...")
                    self.save_results_to_csv()

            # Final save (a no-op warning if nothing succeeded).
            self.save_results_to_csv()

            # Summary statistics. processed_count is > 0 here because the
            # URL list was non-empty, but guard the division defensively.
            rate = (self.success_count / self.processed_count * 100) if self.processed_count else 0.0
            logger.info("=" * 50)
            logger.info("重新爬取完成!")
            logger.info(f"总处理数量: {self.processed_count}")
            logger.info(f"成功数量: {self.success_count}")
            logger.info(f"失败数量: {self.error_count}")
            logger.info(f"成功率: {rate:.2f}%")
            logger.info(f"结果文件: {self.output_file}")
            logger.info("=" * 50)

            return True

        except Exception as e:
            logger.error(f"处理URL时发生错误: {e}")
            logger.error(traceback.format_exc())
            return False
        finally:
            # Release crawler resources (e.g. its WebDriver) when supported.
            if hasattr(self.sciencedirect_crawler, 'close'):
                self.sciencedirect_crawler.close()


def main():
    """Entry point: run the recrawl job and exit non-zero on failure."""
    logger.info("开始重新爬取失败的URL...")

    recrawler = URLRecrawler()
    if recrawler.process_urls():
        logger.info("重新爬取任务完成!")
    else:
        logger.error("重新爬取任务失败!")
        sys.exit(1)


if __name__ == "__main__":
    main()