#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
批量爬取通讯作者信息脚本
读取citing_articles.csv文件，根据URL类型调用相应的爬虫，提取通讯作者信息并生成CSV文件
"""

import csv
import logging
import os
import sys
import time
from typing import List, Dict, Tuple
from urllib.parse import urlparse
import traceback

# 添加当前目录到Python路径，以便导入爬虫模块
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from crawler_sciencedirect import ScienceDirectCrawler
from crawler_others import OtherSitesCrawler

# Configure logging: INFO level, mirrored to a UTF-8 log file and to stderr.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('run_crawl.log', encoding='utf-8'),
        logging.StreamHandler()
    ]
)
# Module-level logger shared by the whole script.
logger = logging.getLogger(__name__)


class BatchCrawler:
    """Batch-crawls corresponding-author information for citing articles.

    Reads (source title, URL) rows from the citing-articles CSV, dispatches
    each URL to the matching site-specific crawler (ScienceDirect vs. other
    sites), and writes the extracted article titles and corresponding-author
    name/email pairs to an output CSV.
    """

    def __init__(self):
        """Initialize the site-specific crawlers, result storage and counters."""
        self.sciencedirect_crawler = ScienceDirectCrawler()
        self.others_crawler = OtherSitesCrawler()
        self.results = []  # list of per-article result dicts
        self.processed_count = 0
        self.success_count = 0
        self.error_count = 0
        # Input CSV: column 0 = source article title, column 2 = citing URL.
        self.csv_file_path = "../data/citing_articles.csv"

    def is_sciencedirect_url(self, url: str) -> bool:
        """Return True if *url* points at the ScienceDirect site.

        Args:
            url: URL to inspect.

        Returns:
            bool: True when the host contains ``sciencedirect.com``,
            False otherwise (including on parse failure).
        """
        try:
            domain = urlparse(url).netloc.lower()
            return 'sciencedirect.com' in domain
        except Exception as e:
            logger.error(f"解析URL时出错: {url}, 错误: {e}")
            return False

    def extract_article_title_from_url(self, url: str) -> str:
        """Derive a short title placeholder from *url*.

        Simplified: uses the last URL path segment instead of fetching the
        page to read the real title.

        Args:
            url: Article URL.

        Returns:
            str: Up to 50 characters of the last path segment, or of the
            URL itself when the path is empty.
        """
        try:
            last_segment = urlparse(url).path.strip('/').split('/')[-1]
            # Fix: ''.split('/') yields [''], so the previous truthiness test
            # on the list never reached the URL fallback; test the segment.
            if last_segment:
                return last_segment[:50]
            return url[:50]
        except Exception:
            return url[:50]

    def crawl_single_url(self, source_title: str, url: str) -> Tuple[str, list]:
        """Crawl one URL for its article title and corresponding authors.

        Args:
            source_title: Title of the source (cited) article.
            url: Target article URL.

        Returns:
            tuple: ``(article_title, authors_list)``; ``authors_list`` is a
            list of author dicts (possibly empty). Returns ``("NULL", [])``
            when the ScienceDirect page title fails to load.
        """
        try:
            logger.info(f"开始爬取URL: {url}")

            if self.is_sciencedirect_url(url):
                logger.info(f"使用ScienceDirectCrawler爬取: {url}")
                driver = self.sciencedirect_crawler.fetch_article_html_enhanced(url)
                if driver:
                    try:
                        html_content = driver.page_source

                        # Persist the fetched HTML for offline debugging.
                        saved_path = self.sciencedirect_crawler.save_html_for_debug(
                            html_content, url, "sciencedirect"
                        )
                        if saved_path:
                            logger.info(f"ScienceDirect页面已保存到: {saved_path}")

                        # Prefer the browser title; fall back to URL-derived one.
                        article_title = driver.title or self.extract_article_title_from_url(url)
                        authors = self.sciencedirect_crawler.parse_article(html_content)
                        return article_title, authors or []
                    finally:
                        # Always release the WebDriver, even on parse errors.
                        driver.quit()
                else:
                    # Page title load timed out: record a NULL placeholder row.
                    logger.warning(f"页面标题加载超时，跳过此条记录: {url}")
                    return "NULL", []
            else:
                logger.info(f"使用OtherSitesCrawler爬取: {url}")
                authors = self.others_crawler.extract_authors_from_url(url)
                article_title = self.others_crawler.extract_title_from_url(url)
                return article_title, authors or []

        except Exception as e:
            logger.error(f"爬取URL时出错: {url}, 错误: {e}")
            logger.error(f"错误详情: {traceback.format_exc()}")
            return self.extract_article_title_from_url(url), []

    def get_total_records(self) -> int:
        """Return the number of valid data rows in the input CSV.

        A row is valid when it has at least 3 columns and both the title
        (column 0) and URL (column 2) are non-empty.
        """
        try:
            if not os.path.exists(self.csv_file_path):
                return 0

            with open(self.csv_file_path, 'r', encoding='utf-8') as file:
                reader = csv.reader(file)
                next(reader)  # skip header row
                count = sum(1 for row in reader if len(row) >= 3 and row[0].strip() and row[2].strip())

            return count

        except Exception as e:
            logger.error(f"获取记录总数失败: {e}")
            return 0

    def parse_line_input(self, user_input: str) -> List[int]:
        """Parse a user-entered line-number expression.

        Supported formats:
        - single number: "5"
        - comma list: "1,3,5"
        - range: "1-5" or "1:5"
        - mixed: "1,3-5,8"

        Args:
            user_input: Raw string typed by the user.

        Returns:
            Sorted, de-duplicated list of valid 1-based line numbers
            (empty on any parse error).
        """
        line_numbers = []

        try:
            for part in user_input.strip().split(','):
                part = part.strip()

                # Ranges may use '-' or ':'; both separators behave the same,
                # so handle them through one code path.
                sep = '-' if '-' in part else (':' if ':' in part else None)
                if sep:
                    start_text, end_text = part.split(sep, 1)
                    start, end = int(start_text.strip()), int(end_text.strip())
                    line_numbers.extend(range(start, end + 1))
                else:
                    line_numbers.append(int(part))

            line_numbers = sorted(set(line_numbers))

            # Drop line numbers outside the file's record range.
            total_records = self.get_total_records()
            valid_lines = [line for line in line_numbers if 1 <= line <= total_records]

            if len(valid_lines) != len(line_numbers):
                invalid_lines = [line for line in line_numbers if line not in valid_lines]
                logger.warning(f"无效的行号将被忽略: {invalid_lines}")
                logger.info(f"有效行号范围: 1-{total_records}")

            return valid_lines

        except ValueError as e:
            logger.error(f"行号格式错误: {e}")
            return []
        except Exception as e:
            logger.error(f"解析行号失败: {e}")
            return []

    def get_records_by_lines(self, line_numbers: List[int]) -> List[Tuple[str, str, int]]:
        """Fetch the CSV records addressed by *line_numbers*.

        Args:
            line_numbers: 1-based line numbers (counted over valid rows only).

        Returns:
            List of (source title, URL, line number) tuples; out-of-range
            line numbers are logged and skipped.
        """
        try:
            with open(self.csv_file_path, 'r', encoding='utf-8') as file:
                reader = csv.reader(file)
                next(reader)  # skip header row

                # Collect all valid (title, url) rows first so line numbers
                # index into valid rows, not raw file lines.
                valid_records = []
                for row in reader:
                    if len(row) >= 3 and row[0].strip() and row[2].strip():
                        valid_records.append((row[0].strip(), row[2].strip()))

                records = []
                for line_num in line_numbers:
                    if 1 <= line_num <= len(valid_records):
                        source_title, url = valid_records[line_num - 1]
                        records.append((source_title, url, line_num))
                    else:
                        logger.warning(f"行号 {line_num} 超出范围 (1-{len(valid_records)})")

                return records

        except Exception as e:
            logger.error(f"读取记录失败: {e}")
            return []

    def generate_output_filename(self, line_numbers: List[int]) -> str:
        """Build the output CSV path from the processed line-number range.

        Args:
            line_numbers: Line numbers being processed.

        Returns:
            Path such as ``../data/crawl_results_1-10.csv`` (range),
            ``../data/crawl_results_5.csv`` (single line), or the plain
            default path when *line_numbers* is empty.
        """
        if not line_numbers:
            return "../data/crawl_results.csv"

        sorted_lines = sorted(line_numbers)
        min_line = sorted_lines[0]
        max_line = sorted_lines[-1]

        # Filename format: crawl_results_<min>-<max>.csv
        if min_line == max_line:
            filename = f"../data/crawl_results_{min_line}.csv"
        else:
            filename = f"../data/crawl_results_{min_line}-{max_line}.csv"

        return filename

    def read_csv_file(self, csv_file_path: str) -> List[Tuple[str, str]]:
        """Read the input CSV and return (source title, URL) pairs.

        Args:
            csv_file_path: Path to the input CSV file.

        Returns:
            List of (source title, URL) tuples; empty on read failure.
        """
        data = []
        try:
            with open(csv_file_path, 'r', encoding='utf-8') as file:
                reader = csv.reader(file)
                header = next(reader)  # skip header row
                logger.info(f"CSV文件表头: {header}")

                for row in reader:
                    if len(row) >= 3:  # require at least 3 columns
                        source_title = row[0].strip()  # column 0: source title
                        url = row[2].strip()  # column 2: URL
                        if source_title and url:
                            data.append((source_title, url))

            logger.info(f"成功读取CSV文件，共{len(data)}条记录")
            return data

        except Exception as e:
            logger.error(f"读取CSV文件时出错: {e}")
            return []

    def save_results_to_csv(self, output_file: str):
        """Write ``self.results`` to *output_file* as CSV.

        Layout: source title, crawled title, URL, then up to 10
        (name, email) corresponding-author column pairs.

        Args:
            output_file: Output CSV path.
        """
        try:
            # Fix: dirname() is '' for a bare filename and os.makedirs('')
            # raises FileNotFoundError, so only create a non-empty directory.
            output_dir = os.path.dirname(output_file)
            if output_dir:
                os.makedirs(output_dir, exist_ok=True)

            with open(output_file, 'w', newline='', encoding='utf-8') as file:
                writer = csv.writer(file)

                # Header: fixed columns plus 10 author name/email pairs.
                header = ['源文章标题', 'URL文章标题', 'url']
                for i in range(1, 11):
                    header.extend([f'通讯作者{i}姓名', f'通讯作者{i}邮箱'])
                writer.writerow(header)

                for result in self.results:
                    row = [result['source_title'], result['article_title'], result.get('url', '')]

                    authors = result.get('authors', [])
                    # "NULL" title means the page timed out; mark all author
                    # cells NULL as well so the failure is visible per row.
                    if result['article_title'] == "NULL":
                        for i in range(10):  # up to 10 corresponding authors
                            row.extend(['NULL', 'NULL'])
                    else:
                        for i in range(10):  # up to 10 corresponding authors
                            if i < len(authors):
                                author = authors[i]
                                row.extend([author.get('name', ''), author.get('email', '')])
                            else:
                                row.extend(['', ''])  # pad with blanks

                    writer.writerow(row)

            logger.info(f"结果已保存到: {output_file}")

        except Exception as e:
            logger.error(f"保存结果到CSV文件时出错: {e}")
            logger.error(f"错误详情: {traceback.format_exc()}")

    def process_records_by_lines(self, line_numbers: List[int]) -> bool:
        """Crawl the records addressed by *line_numbers* and save results.

        Args:
            line_numbers: Line numbers to process.

        Returns:
            bool: True when the batch ran to completion, False on setup or
            unexpected failure.
        """
        try:
            records = self.get_records_by_lines(line_numbers)
            if not records:
                logger.error("没有找到有效记录")
                return False

            output_file = self.generate_output_filename(line_numbers)

            # Reset accumulated results/statistics for this run.
            self.results = []
            self.processed_count = 0
            self.success_count = 0
            self.error_count = 0

            total_count = len(records)
            logger.info(f"准备处理{total_count}条记录")

            for i, (source_title, url, line_num) in enumerate(records, 1):
                logger.info(f"处理进度: {i}/{total_count} (行号{line_num}) - {source_title[:50]}...")

                try:
                    article_title, authors = self.crawl_single_url(source_title, url)

                    result = {
                        'source_title': source_title,
                        'article_title': article_title,
                        'url': url,
                        'authors': authors,
                        'line_number': line_num
                    }
                    self.results.append(result)

                    if authors:
                        self.success_count += 1
                        logger.info(f"成功提取到{len(authors)}位通讯作者")
                    else:
                        logger.warning(f"未找到通讯作者信息: {url}")

                    self.processed_count += 1

                    # Checkpoint every 5 records to limit data loss on crash.
                    if i % 5 == 0:
                        self.save_results_to_csv(output_file)
                        logger.info(f"已处理{i}条记录，中间结果已保存")

                    # Throttle requests to avoid hammering the sites.
                    time.sleep(3)

                except Exception as e:
                    self.error_count += 1
                    logger.error(f"处理记录时出错: {source_title}, URL: {url}, 错误: {e}")
                    continue

            # Final save after the loop completes.
            self.save_results_to_csv(output_file)

            logger.info("批量爬取任务完成！")
            logger.info(f"总处理数量: {self.processed_count}")
            logger.info(f"成功提取: {self.success_count}")
            logger.info(f"错误数量: {self.error_count}")
            if self.processed_count > 0:
                logger.info(f"成功率: {self.success_count/self.processed_count*100:.1f}%")
            else:
                logger.info("成功率: 0%")
            logger.info(f"结果已保存到: {output_file}")

            return True

        except Exception as e:
            logger.error(f"处理记录时出错: {e}")
            return False

    def run_batch_crawl(self, csv_file_path: str, output_file: str, max_urls: int = None):
        """Crawl every record in *csv_file_path* and save to *output_file*.

        Args:
            csv_file_path: Input CSV path.
            output_file: Output CSV path.
            max_urls: Optional cap on the number of records (for testing).
        """
        logger.info("开始批量爬取任务...")

        data = self.read_csv_file(csv_file_path)
        if not data:
            logger.error("没有读取到有效数据，退出程序")
            return

        # Fix: reset results/statistics like process_records_by_lines does,
        # so repeated calls don't duplicate rows or skew the counts.
        self.results = []
        self.processed_count = 0
        self.success_count = 0
        self.error_count = 0

        # Cap the workload in test mode.
        if max_urls and max_urls > 0:
            data = data[:max_urls]
            logger.info(f"测试模式：只处理前{max_urls}条记录")

        total_count = len(data)
        logger.info(f"准备处理{total_count}条记录")

        for i, (source_title, url) in enumerate(data, 1):
            logger.info(f"处理进度: {i}/{total_count} - {source_title[:50]}...")

            try:
                article_title, authors = self.crawl_single_url(source_title, url)

                result = {
                    'source_title': source_title,
                    'article_title': article_title,
                    'url': url,
                    'authors': authors
                }
                self.results.append(result)

                if authors:
                    self.success_count += 1
                    logger.info(f"成功提取到{len(authors)}位通讯作者")
                else:
                    logger.warning(f"未找到通讯作者信息: {url}")

                self.processed_count += 1

                # Checkpoint every 10 records to limit data loss on crash.
                if i % 10 == 0:
                    self.save_results_to_csv(output_file)
                    logger.info(f"已处理{i}条记录，中间结果已保存")

                # Throttle requests to avoid hammering the sites.
                time.sleep(2)

            except Exception as e:
                self.error_count += 1
                logger.error(f"处理记录时出错: {source_title}, URL: {url}, 错误: {e}")
                continue

        # Final save after the loop completes.
        self.save_results_to_csv(output_file)

        logger.info("批量爬取任务完成！")
        logger.info(f"总处理数量: {self.processed_count}")
        logger.info(f"成功提取: {self.success_count}")
        logger.info(f"错误数量: {self.error_count}")
        if self.processed_count > 0:
            logger.info(f"成功率: {self.success_count/self.processed_count*100:.1f}%")
        else:
            logger.info("成功率: 0%")


def main():
    """Interactive entry point: let the user pick records to crawl by line number.

    Loops reading line-number expressions (e.g. "1,3-5"), previews the
    matching records, asks for confirmation, then delegates to
    ``BatchCrawler.process_records_by_lines``. Exits on 'q'/'quit'/'exit',
    Ctrl-C, or when the user declines to continue.
    """
    try:
        print("=" * 60)
        print("通讯作者信息爬取工具")
        print("=" * 60)
        
        # Create the batch crawler instance.
        crawler = BatchCrawler()
        
        # Verify the input file exists before prompting the user.
        if not os.path.exists(crawler.csv_file_path):
            logger.error(f"输入文件不存在: {crawler.csv_file_path}")
            print(f"错误：输入文件不存在: {crawler.csv_file_path}")
            return
        
        # Count valid records to bound the accepted line numbers.
        total_records = crawler.get_total_records()
        if total_records == 0:
            logger.error("CSV文件中没有有效记录")
            print("错误：CSV文件中没有有效记录")
            return
        
        print(f"CSV文件: {crawler.csv_file_path}")
        print(f"总记录数: {total_records}")
        print()
        
        # Show the accepted input formats.
        print(f"请输入要处理的行号:")
        print(f"支持格式:")
        print(f"  - 单个行号: 5")
        print(f"  - 多个行号: 1,3,5")
        print(f"  - 范围: 1-5 或 1:5")
        print(f"  - 混合: 1,3-5,8")
        print(f"  - 输入 'q' 或 'quit' 退出")
        
        while True:
            try:
                user_input = input(f"\n请输入行号 (1-{total_records}): ").strip()
                
                if user_input.lower() in ['q', 'quit', 'exit']:
                    print("退出程序")
                    return
                
                if not user_input:
                    print("请输入有效的行号")
                    continue
                
                # Parse the user's line-number expression.
                line_numbers = crawler.parse_line_input(user_input)
                
                if not line_numbers:
                    print("无效的行号格式，请重新输入")
                    continue
                
                # Preview the records that will be processed.
                records = crawler.get_records_by_lines(line_numbers)
                output_filename = crawler.generate_output_filename(line_numbers)
                
                print(f"\n将要处理 {len(records)} 条记录:")
                for source_title, url, line_num in records:
                    print(f"  行号 {line_num:3d}: {source_title[:60]}{'...' if len(source_title) > 60 else ''}")
                    print(f"         URL: {url[:80]}{'...' if len(url) > 80 else ''}")
                    print()
                print(f"输出文件: {output_filename}")
                
                # Ask for confirmation before starting the crawl.
                confirm = input(f"\n确认处理这些记录? (y/n): ").strip().lower()
                if confirm not in ['y', 'yes', '是']:
                    print("取消处理")
                    continue
                
                # Run the batch crawl for the selected records.
                print(f"\n开始处理...")
                print("=" * 60)
                success = crawler.process_records_by_lines(line_numbers)
                print("=" * 60)
                
                if success:
                    print(f"\n✅ 处理完成！结果已保存到: {output_filename}")
                else:
                    print(f"\n❌ 处理失败")
                
                # Offer another round unless the user declines.
                continue_choice = input(f"\n是否继续处理其他记录? (y/n): ").strip().lower()
                if continue_choice not in ['y', 'yes', '是']:
                    break
                    
            except KeyboardInterrupt:
                print(f"\n\n用户中断处理")
                break
            except Exception as e:
                logger.error(f"处理过程中出错: {e}")
                print(f"出现错误: {e}")
                continue
        
        print(f"\n程序结束")
        
    except KeyboardInterrupt:
        print(f"\n\n用户中断处理")
    except Exception as e:
        logger.error(f"程序执行出错: {e}")
        print(f"程序执行出错: {e}")

# Script entry point: run the interactive CLI only when executed directly.
if __name__ == "__main__":
    main()