#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
合并重新爬取数据的脚本
将重新爬取成功的数据合并回原始的Excel和CSV文件中
"""

import logging
import os
import shutil
import sys
import traceback
from typing import Dict, List, Tuple

import pandas as pd

# Logging setup: INFO-level messages go both to a UTF-8 log file
# (so Chinese log text is preserved) and to the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('merge_recrawled_data.log', encoding='utf-8'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)


class RecrawledDataMerger:
    """Merge re-crawled results back into the original result files.

    Re-crawled URL rows are appended to the Excel result file; re-crawled
    author rows update matching records (matched by ``url``) in the CSV
    file.  Both originals are backed up before anything is modified.
    """

    # Number of correspondent-author slots per record (通讯作者1..10).
    MAX_AUTHORS = 10

    def __init__(self):
        """Initialize file paths and merge counters."""
        # Original output files to be updated in place.
        self.excel_file = r"C:\Users\byan\Desktop\crawl_by_trae\crawl_by_trae\result\crawl_sciencedirect_results.xlsx"
        self.csv_file = r"C:\Users\byan\Desktop\crawl_by_trae\crawl_by_trae\result\crawl_1-300.csv"

        # Re-crawl result files produced by the re-crawl scripts.
        self.recrawled_urls_file = r"C:\Users\byan\Desktop\crawl_by_trae\crawl_by_trae\result\recrawled_urls_results.csv"
        self.recrawled_authors_file = r"C:\Users\byan\Desktop\crawl_by_trae\crawl_by_trae\result\recrawled_empty_authors_results.csv"

        # Backup destinations, written before any modification.
        self.excel_backup = r"C:\Users\byan\Desktop\crawl_by_trae\crawl_by_trae\result\crawl_sciencedirect_results_backup.xlsx"
        self.csv_backup = r"C:\Users\byan\Desktop\crawl_by_trae\crawl_by_trae\result\crawl_1-300_backup.csv"

        self.merged_count = 0   # rows appended/updated across both files
        self.skipped_count = 0  # rows skipped (no new author info, or URL not found)

    @staticmethod
    def _cell(row, column) -> str:
        """Return a cell value from a pandas row as a stripped string.

        pandas represents empty CSV cells as NaN (a float), and
        ``Series.get(col, '')`` returns that NaN rather than the default,
        so calling ``.strip()`` directly raised AttributeError and aborted
        the whole merge.  Missing columns and NaN both normalize to ''.
        """
        value = row.get(column, '')
        if pd.isna(value):
            return ''
        return str(value).strip()

    def _has_author_info(self, row) -> bool:
        """Return True if the row has at least one non-empty author name or email."""
        for i in range(1, self.MAX_AUTHORS + 1):
            if (self._cell(row, f'通讯作者{i}姓名') or
                    self._cell(row, f'通讯作者{i}邮箱')):
                return True
        return False

    def create_backups(self):
        """Create backups of the original files before modifying them.

        Uses byte-for-byte copies (``shutil.copy2``) instead of a pandas
        read/write round-trip, which could silently alter formatting,
        dtypes, or cell values in the backup.

        Raises:
            OSError: re-raised after logging if a copy fails.
        """
        try:
            # Back up the Excel file if it exists.
            if os.path.exists(self.excel_file):
                shutil.copy2(self.excel_file, self.excel_backup)
                logger.info(f"Excel文件备份完成: {self.excel_backup}")

            # Back up the CSV file if it exists.
            if os.path.exists(self.csv_file):
                shutil.copy2(self.csv_file, self.csv_backup)
                logger.info(f"CSV文件备份完成: {self.csv_backup}")

        except Exception as e:
            logger.error(f"创建备份文件失败: {e}")
            raise

    def merge_urls_to_excel(self) -> bool:
        """Append re-crawled URL rows to the Excel result file.

        Returns:
            bool: True when rows were appended; False when the input file
            is missing/empty or an error occurred (logged, not raised).
        """
        try:
            # Nothing to do if the re-crawl output does not exist.
            if not os.path.exists(self.recrawled_urls_file):
                logger.warning(f"重新爬取的URL结果文件不存在: {self.recrawled_urls_file}")
                return False

            df_recrawled = pd.read_csv(self.recrawled_urls_file)
            if df_recrawled.empty:
                logger.warning("重新爬取的URL数据为空")
                return False

            logger.info(f"读取到 {len(df_recrawled)} 条重新爬取的URL数据")

            df_excel = pd.read_excel(self.excel_file)
            logger.info(f"原始Excel文件包含 {len(df_excel)} 条记录")

            # These rows were newly crawled, so they are simply appended;
            # duplicate detection happens later in verify_merge_results().
            df_combined = pd.concat([df_excel, df_recrawled], ignore_index=True)
            df_combined.to_excel(self.excel_file, index=False)

            added_count = len(df_recrawled)
            self.merged_count += added_count

            logger.info(f"成功将 {added_count} 条重新爬取的URL数据添加到Excel文件")
            logger.info(f"合并后Excel文件包含 {len(df_combined)} 条记录")

            return True

        except Exception as e:
            logger.error(f"合并URL数据到Excel文件失败: {e}")
            logger.error(traceback.format_exc())
            return False

    def merge_authors_to_csv(self) -> bool:
        """Update author columns in the CSV file from re-crawled author data.

        Rows are matched by ``url``.  A matched row is updated only when the
        re-crawled row contains at least one non-empty author name or email;
        otherwise it is counted as skipped.

        Returns:
            bool: True when the CSV was rewritten; False when the input file
            is missing/empty or an error occurred (logged, not raised).
        """
        try:
            if not os.path.exists(self.recrawled_authors_file):
                logger.warning(f"重新爬取的作者结果文件不存在: {self.recrawled_authors_file}")
                return False

            df_recrawled = pd.read_csv(self.recrawled_authors_file)
            if df_recrawled.empty:
                logger.warning("重新爬取的作者数据为空")
                return False

            logger.info(f"读取到 {len(df_recrawled)} 条重新爬取的作者数据")

            df_csv = pd.read_csv(self.csv_file)
            logger.info(f"原始CSV文件包含 {len(df_csv)} 条记录")

            # Index both frames by URL so rows can be matched and updated
            # in place with .loc.
            df_csv.set_index('url', inplace=True)
            df_recrawled.set_index('url', inplace=True)

            updated_count = 0

            for url, recrawled_row in df_recrawled.iterrows():
                if url not in df_csv.index:
                    logger.warning(f"在原始CSV中未找到URL: {url}")
                    self.skipped_count += 1
                    continue

                if not self._has_author_info(recrawled_row):
                    self.skipped_count += 1
                    logger.info(f"跳过无新作者信息的记录: {url}")
                    continue

                # Overwrite every author slot with the re-crawled values
                # (NaN-safe: empty cells become '').
                for i in range(1, self.MAX_AUTHORS + 1):
                    name_col = f'通讯作者{i}姓名'
                    email_col = f'通讯作者{i}邮箱'
                    df_csv.loc[url, name_col] = self._cell(recrawled_row, name_col)
                    df_csv.loc[url, email_col] = self._cell(recrawled_row, email_col)

                # Update the title only when the re-crawl produced one.
                new_title = self._cell(recrawled_row, 'URL文章标题')
                if new_title:
                    df_csv.loc[url, 'URL文章标题'] = new_title

                updated_count += 1
                logger.info(f"更新记录: {url}")

            # Restore the url column and persist the updated CSV.
            df_csv.reset_index(inplace=True)
            df_csv.to_csv(self.csv_file, index=False, encoding='utf-8')

            self.merged_count += updated_count

            logger.info(f"成功更新 {updated_count} 条记录的作者信息")
            logger.info(f"更新后CSV文件包含 {len(df_csv)} 条记录")

            return True

        except Exception as e:
            logger.error(f"合并作者数据到CSV文件失败: {e}")
            logger.error(traceback.format_exc())
            return False

    def verify_merge_results(self):
        """Log sanity checks on the merged files.

        Reports record counts, duplicate URLs in the Excel file, and how
        many CSV records carry author information.  Errors are logged and
        swallowed — verification is best-effort and must not fail the run.
        """
        try:
            logger.info("开始验证合并结果...")

            # Verify the Excel file: count records and detect duplicate URLs.
            if os.path.exists(self.excel_file):
                df_excel = pd.read_excel(self.excel_file)
                logger.info(f"合并后Excel文件记录数: {len(df_excel)}")

                # Guard against a missing 'url' column (would raise KeyError).
                if 'url' in df_excel.columns:
                    duplicate_urls = df_excel['url'].duplicated().sum()
                    if duplicate_urls > 0:
                        logger.warning(f"Excel文件中发现 {duplicate_urls} 个重复的URL")
                    else:
                        logger.info("Excel文件中没有重复的URL")

            # Verify the CSV file: count records with/without author info.
            if os.path.exists(self.csv_file):
                df_csv = pd.read_csv(self.csv_file)
                logger.info(f"合并后CSV文件记录数: {len(df_csv)}")

                author_count = sum(
                    1 for _, row in df_csv.iterrows() if self._has_author_info(row)
                )

                logger.info(f"CSV文件中有作者信息的记录数: {author_count}")
                logger.info(f"CSV文件中无作者信息的记录数: {len(df_csv) - author_count}")

        except Exception as e:
            logger.error(f"验证合并结果失败: {e}")
            logger.error(traceback.format_exc())

    def merge_all_data(self) -> bool:
        """Run the full merge workflow: backup, merge both files, verify.

        Returns:
            bool: True when at least one of the two merge steps succeeded.
        """
        try:
            logger.info("开始合并重新爬取的数据...")

            # Back up originals first; a backup failure aborts the run.
            self.create_backups()

            # Append re-crawled URL rows to the Excel file.
            urls_success = self.merge_urls_to_excel()

            # Update author columns in the CSV file.
            authors_success = self.merge_authors_to_csv()

            # Best-effort verification (never raises).
            self.verify_merge_results()

            # Final summary.
            logger.info("=" * 50)
            logger.info("数据合并完成!")
            logger.info(f"总合并/更新记录数: {self.merged_count}")
            logger.info(f"跳过记录数: {self.skipped_count}")
            logger.info(f"URL数据合并: {'成功' if urls_success else '失败或无数据'}")
            logger.info(f"作者数据合并: {'成功' if authors_success else '失败或无数据'}")
            logger.info(f"备份文件: {self.excel_backup}, {self.csv_backup}")
            logger.info("=" * 50)

            return urls_success or authors_success

        except Exception as e:
            logger.error(f"合并数据时发生错误: {e}")
            logger.error(traceback.format_exc())
            return False


def main():
    """Entry point: run the full merge workflow; exit with status 1 on failure."""
    logger.info("开始合并重新爬取的数据...")

    if RecrawledDataMerger().merge_all_data():
        logger.info("数据合并任务完成!")
        return

    logger.error("数据合并任务失败!")
    sys.exit(1)


# Run the merge only when executed as a script, not when imported.
if __name__ == "__main__":
    main()