import pandas as pd
import logging

# Logging configuration: every message goes to both a log file and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        # Persistent record of each run (appended in the working directory).
        logging.FileHandler("check_duplicates.log"),
        # Echo the same messages to stderr for interactive runs.
        logging.StreamHandler()
    ]
)

def check_duplicate_data(
    file_path="d:/Desktop/XHSCrawer/V2/results/初始表.xlsx",
    output_dir="d:/Desktop/XHSCrawer/V2/results",
    df=None,
    save_duplicates=True,
):
    """Check a spreadsheet for duplicate rows and report/export them.

    Two checks are performed: rows duplicated across every column, and rows
    sharing the same '笔记ID' (note-ID) value.  Findings are logged, and the
    offending rows are optionally written to Excel files under *output_dir*.

    Args:
        file_path: Excel file to read when *df* is not supplied.  Defaults to
            the original hard-coded location for backward compatibility.
        output_dir: Directory where duplicate-row report files are written.
        df: Optional pre-loaded DataFrame; when given, *file_path* is not
            read.  Useful for testing and for reusing an already-loaded table.
        save_duplicates: When True (default), export duplicate rows to Excel.

    Returns:
        dict with keys 'total_rows', 'unique_rows', 'duplicate_count_all' and
        'duplicate_count_id' (None when the '笔记ID' column is missing).
        Counts are plain Python ints.

    Raises:
        Any exception from reading or writing files is logged and re-raised.
    """
    try:
        if df is None:
            # Lazy %-style logging args avoid building strings when the
            # level is disabled.
            logging.info("正在读取初始表: %s", file_path)
            df = pd.read_excel(file_path)

        logging.info("初始表总行数: %s", len(df))

        # Full-row duplicates; keep=False marks every member of each
        # duplicate group, not just the later occurrences.
        duplicates_all = df.duplicated(keep=False)
        # Cast numpy.int64 -> int so the returned dict holds plain ints.
        duplicate_count_all = int(duplicates_all.sum())

        if duplicate_count_all > 0:
            logging.warning("发现 %s 行完全重复的数据", duplicate_count_all)
            duplicate_rows_all = df[duplicates_all]
            logging.info("重复行详情:\n%s", duplicate_rows_all)
            if save_duplicates:
                duplicate_rows_all.to_excel(
                    f"{output_dir}/重复行_全部列.xlsx", index=False
                )
                logging.info("已保存重复行到 '重复行_全部列.xlsx'")
        else:
            logging.info("未发现完全重复的数据行")

        # '笔记ID' is assumed to be the unique note identifier — TODO confirm
        # against the crawler that produces the spreadsheet.
        duplicate_count_id = None
        if '笔记ID' in df.columns:
            duplicates_by_id = df.duplicated(subset=['笔记ID'], keep=False)
            duplicate_count_id = int(duplicates_by_id.sum())

            if duplicate_count_id > 0:
                logging.warning("发现 %s 行笔记ID重复的数据", duplicate_count_id)
                duplicate_rows_id = df[duplicates_by_id]
                logging.info("按笔记ID重复的行详情:\n%s", duplicate_rows_id)
                if save_duplicates:
                    duplicate_rows_id.to_excel(
                        f"{output_dir}/重复行_笔记ID.xlsx", index=False
                    )
                    logging.info("已保存按笔记ID重复的行到 '重复行_笔记ID.xlsx'")
            else:
                logging.info("未发现笔记ID重复的数据行")
        else:
            logging.warning("未找到'笔记ID'列，跳过基于笔记ID的重复检查")

        # Summary: how many rows would survive full-row deduplication.
        unique_count = len(df.drop_duplicates())
        logging.info("去重后总行数: %s", unique_count)
        logging.info("移除的重复行数: %s", len(df) - unique_count)

        return {
            'total_rows': len(df),
            'unique_rows': unique_count,
            'duplicate_count_all': duplicate_count_all,
            'duplicate_count_id': duplicate_count_id,
        }

    except Exception as e:
        logging.error("检查重复数据时出错: %s", e)
        raise

if __name__ == "__main__":
    logging.info("开始检查初始表中的重复数据...")
    stats = check_duplicate_data()
    logging.info("重复数据检查完成")

    # Human-readable summary on stdout; full details go to the log file.
    print(f"\n检查结果摘要:")
    print(f"- 初始表总行数: {stats['total_rows']}")
    print(f"- 去重后总行数: {stats['unique_rows']}")
    print(f"- 完全重复的行数: {stats['duplicate_count_all']}")
    id_dupe_count = stats['duplicate_count_id']
    if id_dupe_count is not None:
        print(f"- 笔记ID重复的行数: {id_dupe_count}")
    print(f"\n详细日志已保存到 check_duplicates.log")
    if stats['duplicate_count_all'] > 0:
        print("重复行已保存到 results 目录下的文件中")
