#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Merge crawled-result CSV files.

Concatenates several crawl_results CSV part-files, in order, into one
combined CSV (single header row followed by all data rows).
"""

import csv
import os
import logging
from typing import List

# Logging configuration: INFO level, mirrored to a UTF-8 log file and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('merge_crawl_results.log', encoding='utf-8'),
        logging.StreamHandler()
    ]
)
# Module-level logger shared by all functions below.
logger = logging.getLogger(__name__)


def merge_csv_files():
    """Merge the crawl-result CSV part-files into a single CSV, in order.

    Reads the hard-coded ``data/crawl_results_*.csv`` files, writes one
    header row followed by every non-empty data row from each file to
    ``data/crawl_1-300.csv``, then verifies the merged row count and logs
    a summary.

    Returns:
        bool: True when the merge completed, False when no input file was
        found or an unexpected error occurred.
    """

    # Input files to merge, in this exact order.
    input_files = [
        "data/crawl_results_1-50.csv",
        "data/crawl_results_51-260.csv",
        "data/crawl_results_261-280.csv",
        "data/crawl_results_281-300.csv"
    ]

    # Merged output file.
    output_file = "data/crawl_1-300.csv"

    # Running statistics for the final summary.
    total_rows = 0
    files_processed = 0

    try:
        # Make sure the output directory exists.
        os.makedirs(os.path.dirname(output_file), exist_ok=True)

        # Keep only the input files that actually exist on disk.
        existing_files = []
        for file_path in input_files:
            if os.path.exists(file_path):
                existing_files.append(file_path)
                logger.info(f"找到文件: {file_path}")
            else:
                logger.warning(f"文件不存在: {file_path}")

        if not existing_files:
            logger.error("没有找到任何要合并的文件")
            return False

        # Start merging.
        logger.info(f"开始合并 {len(existing_files)} 个文件...")

        with open(output_file, 'w', newline='', encoding='utf-8') as outfile:
            writer = None
            header_written = False

            # NOTE: this index must not be reused by any inner loop (the
            # original code shadowed it when building the standard header).
            for file_index, file_path in enumerate(existing_files):
                logger.info(f"处理文件 {file_index+1}/{len(existing_files)}: {file_path}")

                try:
                    # Try encodings in order.  'utf-8-sig' must come BEFORE
                    # 'utf-8': plain utf-8 decodes a BOM-prefixed file without
                    # error, which would leak '\ufeff' into the first header
                    # cell instead of stripping it.
                    encodings = ['utf-8-sig', 'utf-8', 'gbk']
                    file_data = None
                    used_encoding = None

                    for encoding in encodings:
                        try:
                            with open(file_path, 'r', encoding=encoding) as infile:
                                file_data = list(csv.reader(infile))
                                used_encoding = encoding
                                break
                        except UnicodeDecodeError:
                            continue

                    if file_data is None:
                        logger.error(f"无法读取文件 {file_path}，尝试了所有编码方式")
                        continue

                    logger.info(f"使用编码 {used_encoding} 成功读取文件 {file_path}")

                    if not file_data:
                        logger.warning(f"文件 {file_path} 为空")
                        continue

                    # Header handling: only the first successfully read file
                    # contributes a header row.
                    if not header_written:
                        header = file_data[0]
                        # Mojibake check: these characters appear when a GBK
                        # header was mis-decoded; fall back to a standard one.
                        if any('Դ' in str(cell) or 'ͨѶ' in str(cell) for cell in header):
                            # Standard header: 3 title/url columns plus 10
                            # pairs of corresponding-author name/email columns.
                            standard_header = ['源文章标题', 'URL文章标题', 'url']
                            # Fresh loop variable — the original reused ``i``
                            # and clobbered the outer file index.
                            for author_no in range(1, 11):
                                standard_header.extend([f'通讯作者{author_no}姓名', f'通讯作者{author_no}邮箱'])
                            writer = csv.writer(outfile)
                            writer.writerow(standard_header)
                            logger.info("使用标准表头（原表头存在编码问题）")
                        else:
                            writer = csv.writer(outfile)
                            writer.writerow(header)
                            logger.info(f"使用文件表头: {header[:3]}...")

                        header_written = True
                        data_rows = file_data[1:]  # skip the header row
                    else:
                        # Later files: drop their (duplicate) header row.
                        data_rows = file_data[1:]

                    # Write data rows, skipping rows that are empty or
                    # contain only whitespace cells.
                    rows_in_file = 0
                    for row in data_rows:
                        if row and any(cell.strip() for cell in row):
                            writer.writerow(row)
                            rows_in_file += 1

                    total_rows += rows_in_file
                    files_processed += 1

                    logger.info(f"文件 {file_path} 处理完成，添加了 {rows_in_file} 行数据")

                except Exception as e:
                    # Best-effort: one broken input file must not abort the merge.
                    logger.error(f"处理文件 {file_path} 时出错: {e}")
                    continue

        # Summary.
        logger.info("=" * 50)
        logger.info("合并完成！")
        logger.info(f"处理文件数: {files_processed}/{len(existing_files)}")
        logger.info(f"总数据行数: {total_rows}")
        logger.info(f"输出文件: {output_file}")

        # Verify the output by re-counting its data rows with the CSV parser
        # (csv.reader correctly handles quoted embedded newlines).
        if os.path.exists(output_file):
            with open(output_file, 'r', encoding='utf-8') as f:
                reader = csv.reader(f)
                # max(..., 0): an entirely empty output file must report 0,
                # not -1 (original subtracted the header unconditionally).
                output_rows = max(sum(1 for row in reader) - 1, 0)
                logger.info(f"输出文件验证: {output_rows} 行数据")

                if output_rows == total_rows:
                    logger.info("✅ 数据行数验证通过")
                else:
                    logger.warning(f"⚠️ 数据行数不匹配: 预期 {total_rows}, 实际 {output_rows}")

        return True

    except Exception as e:
        logger.error(f"合并过程中出错: {e}")
        return False


def show_file_info():
    """Print a console summary of the input files that will be merged.

    For each expected input file: its size, an approximate data-row count
    (physical lines minus one header line) and whether it exists; then the
    overall totals and the output path that merge_csv_files() writes.
    """
    files = [
        "data/crawl_results_1-50.csv",
        "data/crawl_results_51-260.csv",
        "data/crawl_results_261-280.csv",
        "data/crawl_results_281-300.csv"
    ]

    print("=" * 60)
    print("CSV文件合并工具")
    print("=" * 60)
    print("要合并的文件列表:")

    total_size = 0
    existing_count = 0

    for i, file_path in enumerate(files, 1):
        if os.path.exists(file_path):
            file_size = os.path.getsize(file_path)
            total_size += file_size
            existing_count += 1

            # Count physical lines minus the header.  Narrowed exception
            # types: the original bare ``except:`` swallowed everything,
            # including KeyboardInterrupt and SystemExit.
            try:
                with open(file_path, 'r', encoding='utf-8') as f:
                    row_count = sum(1 for line in f) - 1
            except (UnicodeDecodeError, OSError):
                try:
                    with open(file_path, 'r', encoding='gbk') as f:
                        row_count = sum(1 for line in f) - 1
                except (UnicodeDecodeError, OSError):
                    row_count = "未知"

            print(f"  {i}. {file_path}")
            print(f"     大小: {file_size:,} 字节")
            print(f"     行数: {row_count}")
            print(f"     状态: ✅ 存在")
        else:
            print(f"  {i}. {file_path}")
            print(f"     状态: ❌ 不存在")
        print()

    print(f"现有文件数: {existing_count}/{len(files)}")
    print(f"总大小: {total_size:,} 字节")
    # Bug fix: merge_csv_files() actually writes data/crawl_1-300.csv, not
    # data/crawl_results_merged.csv as this summary previously claimed.
    print(f"输出文件: data/crawl_1-300.csv")
    print("=" * 60)


def main():
    """Interactive entry point: show file info, ask for confirmation, merge."""
    try:
        # Show what will be merged before touching anything.
        show_file_info()

        # Ask the user for confirmation.
        confirm = input("\n是否开始合并? (y/n): ").strip().lower()
        if confirm not in ['y', 'yes', '是']:
            print("取消合并")
            return

        # Run the merge.
        print("\n开始合并文件...")
        success = merge_csv_files()

        if success:
            print("\n✅ 文件合并成功！")
            # Bug fix: report the file merge_csv_files() actually writes
            # (the old message named data/crawl_results_merged.csv).
            print("合并后的文件: data/crawl_1-300.csv")
        else:
            print("\n❌ 文件合并失败，请查看日志了解详情")

    except KeyboardInterrupt:
        print("\n\n用户中断操作")
    except Exception as e:
        # Top-level boundary: log and report instead of crashing with a traceback.
        logger.error(f"程序执行出错: {e}")
        print(f"程序执行出错: {e}")


# Run interactively only when executed as a script, not on import.
if __name__ == "__main__":
    main()