#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
CSV文件拆分脚本
将citing_articles.csv从第301行开始按URL类型拆分为两个文件
"""

import pandas as pd
import os

def split_csv_file(
    input_file=r"C:\Users\byan\Desktop\crawl_by_trae\crawl_by_trae\data\citing_articles.csv",
    output_dir=None,
    start_row=301,
    url_prefix="https://www.sciencedirect.com/",
):
    """Split a CSV of citing articles into two files by URL prefix.

    Rows from ``start_row`` (1-based) onward are partitioned into
    ``citing_articles_sciencedirect.csv`` (URL starts with ``url_prefix``)
    and ``citing_articles_others.csv`` (everything else), both written to
    ``output_dir``.

    Args:
        input_file: Path of the CSV to read. Must contain a 'URL' column.
        output_dir: Directory for the two output files. Defaults to the
            directory containing ``input_file``.
        start_row: First data row (1-based) to include in the split.
        url_prefix: URL prefix that routes a row to the ScienceDirect file.

    Returns:
        Tuple ``(df_sciencedirect, df_others)`` of the two partitions.
    """
    # Derive the output directory from the input path unless overridden.
    if output_dir is None:
        output_dir = os.path.dirname(input_file)
    sciencedirect_file = os.path.join(output_dir, "citing_articles_sciencedirect.csv")
    others_file = os.path.join(output_dir, "citing_articles_others.csv")

    print(f"正在读取文件: {input_file}")

    df = pd.read_csv(input_file, encoding='utf-8')

    print(f"文件总行数: {len(df)}")
    print(f"列名: {list(df.columns)}")

    # start_row is 1-based; pandas positional indexing is 0-based.
    df_from_start = df.iloc[start_row - 1:].copy()

    print(f"从第{start_row}行开始的数据行数: {len(df_from_start)}")

    # Classify by URL prefix; treat missing URLs as non-matching (na=False).
    sciencedirect_mask = df_from_start['URL'].str.startswith(url_prefix, na=False)

    df_sciencedirect = df_from_start[sciencedirect_mask].copy()
    df_others = df_from_start[~sciencedirect_mask].copy()

    print(f"ScienceDirect URLs: {len(df_sciencedirect)} 条")
    print(f"其他URLs: {len(df_others)} 条")

    print(f"保存ScienceDirect文件到: {sciencedirect_file}")
    df_sciencedirect.to_csv(sciencedirect_file, index=False, encoding='utf-8')

    print(f"保存其他文件到: {others_file}")
    df_others.to_csv(others_file, index=False, encoding='utf-8')

    # Summary statistics.
    print("\n=== 拆分完成 ===")
    print(f"原文件总行数: {len(df)}")
    print(f"处理的行数（从第{start_row}行开始）: {len(df_from_start)}")
    print(f"ScienceDirect文件行数: {len(df_sciencedirect)}")
    print(f"其他文件行数: {len(df_others)}")

    # Show a small preview of each partition, if non-empty.
    if len(df_sciencedirect) > 0:
        print(f"\nScienceDirect文件示例（前3行）:")
        print(df_sciencedirect.head(3).to_string(index=False))

    if len(df_others) > 0:
        print(f"\n其他文件示例（前3行）:")
        print(df_others.head(3).to_string(index=False))

    return df_sciencedirect, df_others

if __name__ == "__main__":
    split_csv_file()