#!/usr/bin/env python3
"""
改进的爬虫测试脚本 - 支持清理缓存
Improved scraper test script - with cache cleaning support
"""

import sys
import os
import json
import argparse
from datetime import datetime

# Add the project root directory to the Python path
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

def clear_cache_and_data(output_dir='./data'):
    """Remove the dedup cache and today's data files from *output_dir*.

    Deletes:
      * the dedup cache file ``.dedup_urls.pkl``
      * every ``*.json`` file whose name contains today's date (``YYYYMMDD``)

    All other files are left untouched.  If *output_dir* does not exist,
    a message is printed and the function returns without error.

    Args:
        output_dir: Directory holding the scraper's cache and data files.
    """
    print("🧹 清理缓存和数据文件...")

    if not os.path.exists(output_dir):
        print(f"📁 输出目录 {output_dir} 不存在")
        return

    files_removed = 0
    # Compute today's stamp once, outside the loop (loop-invariant).
    today = datetime.now().strftime('%Y%m%d')

    for filename in os.listdir(output_dir):
        filepath = os.path.join(output_dir, filename)

        # Dedup cache file.
        if filename == '.dedup_urls.pkl':
            try:
                os.remove(filepath)
                # Bug fix: the original printed the literal "(unknown)"
                # instead of the file name (broken format string).
                print(f"   ✅ 删除去重缓存: {filename}")
                files_removed += 1
            except OSError as e:
                print(f"   ❌ 删除失败: {filename} - {e}")

        # Data files produced today.
        elif filename.endswith('.json') and today in filename:
            try:
                os.remove(filepath)
                print(f"   ✅ 删除数据文件: {filename}")
                files_removed += 1
            except OSError as e:
                print(f"   ❌ 删除失败: {filename} - {e}")

    print(f"🧹 清理完成，删除了 {files_removed} 个文件")

def main():
    """Entry point for the scraper test CLI.

    Parses command-line options, optionally cleans cached/today's data,
    runs the scraper, prints a summary, and writes a stats JSON file.

    Returns:
        0 on success; 1 on error or user interrupt (suitable for
        ``sys.exit``).
    """
    parser = argparse.ArgumentParser(description='铺先生爬虫测试工具')
    parser.add_argument('--clean', action='store_true', help='清理缓存和今日数据文件')
    parser.add_argument('--no-dedup', action='store_true', help='禁用去重功能')
    parser.add_argument('--cities', type=str, help='指定城市，用逗号分隔，如: sz,gz')
    parser.add_argument('--types', type=str, help='指定业态类型，用逗号分隔，如: catering,entertainment')

    args = parser.parse_args()

    print("🚀 铺先生爬虫测试运行")
    print("=" * 60)

    # Clean cache/data files first if requested.
    if args.clean:
        clear_cache_and_data()
        print()

    try:
        from puxiansheng_scraper.scraper_controller import ScraperController

        # Initialize configuration.
        print("📋 初始化配置...")
        config_file = 'config.yaml'

        # Parse optional comma-separated overrides up front.
        cities_override = None
        types_override = None
        if args.cities:
            cities_override = [city.strip() for city in args.cities.split(',')]
            print(f"🏙️  自定义城市: {cities_override}")
        if args.types:
            types_override = [t.strip() for t in args.types.split(',')]
            print(f"🏢 自定义业态: {types_override}")

        # Initialize the controller.
        print("📋 初始化爬虫控制器...")
        controller = ScraperController(config_file)

        # Bug fix: the original mutated a *separate* ConfigManager instance
        # that was never handed to the controller, so --cities/--types had
        # no effect.  Apply the overrides to the controller's own config.
        # NOTE(review): assumes ScraperController reads these lists via
        # controller.config after __init__ — confirm it does not snapshot
        # them during construction.
        if cities_override:
            controller.config.config['scraper']['cities'] = cities_override
        if types_override:
            controller.config.config['scraper']['business_types'] = types_override

        # Disable deduplication if requested.
        if args.no_dedup:
            print("🔄 禁用去重功能...")
            controller.data_storage.enable_dedup = False
            controller.data_storage.existing_urls.clear()

        # Show the effective configuration.
        cities = controller.config.get_cities()
        business_types = controller.config.get_business_types()

        print(f"🏙️  目标城市: {cities}")
        print(f"🏢 业态类型: {business_types}")
        print(f"⏱️  请求延迟: {controller.config.get_request_delay()} 秒")
        print(f"📁 输出目录: {controller.config.get('storage.output_dir')}")
        print(f"📄 输出格式: {controller.config.get('storage.format')}")
        print(f"🔄 去重功能: {controller.data_storage.enable_dedup}")
        print()

        # Estimate workload (one scrape per city/business-type pair).
        total_combinations = len(cities) * len(business_types)
        print(f"📊 总组合数: {total_combinations}")
        print("⚠️  注意: 这将进行真实的网络请求")
        print()

        # Run the scraper; timing comes from final_stats['processing_time'].
        print("🔄 开始爬取数据...")
        final_stats = controller.run()

        # Display results.
        print("\n" + "=" * 60)
        print("✅ 爬取完成!")
        print("=" * 60)

        # Basic statistics.
        print(f"⏱️  总耗时: {final_stats['processing_time']:.2f} 秒")
        print(f"🎯 成功组合: {final_stats['successful_combinations']}/{final_stats['total_combinations']}")
        print(f"📄 处理页面: {final_stats['total_pages_processed']}")
        print(f"🔍 发现条目: {final_stats['total_items_found']}")
        print(f"💾 保存条目: {final_stats['total_items_saved']}")

        # Share of found items dropped by deduplication.
        if final_stats['total_items_found'] > 0:
            dedup_rate = (final_stats['total_items_found'] - final_stats['total_items_saved']) / final_stats['total_items_found']
            print(f"🔄 去重率: {dedup_rate:.2%}")

        # Success rate over all city/type combinations.
        if final_stats['total_combinations'] > 0:
            success_rate = final_stats['successful_combinations'] / final_stats['total_combinations']
            print(f"✅ 成功率: {success_rate:.2%}")

        # Error breakdown (only non-zero counters).
        total_errors = sum(final_stats['total_errors'].values())
        if total_errors > 0:
            print(f"⚠️  总错误数: {total_errors}")
            for error_type, count in final_stats['total_errors'].items():
                if count > 0:
                    print(f"   {error_type}: {count}")

        # Per-combination detail; keys look like "<city>_<business_type>".
        print("\n📋 详细结果:")
        for key, stats in final_stats['city_business_stats'].items():
            city, business_type = key.split('_', 1)
            status = "✅" if stats['success'] else "❌"
            print(f"  {status} {city}-{business_type}: {stats.get('items_saved', 0)} 条数据")

            if not stats['success'] and 'error' in stats:
                print(f"     错误: {stats['error']}")

        # Persist the stats as JSON (datetimes must be ISO strings first).
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        stats_file = f"scraper_stats_{timestamp}.json"
        stats_for_json = final_stats.copy()
        if stats_for_json['start_time']:
            stats_for_json['start_time'] = stats_for_json['start_time'].isoformat()
        if stats_for_json['end_time']:
            stats_for_json['end_time'] = stats_for_json['end_time'].isoformat()

        with open(stats_file, 'w', encoding='utf-8') as f:
            json.dump(stats_for_json, f, ensure_ascii=False, indent=2)

        print(f"\n📊 统计信息已保存到: {stats_file}")

        # List the produced data files (skip hidden files such as the
        # dedup cache).
        output_dir = controller.config.get('storage.output_dir', './data')
        if os.path.exists(output_dir):
            files = os.listdir(output_dir)
            data_files = [f for f in files if f.endswith('.json') and not f.startswith('.')]
            if data_files:
                print(f"📁 数据文件保存在: {output_dir}")
                for file in sorted(data_files):
                    file_path = os.path.join(output_dir, file)
                    file_size = os.path.getsize(file_path)
                    print(f"   📄 {file} ({file_size} bytes)")

        print("\n🎉 测试完成!")

        # Usage hints.
        print("\n💡 使用建议:")
        print("   --clean: 清理缓存和今日数据文件")
        print("   --no-dedup: 禁用去重功能")
        print("   --cities sz: 只爬取深圳")
        print("   --types catering,entertainment: 只爬取餐饮和娱乐")

        return 0

    except KeyboardInterrupt:
        print("\n⏹️  用户取消了爬取")
        return 1
    except Exception as e:
        print(f"\n❌ 错误: {e}")
        import traceback
        traceback.print_exc()
        return 1

if __name__ == "__main__":
    sys.exit(main())
