#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
UAE Contacts Crawler - 主入口文件

这个文件提供了运行爬虫工作流的主要接口。
支持单个HTML文件处理和批量处理模式。

使用方法:
    python main.py <html_index>
    python main.py <html_indices>
    python main.py all

示例:
    python main.py 1
    python main.py 1,2,3,4,5
    python main.py all
"""

import sys
import os
import json
from pathlib import Path
from typing import List, Optional
from datetime import datetime

# 添加src目录到Python路径
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

from workflow import run_crawler
from config import MODEL_NAME, SILLICON_API_URL


def print_banner():
    """Print the startup banner, including the configured model and API URL."""
    separator = "=" * 60
    banner_lines = [
        separator,
        "    UAE Contacts Crawler - LangGraph工作流",
        "    基于AI的阿联酋企业联系信息提取系统",
        separator,
        f"AI模型: {MODEL_NAME}",
        f"API地址: {SILLICON_API_URL}",
        separator,
    ]
    for line in banner_lines:
        print(line)


def print_usage():
    """Print the command-line usage help to stdout."""
    help_lines = [
        "\n使用方法:",
        "  单个文件处理:",
        "    python main.py <html_index>",
        "\n  批量处理:",
        "    python main.py <html_indices>",
        "\n  处理所有HTML文件:",
        "    python main.py all",
        "\n参数说明:",
        "  html_index        - HTML文件索引（如1表示处理1.html）",
        "  html_indices     - 逗号分隔的HTML索引列表（如1,2,3,4,5）",
        "\n示例:",
        "  python main.py 1",
        "  python main.py 1,2,3,4,5",
        "  python main.py all",
        "\n注意: 如果结果文件已存在，将自动跳过处理",
    ]
    # One joined print emits exactly the same byte stream as the original
    # sequence of individual print() calls.
    print("\n".join(help_lines))


def check_html_cache_dir():
    """Check that data/html_cache exists and holds at least one *.html file.

    Prints a warning and returns False when the directory is missing or
    empty; otherwise prints the file count and returns True.
    """
    cache_dir = Path("data/html_cache")

    if not cache_dir.exists():
        print(f"\n⚠️  警告: HTML缓存目录不存在: {cache_dir}")
        print("请确保已经爬取了HTML文件到该目录")
        return False

    # Count lazily instead of materializing the whole file list.
    html_count = sum(1 for _ in cache_dir.glob("*.html"))
    if html_count == 0:
        print(f"\n⚠️  警告: HTML缓存目录为空: {cache_dir}")
        print("请先爬取HTML文件")
        return False

    print(f"\n✅ 发现 {html_count} 个HTML文件")
    return True


def check_result_exists(html_index: int) -> bool:
    """Return True when the result file for *html_index* is already on disk."""
    result_path = os.path.join("data", "results", f"single_result_{html_index}.json")
    return os.path.exists(result_path)


def get_all_html_indices() -> List[int]:
    """Collect the sorted numeric indices of every N.html file in data/html_cache.

    Files whose stem is not an integer (e.g. "readme.html") are ignored.
    Returns an empty list when the cache directory does not exist.
    """
    html_cache_dir = Path("data/html_cache")
    if not html_cache_dir.exists():
        return []

    def _stem_as_int(path):
        # Map "3.html" -> 3; None marks a non-numeric stem to be filtered out.
        try:
            return int(path.stem)
        except ValueError:
            return None

    parsed = (_stem_as_int(p) for p in html_cache_dir.glob("*.html"))
    return sorted(i for i in parsed if i is not None)


def save_result_to_file(result: dict, filename: str):
    """Write *result* as pretty-printed UTF-8 JSON under data/results/.

    Best-effort: any failure is reported on stdout rather than raised,
    so one bad save never aborts a batch run.
    """
    try:
        target = Path("data/results") / filename
        target.parent.mkdir(parents=True, exist_ok=True)
        payload = json.dumps(result, ensure_ascii=False, indent=2)
        target.write_text(payload, encoding='utf-8')
        print(f"\n💾 结果已保存到: {target}")
    except Exception as e:
        print(f"\n❌ 保存结果失败: {e}")


def _filter_pending(html_indices: List[int]):
    """Split *html_indices* into (pending, skipped) lists.

    Indices whose result file already exists are skipped (with a notice
    printed per index) so reruns never redo finished work.
    """
    pending_indices = []
    skipped_indices = []
    for index in html_indices:
        if check_result_exists(index):
            skipped_indices.append(index)
            print(f"⏭️  跳过索引 {index}（结果文件已存在）")
        else:
            pending_indices.append(index)
    return pending_indices, skipped_indices


def _process_indices(pending_indices: List[int], skipped_indices: List[int], summary_title: str):
    """Crawl each pending index, save per-index results, and print a summary.

    Args:
        pending_indices: indices still needing processing.
        skipped_indices: indices skipped earlier; only counted in the summary.
        summary_title: heading printed above the aggregate summary.
    """
    total_processed = 0
    total_emails = 0
    total_phones = 0
    total_social_media = 0

    for index in pending_indices:
        print(f"\n📄 处理索引 {index}...")
        result = run_crawler(index, None, 1)

        if result and "error" not in result:
            save_result_to_file(result, f"single_result_{index}.json")
            total_processed += result.get('processed_files', 0)
            total_emails += result.get('total_emails', 0)
            total_phones += result.get('total_phones', 0)
            total_social_media += result.get('social_media_platforms', 0)
            print(f"✅ 索引 {index} 处理完成")
        else:
            print(f"❌ 索引 {index} 处理失败: {result.get('error', '未知错误') if result else '未知错误'}")

    print(f"\n📊 {summary_title}:")
    print(f"  总处理文件数: {total_processed}")
    print(f"  总邮箱数: {total_emails}")
    print(f"  总电话数: {total_phones}")
    print(f"  社交媒体平台数: {total_social_media}")
    print(f"  跳过文件数: {len(skipped_indices)}")


def main():
    """CLI entry point.

    Dispatches on sys.argv[1]: "all" processes every cached HTML file,
    a comma-separated list processes those indices (skipping finished
    ones), and a single integer force-reprocesses that index.
    """
    print_banner()

    if len(sys.argv) < 2:
        print_usage()
        sys.exit(1)

    if not check_html_cache_dir():
        sys.exit(1)

    try:
        if sys.argv[1].lower() == "all":
            # Process every HTML file found in the cache directory.
            print(f"\n🚀 开始处理所有HTML文件模式")

            html_indices = get_all_html_indices()
            if not html_indices:
                print("❌ 未找到任何HTML文件")
                print("请确保data/html_cache目录中存在HTML文件")
                sys.exit(1)

            print(f"发现 {len(html_indices)} 个HTML文件: {html_indices}")

            pending_indices, skipped_indices = _filter_pending(html_indices)
            if not pending_indices:
                print("✅ 所有文件都已处理完成")
                return

            print(f"待处理索引: {pending_indices}")
            if skipped_indices:
                print(f"跳过索引: {skipped_indices}")

            _process_indices(pending_indices, skipped_indices, "全量处理摘要")

        elif ',' in sys.argv[1]:
            # Batch mode: comma-separated index list, e.g. "1,2,3".
            html_indices = [int(x.strip()) for x in sys.argv[1].split(',')]

            print(f"\n🚀 开始批量处理模式")
            print(f"HTML索引列表: {html_indices}")

            pending_indices, skipped_indices = _filter_pending(html_indices)
            if not pending_indices:
                print("✅ 所有指定文件都已处理完成")
                return

            print(f"待处理索引: {pending_indices}")
            if skipped_indices:
                print(f"跳过索引: {skipped_indices}")

            _process_indices(pending_indices, skipped_indices, "批量处理摘要")

        else:
            # Single-file mode: always reprocess, overwriting any existing result.
            html_index = int(sys.argv[1])

            print(f"\n🚀 开始单文件处理模式（强制覆盖）")
            print(f"HTML索引: {html_index}")

            if check_result_exists(html_index):
                print(f"⚠️  结果文件已存在，将被覆盖")

            result = run_crawler(html_index, None, 1)

            # The crawler resolves the actual URL itself (e.g. from its database).
            actual_url = result.get('url') if result and 'url' in result else None
            print(f"使用URL: {actual_url or '未找到'}")

            save_result_to_file(result, f"single_result_{html_index}.json")

            if result and "error" in result:
                print(f"\n❌ 处理失败: {result.get('error', '未知错误')}")
            elif not result:
                print(f"\n❌ 处理失败: 未知错误")

    except ValueError as e:
        # Non-numeric index or malformed comma list.
        print(f"❌ 参数错误: {e}")
        print_usage()
        sys.exit(1)
    except KeyboardInterrupt:
        print("\n\n⏹️  用户中断处理")
        sys.exit(0)
    except Exception as e:
        print(f"\n❌ 程序执行失败: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)

    print("\n✅ 程序执行完成")


if __name__ == "__main__":
    main()