#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
对比分析成功和失败的 Wiley URL
"""

import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'crawl'))

from crawl.crawler_others import OtherSitesCrawler
import logging

# Configure root logging: mirror every record to a UTF-8 log file
# and to the console, with timestamps and level names.
_log_handlers = [
    logging.FileHandler('compare_wiley_urls.log', encoding='utf-8'),
    logging.StreamHandler(),
]
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=_log_handlers,
)

def _extract_and_report(crawler, title, url):
    """Run author extraction for *url* and print a human-readable report.

    Args:
        crawler: an OtherSitesCrawler instance providing
            ``extract_authors_from_url(url) -> list[dict]`` where each dict
            has ``'name'`` and ``'email'`` keys (assumed from usage below —
            TODO confirm against crawler_others).
        title: section heading printed before the result.
        url: article URL to test.
    """
    print(f"\n=== {title} ===")
    print(f"URL: {url}")
    try:
        authors = crawler.extract_authors_from_url(url)
        print(f"✓ 成功提取到 {len(authors)} 位作者")
        for i, author in enumerate(authors, 1):
            print(f"  {i}. {author['name']} <{author['email']}>")
    except Exception as e:
        # Broad catch is intentional: this is a diagnostic script and we
        # want to report any failure mode rather than abort the comparison.
        print(f"✗ 提取失败: {e}")


def test_wiley_url_comparison():
    """对比测试两个 Wiley URL

    Compare a known-good Wiley URL against a known-failing one by running
    the same extraction on both and printing the outcomes side by side.
    """

    # 成功的 URL（来自 crawl_results_52-58.csv 第8行）
    success_url = "https://onlinelibrary.wiley.com/doi/abs/10.1002/smtd.202400880"

    # 失败的 URL（测试中的 URL）
    failed_url = "https://advanced.onlinelibrary.wiley.com/doi/abs/10.1002/adfm.202517996"

    crawler = OtherSitesCrawler()

    print("=" * 80)
    print("对比分析 Wiley URL 访问情况")
    print("=" * 80)

    # Both cases run through the same helper so the reporting logic
    # (previously duplicated) cannot drift between the two branches.
    _extract_and_report(crawler, "测试成功的 URL", success_url)
    _extract_and_report(crawler, "测试失败的 URL", failed_url)

    print("\n" + "=" * 80)
    print("分析完成")
    print("=" * 80)

# Script entry point: run the URL comparison only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    test_wiley_url_comparison()