#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
批量微信公众号文章爬虫
支持从文件读取多个链接进行批量爬取
"""

import os
import time
import json
from wechat_crawler import WeChatArticleCrawler


class BatchWeChatCrawler:
    """Batch wrapper around WeChatArticleCrawler.

    Crawls a sequence of WeChat official-account article URLs one by one,
    pausing between requests, and records a per-URL success/failure report
    that is finally dumped to ``crawl_results.json`` in the output directory.
    """

    def __init__(self, use_selenium=False, delay=2):
        """
        Initialize the batch crawler.
        :param use_selenium: whether to use Selenium for JS-rendered pages
        :param delay: pause between consecutive requests, in seconds
        """
        self.crawler = WeChatArticleCrawler(use_selenium=use_selenium)
        self.delay = delay
        # One dict per processed URL: url, status, files/error, timestamp.
        self.results = []

    def crawl_from_file(self, file_path, format_type="both", output_dir="output"):
        """
        Read links from a file (one URL per line) and crawl them all.
        :param file_path: path to the file containing the links
        :param format_type: save format ("markdown", "html" or "both")
        :param output_dir: directory for saved articles and the report
        """
        if not os.path.exists(file_path):
            print(f"错误: 文件 {file_path} 不存在")
            return

        with open(file_path, 'r', encoding='utf-8') as f:
            urls = [line.strip() for line in f if line.strip()]

        print(f"找到 {len(urls)} 个链接，开始批量爬取...")
        self._crawl_urls(urls, format_type, output_dir)

    def crawl_from_list(self, urls, format_type="both", output_dir="output"):
        """
        Crawl every URL in a list.
        :param urls: list of article URLs
        :param format_type: save format ("markdown", "html" or "both")
        :param output_dir: directory for saved articles and the report
        """
        print(f"开始批量爬取 {len(urls)} 个链接...")
        self._crawl_urls(urls, format_type, output_dir)

    def _crawl_urls(self, urls, format_type, output_dir):
        """Shared worker loop: crawl each URL with a delay in between,
        then write the summary report. (Extracted to remove the duplicated
        loop that previously lived in both public crawl methods.)"""
        for i, url in enumerate(urls, 1):
            print(f"\n[{i}/{len(urls)}] 处理链接: {url}")
            self._crawl_one(url, format_type, output_dir)

            # Pause between requests to avoid being rate-limited/blocked;
            # no pause needed after the last URL.
            if i < len(urls):
                print(f"等待 {self.delay} 秒...")
                time.sleep(self.delay)

        self._save_results(output_dir)

    def _crawl_one(self, url, format_type, output_dir):
        """Crawl a single URL and append its outcome to ``self.results``.
        Exceptions are caught so one bad URL does not abort the batch."""
        try:
            saved_files = self.crawler.crawl_and_save(url, format_type, output_dir)

            self.results.append({
                'url': url,
                'status': 'success' if saved_files else 'failed',
                'files': saved_files,
                'timestamp': time.strftime('%Y-%m-%d %H:%M:%S')
            })

            if saved_files:
                print(f"✓ 成功保存 {len(saved_files)} 个文件")
            else:
                print("✗ 保存失败")

        except Exception as e:
            # Record the error and keep processing the remaining URLs.
            print(f"✗ 处理失败: {e}")
            self.results.append({
                'url': url,
                'status': 'error',
                'error': str(e),
                'timestamp': time.strftime('%Y-%m-%d %H:%M:%S')
            })

    def _save_results(self, output_dir):
        """Print a summary of the batch run and dump the detailed per-URL
        results to ``<output_dir>/crawl_results.json``."""
        os.makedirs(output_dir, exist_ok=True)

        # Aggregate counts; 'failed' here covers both 'failed' and 'error'.
        total = len(self.results)
        success = len([r for r in self.results if r['status'] == 'success'])
        failed = total - success

        print(f"\n批量爬取完成!")
        print(f"总计: {total} 个链接")
        print(f"成功: {success} 个")
        print(f"失败: {failed} 个")

        result_file = os.path.join(output_dir, 'crawl_results.json')
        with open(result_file, 'w', encoding='utf-8') as f:
            json.dump({
                'summary': {
                    'total': total,
                    'success': success,
                    'failed': failed,
                    'timestamp': time.strftime('%Y-%m-%d %H:%M:%S')
                },
                'details': self.results
            }, f, ensure_ascii=False, indent=2)

        print(f"详细结果已保存到: {result_file}")

    def close(self):
        """Release the underlying crawler's resources (e.g. a Selenium driver)."""
        self.crawler.close()


def main():
    """Interactive entry point: collect URLs and options, then run the batch crawl."""
    print("批量微信公众号文章爬虫")
    print("=" * 50)

    # Ask the user how the URLs will be supplied.
    print("选择输入方式:")
    print("1. 从文件读取链接")
    print("2. 手动输入链接")

    choice = input("请选择 (1-2): ").strip()

    if choice == "1":
        file_path = input("请输入包含链接的文件路径: ").strip()
        if not os.path.exists(file_path):
            print("错误: 文件不存在")
            return
        with open(file_path, 'r', encoding='utf-8') as f:
            urls = [line.strip() for line in f if line.strip()]
    elif choice == "2":
        print("请输入链接（每行一个，输入空行结束）:")
        urls = []
        # An empty line terminates manual entry.
        while link := input().strip():
            urls.append(link)
    else:
        print("无效选择")
        return

    if not urls:
        print("没有找到有效链接")
        return

    # Ask for the desired output format; anything unrecognized means "both".
    print(f"\n找到 {len(urls)} 个链接")
    print("选择输出格式:")
    print("1. Markdown")
    print("2. HTML")
    print("3. 两种格式都保存")

    format_choice = input("请选择 (1-3): ").strip()
    format_type = {"1": "markdown", "2": "html", "3": "both"}.get(format_choice, "both")

    # Inter-request delay: blank or non-numeric input falls back to 2 seconds.
    raw_delay = input("请输入请求间隔时间（秒，默认2秒）: ").strip()
    try:
        delay = int(raw_delay) if raw_delay else 2
    except ValueError:
        delay = 2

    use_selenium = input("是否使用Selenium处理JavaScript渲染? (y/n): ").strip().lower() == 'y'

    batch_crawler = BatchWeChatCrawler(use_selenium=use_selenium, delay=delay)
    try:
        batch_crawler.crawl_from_list(urls, format_type)
    except KeyboardInterrupt:
        print("\n用户中断操作")
    except Exception as e:
        print(f"\n发生错误: {e}")
    finally:
        # Always release crawler resources, even on interrupt/error.
        batch_crawler.close()


# Launch the interactive CLI only when executed as a script, not on import.
if __name__ == "__main__":
    main()