#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
拼多多数据抓取工具 - 快速运行脚本
"""

import json
import os
import re
import sys
import time
from datetime import datetime

from pdd_scraper import PDDScraper
from advanced_scraper import AdvancedPDDScraper
from batch_scraper import BatchPDDScraper, load_urls_from_file

def save_data_to_file(data: dict, url: str, scraper_type: str = "basic") -> str:
    """Save scraped data, wrapped with metadata, as JSON under the ``data`` dir.

    Args:
        data: Scraped product payload to persist.
        url: Product URL the data came from; the goods id is extracted from
            its ``goods_id=...`` query parameter (falls back to "unknown").
        scraper_type: Label of the scraper that produced the data
            (e.g. "basic", "advanced"); becomes part of the filename.

    Returns:
        Path of the written file, or "" when saving failed (best-effort
        contract: errors are printed, never raised).
    """
    try:
        # Ensure the output directory exists.
        data_dir = "data"
        os.makedirs(data_dir, exist_ok=True)

        # Capture "now" once so the filename timestamp and the metadata
        # scrape_time cannot disagree (the original called now() twice).
        now = datetime.now()
        timestamp = now.strftime("%Y%m%d_%H%M%S")

        # Extract the goods id from the URL's query string.
        goods_id_match = re.search(r'goods_id=(\d+)', url)
        goods_id = goods_id_match.group(1) if goods_id_match else "unknown"

        filename = f"pdd_{scraper_type}_{goods_id}_{timestamp}.json"
        filepath = os.path.join(data_dir, filename)

        # Wrap the payload with provenance metadata.
        data_with_meta = {
            "metadata": {
                "scrape_time": now.isoformat(),
                "url": url,
                "goods_id": goods_id,
                "tool_version": "1.0.0",
                "scraper_type": scraper_type
            },
            "data": data
        }

        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(data_with_meta, f, ensure_ascii=False, indent=2)

        return filepath

    except (OSError, TypeError, ValueError) as e:
        # OSError: directory/file I/O failure.
        # TypeError/ValueError: payload not JSON-serializable.
        print(f"保存数据失败: {e}")
        return ""

def main():
    """Interactive entry point: print the menu and dispatch on user choice."""
    banner = "=" * 60
    print(banner)
    print("拼多多商品数据抓取工具")
    print(banner)
    print("1. 单个商品抓取")
    print("2. 高级抓取器")
    print("3. 批量抓取")
    print("4. 退出")
    print(banner)

    # Menu choice -> handler function.
    handlers = {
        '1': run_single_scraper,
        '2': run_advanced_scraper,
        '3': run_batch_scraper,
    }

    while True:
        try:
            choice = input("\n请选择功能 (1-4): ").strip()
            if choice == '4':
                print("再见！")
                break
            handler = handlers.get(choice)
            if handler is None:
                print("❌ 无效选择，请重新输入")
            else:
                handler()
        except KeyboardInterrupt:
            # Ctrl-C anywhere exits the loop cleanly.
            print("\n\n👋 程序已退出")
            break
        except Exception as e:
            # Keep the menu alive on handler errors.
            print(f"❌ 发生错误: {e}")

def run_single_scraper():
    """Scrape one product with the basic scraper and optionally save the result."""
    print("\n🔍 单个商品抓取")
    print("-" * 30)

    product_url = input("请输入拼多多商品URL: ").strip()
    if not product_url:
        print("❌ URL不能为空")
        return

    print(f"\n🚀 开始抓取: {product_url}")
    result = PDDScraper().scrape_product(product_url)

    if not result:
        print("❌ 抓取失败")
        return

    print("\n✅ 抓取成功!")
    print(f"商品标题: {result.get('title', '无标题')}")
    print(f"价格: ¥{result.get('price', '无价格')}")
    print(f"销量: {result.get('sales', '无销量')}")
    print(f"描述: {result.get('description', '无描述')[:100]}...")

    # Offer to persist the result under data/.
    wants_save = input("\n是否保存数据到文件? (y/n): ").strip().lower() == 'y'
    if wants_save:
        saved_path = save_data_to_file(result, product_url, "basic")
        if saved_path:
            print(f"✅ 数据已保存到: {saved_path}")

def run_advanced_scraper():
    """Scrape one product with the advanced scraper and optionally save the result."""
    print("\n🚀 高级抓取器")
    print("-" * 30)

    product_url = input("请输入拼多多商品URL: ").strip()
    if not product_url:
        print("❌ URL不能为空")
        return

    print(f"\n🚀 开始高级抓取: {product_url}")
    result = AdvancedPDDScraper().scrape_product_advanced(product_url)

    if not result:
        print("❌ 抓取失败")
        return

    print("\n✅ 抓取成功!")
    print(f"商品标题: {result.get('title', '无标题')}")
    print(f"价格: ¥{result.get('price', '无价格')}")
    print(f"原价: ¥{result.get('original_price', '无原价')}")
    print(f"销量: {result.get('sales', '无销量')}")
    print(f"店铺: {result.get('shop_name', '无店铺信息')}")
    print(f"评分: {result.get('rating', '无评分')}")

    # Offer to persist the result under data/.
    wants_save = input("\n是否保存数据到文件? (y/n): ").strip().lower() == 'y'
    if wants_save:
        saved_path = save_data_to_file(result, product_url, "advanced")
        if saved_path:
            print(f"✅ 数据已保存到: {saved_path}")

def run_batch_scraper():
    """Collect URLs (manual input or file), batch-scrape them, and save results.

    Prints a summary report plus the paths of the JSON/CSV output files
    produced by the batch scraper, and previews up to 5 successful items.
    """
    print("\n📦 批量抓取器")
    print("-" * 30)

    print("选择URL来源:")
    print("1. 手动输入")
    print("2. 从文件加载")

    source_choice = input("请选择 (1-2): ").strip()

    urls = []

    if source_choice == '1':
        # Read URLs one per line; a blank line ends input.
        print("\n请输入URL (输入空行结束):")
        while True:
            url = input("URL: ").strip()
            if not url:
                break
            urls.append(url)
    elif source_choice == '2':
        filename = input("请输入文件名 (默认: urls.txt): ").strip()
        if not filename:
            filename = 'urls.txt'

        if os.path.exists(filename):
            urls = load_urls_from_file(filename)
        else:
            # BUG FIX: the message previously printed the literal text
            # "(unknown)" instead of the missing file's name.
            print(f"❌ 文件不存在: {filename}")
            return
    else:
        print("❌ 无效选择")
        return

    if not urls:
        print("❌ 没有找到要抓取的URL")
        return

    # Worker count: fall back to 2 on empty/invalid input.
    try:
        max_workers = int(input("设置并发数 (默认: 2): ").strip() or "2")
    except ValueError:
        max_workers = 2

    batch_scraper = BatchPDDScraper(use_advanced=True, max_workers=max_workers)

    print(f"\n🚀 开始批量抓取 {len(urls)} 个商品...")
    results = batch_scraper.scrape_urls(urls)

    # Summary report (keys are defined by BatchPDDScraper.generate_report).
    report = batch_scraper.generate_report(results)

    print("\n📊 抓取报告:")
    print(f"总数量: {report['总数量']}")
    print(f"成功数量: {report['成功数量']}")
    print(f"失败数量: {report['失败数量']}")
    print(f"成功率: {report['成功率']}")

    # Persist full results in both formats.
    print("\n💾 保存数据...")
    json_file = batch_scraper.save_to_json(results)
    csv_file = batch_scraper.save_to_csv(results)

    print(f"\n✅ 数据已保存:")
    print(f"  JSON: {json_file}")
    print(f"  CSV:  {csv_file}")

    # Preview at most 5 successfully scraped items.
    success_data = [r for r in results if r['status'] == 'success']
    if success_data:
        print(f"\n📋 成功抓取的商品:")
        for i, item in enumerate(success_data[:5], 1):
            print(f"  {i}. {item.get('title', '无标题')} - ¥{item.get('price', '无价格')}")

        if len(success_data) > 5:
            print(f"  ... 还有 {len(success_data) - 5} 个商品")

# Run the interactive menu only when executed as a script (not on import).
if __name__ == "__main__":
    main() 