#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
拼多多数据抓取工具 - 快速启动脚本
直接测试您提供的URL
"""

import json
import os
import re
import time
from datetime import datetime

from advanced_scraper import AdvancedPDDScraper

def save_data_to_file(data: dict, url: str) -> str:
    """Save scraped data (plus scrape metadata) as JSON under the ``data`` directory.

    Args:
        data: Scraped product data to persist.
        url: Source URL; the ``goods_id`` query parameter (if present) is
            embedded in the file name and in the metadata block.

    Returns:
        Path of the written file, or an empty string if saving failed.
    """
    try:
        # Ensure the output directory exists (no-op if already present).
        data_dir = "data"
        os.makedirs(data_dir, exist_ok=True)

        # File name: tool prefix + goods id + timestamp,
        # e.g. pdd_quick_429422443186_20240101_120000.json
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

        # Extract the numeric goods id from the URL query string.
        goods_id_match = re.search(r'goods_id=(\d+)', url)
        goods_id = goods_id_match.group(1) if goods_id_match else "unknown"

        filename = f"pdd_quick_{goods_id}_{timestamp}.json"
        filepath = os.path.join(data_dir, filename)

        # Wrap the payload with metadata describing this scrape run.
        data_with_meta = {
            "metadata": {
                "scrape_time": datetime.now().isoformat(),
                "url": url,
                "goods_id": goods_id,
                "tool_version": "1.0.0",
                "scraper_type": "quick_start"
            },
            "data": data
        }

        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(data_with_meta, f, ensure_ascii=False, indent=2)

        return filepath

    except (OSError, TypeError, ValueError) as e:
        # Best-effort persistence: OSError covers file-system failures,
        # TypeError/ValueError cover non-JSON-serializable payloads.
        # Report the problem but never crash the caller.
        print(f"保存数据失败: {e}")
        return ""

def _print_product_info(result: dict) -> None:
    """Print a human-readable summary of the scraped product fields."""
    print("\n📋 商品信息:")
    print(f"  标题: {result.get('title', '无标题')}")
    print(f"  价格: ¥{result.get('price', '无价格')}")
    print(f"  原价: ¥{result.get('original_price', '无原价')}")
    print(f"  销量: {result.get('sales', '无销量')}")
    print(f"  店铺: {result.get('shop_name', '无店铺信息')}")
    print(f"  评分: {result.get('rating', '无评分')}")
    print(f"  评论数: {result.get('comments_count', '无评论数')}")
    print(f"  品牌: {result.get('brand', '无品牌信息')}")
    print(f"  库存: {result.get('stock', '无库存信息')}")
    print(f"  运费: {result.get('shipping', '无运费信息')}")

    if result.get('images'):
        # Show at most the first three image URLs to keep the output short.
        print(f"  图片数量: {len(result['images'])}")
        for i, img in enumerate(result['images'][:3], 1):
            print(f"    图片{i}: {img}")
        if len(result['images']) > 3:
            print(f"    ... 还有 {len(result['images']) - 3} 张图片")

    if result.get('description'):
        # Truncate long descriptions to 100 characters for readability.
        desc = result['description']
        if len(desc) > 100:
            desc = desc[:100] + "..."
        print(f"  描述: {desc}")

def _print_failure_help() -> None:
    """Print likely failure causes and suggested remedies."""
    print("❌ 抓取失败")
    print("\n可能的原因:")
    print("1. 网络连接问题")
    print("2. 目标网站反爬虫机制")
    print("3. 商品已下架或不存在")
    print("4. 需要登录访问")
    print("\n建议:")
    print("- 检查网络连接")
    print("- 尝试使用代理")
    print("- 稍后重试")
    print("- 检查URL是否正确")

def main():
    """Quick-start entry point: scrape the hard-coded URL, report and save the result."""
    print("=" * 60)
    print("🚀 拼多多数据抓取工具 - 快速启动")
    print("=" * 60)

    # Target product page (goods_id is embedded in the query string).
    url = "https://mobile.yangkeduo.com/goods.html?goods_id=429422443186"

    print(f"目标URL: {url}")
    print("\n开始抓取数据...")
    print("-" * 60)

    scraper = AdvancedPDDScraper()

    # Time the scrape for the summary line below.
    start_time = time.time()
    result = scraper.scrape_product_advanced(url)
    end_time = time.time()

    print(f"\n抓取耗时: {end_time - start_time:.2f} 秒")
    print("-" * 60)

    if not result:
        _print_failure_help()
        return

    print("✅ 抓取成功！")
    _print_product_info(result)

    # Persist the result under the data/ directory.
    print("\n💾 保存数据...")
    filepath = save_data_to_file(result, url)
    if filepath:
        print(f"✅ 数据已保存到: {filepath}")

    # Optionally dump the full raw payload for inspection.
    show_raw = input("\n是否显示完整原始数据? (y/n): ").strip().lower()
    if show_raw == 'y':
        print("\n📄 完整数据:")
        print(json.dumps(result, ensure_ascii=False, indent=2))

if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print("\n\n👋 程序已退出")
    except Exception as e:
        print(f"\n❌ 程序执行出错: {e}")
        print("请检查网络连接和依赖安装") 