#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
批量拼多多数据抓取器
支持多个URL的批量处理和数据导出
"""

import json
import csv
import time
import random
import os
from datetime import datetime
from typing import List, Dict, Any, Optional
from concurrent.futures import ThreadPoolExecutor, as_completed
import logging
from pdd_scraper import PDDScraper
from advanced_scraper import AdvancedPDDScraper
import pandas as pd


# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

class BatchPDDScraper:
    """Batch scraper for Pinduoduo product pages.

    Wraps a single-URL scraper (basic or advanced) and adds thread-pooled
    batch scraping, a summary report, and JSON/CSV/Excel export.
    """

    def __init__(self, use_advanced: bool = True, max_workers: int = 3):
        """Create the batch scraper.

        Args:
            use_advanced: select AdvancedPDDScraper when True, PDDScraper otherwise.
            max_workers: number of concurrent scraping threads.
        """
        self.use_advanced = use_advanced
        self.max_workers = max_workers

        # NOTE(review): a single scraper instance is shared by all worker
        # threads; this assumes the underlying scraper is thread-safe — confirm.
        if use_advanced:
            self.scraper = AdvancedPDDScraper()
        else:
            self.scraper = PDDScraper()

        # Ensure the export directories exist up front.
        self.output_dir = "output"  # CSV / Excel exports
        self.data_dir = "data"      # raw JSON dumps
        os.makedirs(self.output_dir, exist_ok=True)
        os.makedirs(self.data_dir, exist_ok=True)

    @staticmethod
    def _status_counts(data: List[Dict[str, Any]]) -> Dict[str, int]:
        """Count results per status ('success'/'failed'/'error') in one pass.

        Replaces four duplicated list-comprehension counts scattered across
        the export/report methods.
        """
        counts = {'success': 0, 'failed': 0, 'error': 0}
        for item in data:
            status = item.get('status')
            if status in counts:
                counts[status] += 1
        return counts

    def scrape_single_url(self, url: str) -> Dict[str, Any]:
        """Scrape one product URL.

        Never raises: on failure a dict with status 'failed' or 'error' is
        returned so that batch processing can continue.

        Returns:
            The scraper's result dict augmented with 'url', 'scrape_time'
            and 'status', or a minimal error dict on failure.
        """
        try:
            # Throttle *before* the request so concurrent workers do not hit
            # the site in lockstep.  (Bug fix: the original slept in the
            # result-collection loop, after the request had already finished,
            # which throttled nothing.)
            time.sleep(random.uniform(1, 3))

            logger.info(f"开始抓取: {url}")

            if self.use_advanced:
                result = self.scraper.scrape_product_advanced(url)
            else:
                result = self.scraper.scrape_product(url)

            if result:
                result['url'] = url
                result['scrape_time'] = datetime.now().isoformat()
                result['status'] = 'success'
                logger.info(f"成功抓取: {url}")
                return result

            logger.error(f"抓取失败: {url}")
            return {
                'url': url,
                'scrape_time': datetime.now().isoformat(),
                'status': 'failed',
                'error': '无法获取数据'
            }

        except Exception as e:
            # Broad catch is deliberate: one bad URL must not abort the batch.
            logger.error(f"抓取异常: {url} - {e}")
            return {
                'url': url,
                'scrape_time': datetime.now().isoformat(),
                'status': 'error',
                'error': str(e)
            }

    def scrape_urls(self, urls: List[str]) -> List[Dict[str, Any]]:
        """Scrape many URLs concurrently via a thread pool.

        Results are appended in completion order, not input order.
        """
        logger.info(f"开始批量抓取 {len(urls)} 个URL")

        results = []

        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            future_to_url = {executor.submit(self.scrape_single_url, url): url
                             for url in urls}

            for future in as_completed(future_to_url):
                url = future_to_url[future]
                try:
                    results.append(future.result())
                except Exception as e:
                    # scrape_single_url should not raise, but guard anyway so
                    # every submitted URL yields exactly one result dict.
                    logger.error(f"处理URL异常: {url} - {e}")
                    results.append({
                        'url': url,
                        'scrape_time': datetime.now().isoformat(),
                        'status': 'error',
                        'error': str(e)
                    })

        logger.info(f"批量抓取完成，成功: {self._status_counts(results)['success']}")
        return results

    def save_to_json(self, data: List[Dict[str, Any]], filename: Optional[str] = None):
        """Write results (plus a metadata header) to a JSON file in data_dir.

        Args:
            data: result dicts from scrape_urls().
            filename: target file name; defaults to a timestamped name.

        Returns:
            The path of the written file.
        """
        if not filename:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"pdd_batch_{timestamp}.json"

        filepath = os.path.join(self.data_dir, filename)
        counts = self._status_counts(data)

        data_with_meta = {
            "metadata": {
                "scrape_time": datetime.now().isoformat(),
                "total_count": len(data),
                "success_count": counts['success'],
                "failed_count": counts['failed'],
                "error_count": counts['error'],
                "tool_version": "1.0.0",
                "scraper_type": "advanced" if self.use_advanced else "basic"
            },
            "data": data
        }

        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(data_with_meta, f, ensure_ascii=False, indent=2)

        logger.info(f"数据已保存到: {filepath}")
        return filepath

    def save_to_csv(self, data: List[Dict[str, Any]], filename: Optional[str] = None):
        """Write results to a CSV file in output_dir.

        Column order: key fields first, then the remaining fields sorted
        alphabetically.  (Bug fix: the original iterated a set here, so the
        column order differed from run to run.)

        Returns:
            The path of the written file.
        """
        if not filename:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"pdd_batch_{timestamp}.csv"

        filepath = os.path.join(self.output_dir, filename)

        # Union of all keys across the (possibly heterogeneous) result dicts.
        all_fields = set()
        for item in data:
            all_fields.update(item.keys())

        # Key fields first; remaining fields in deterministic (sorted) order.
        field_order = ['url', 'status', 'scrape_time', 'title', 'price', 'sales']
        field_order.extend(sorted(f for f in all_fields if f not in field_order))

        # utf-8-sig so spreadsheet apps detect the encoding of Chinese text.
        with open(filepath, 'w', newline='', encoding='utf-8-sig') as f:
            writer = csv.DictWriter(f, fieldnames=field_order)
            writer.writeheader()
            for item in data:
                # Fill missing fields with '' so every row is complete.
                writer.writerow({field: item.get(field, '') for field in field_order})

        logger.info(f"数据已保存到: {filepath}")
        return filepath

    def save_to_excel(self, data: List[Dict[str, Any]], filename: Optional[str] = None):
        """Write results to an Excel workbook in output_dir.

        Sheet '商品数据' holds the raw rows; sheet '统计信息' holds the
        success/failure counts.

        Returns:
            The path of the written file.
        """
        if not filename:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"pdd_batch_{timestamp}.xlsx"

        filepath = os.path.join(self.output_dir, filename)
        counts = self._status_counts(data)

        df = pd.DataFrame(data)

        with pd.ExcelWriter(filepath, engine='openpyxl') as writer:
            df.to_excel(writer, sheet_name='商品数据', index=False)

            # Second sheet: summary statistics.
            stats_df = pd.DataFrame({
                '统计项': ['总数量', '成功数量', '失败数量', '错误数量'],
                '数量': [len(data), counts['success'], counts['failed'], counts['error']]
            })
            stats_df.to_excel(writer, sheet_name='统计信息', index=False)

        logger.info(f"数据已保存到: {filepath}")
        return filepath

    def generate_report(self, data: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Build a summary report: counts, success rate, price statistics.

        Returns:
            A dict (Chinese keys, ready to be JSON-dumped for display).
        """
        total_count = len(data)
        counts = self._status_counts(data)

        # Collect parseable prices from successful results; currency signs
        # are stripped, unparseable values are skipped.
        prices = []
        for item in data:
            if item.get('status') == 'success' and 'price' in item:
                try:
                    prices.append(float(
                        str(item['price']).replace('¥', '').replace('￥', '').strip()
                    ))
                except ValueError:
                    # Narrowed from a bare `except:` — only parse failures are
                    # expected here; anything else should surface.
                    pass

        return {
            '抓取时间': datetime.now().isoformat(),
            '总数量': total_count,
            '成功数量': counts['success'],
            '失败数量': counts['failed'],
            '错误数量': counts['error'],
            '成功率': f"{counts['success'] / total_count * 100:.2f}%" if total_count > 0 else "0%",
            '价格统计': {
                '平均价格': f"{sum(prices) / len(prices):.2f}" if prices else "无数据",
                '最高价格': f"{max(prices):.2f}" if prices else "无数据",
                '最低价格': f"{min(prices):.2f}" if prices else "无数据"
            },
            '失败URL列表': [item['url'] for item in data if item['status'] != 'success']
        }

def load_urls_from_file(filepath: str) -> List[str]:
    """Load a URL list from a text file.

    One URL per line; blank lines and lines starting with '#' (comments)
    are skipped.

    Args:
        filepath: path to the UTF-8 text file.

    Returns:
        The list of URLs, or an empty list if the file cannot be read
        (the error is logged, not raised).
    """
    urls: List[str] = []

    try:
        with open(filepath, 'r', encoding='utf-8') as f:
            for raw in f:
                line = raw.strip()
                if line and not line.startswith('#'):
                    urls.append(line)
    except (OSError, UnicodeDecodeError) as e:
        # Narrowed from a blanket `except Exception`: only file-system and
        # decoding problems are expected from open/read here.
        logger.error(f"加载URL文件失败: {e}")
        return []

    logger.info(f"从文件加载了 {len(urls)} 个URL")
    return urls

def main():
    """Command-line entry point: scrape a demo URL list and export results."""
    banner = "=" * 60
    print(banner)
    print("批量拼多多商品数据抓取工具")
    print(banner)

    # Demo URL list; extend as needed.
    urls = [
        "https://mobile.yangkeduo.com/goods.html?goods_id=429422443186",
        # Add more URLs here
    ]

    # Alternatively, read URLs from a text file:
    # urls = load_urls_from_file('urls.txt')

    if not urls:
        print("❌ 没有找到要抓取的URL")
        return

    # Build the batch scraper and run the whole URL list.
    scraper = BatchPDDScraper(use_advanced=True, max_workers=2)

    print(f"\n🚀 开始批量抓取 {len(urls)} 个商品...")
    scrape_results = scraper.scrape_urls(urls)

    # Summary report, pretty-printed as JSON.
    summary = scraper.generate_report(scrape_results)
    print("\n📊 抓取报告:")
    print(json.dumps(summary, ensure_ascii=False, indent=2))

    # Export in all three formats.
    print("\n💾 保存数据...")
    saved_json = scraper.save_to_json(scrape_results)   # raw dump -> data/
    saved_csv = scraper.save_to_csv(scrape_results)     # flat table -> output/
    saved_xlsx = scraper.save_to_excel(scrape_results)  # with stats sheet -> output/

    print("\n✅ 数据已保存:")
    print(f"  JSON: {saved_json}")
    print(f"  CSV:  {saved_csv}")
    print(f"  Excel: {saved_xlsx}")

    # Preview of the successfully scraped items (at most five).
    ok_items = [r for r in scrape_results if r['status'] == 'success']
    if ok_items:
        print("\n📋 成功抓取的商品:")
        for idx, item in enumerate(ok_items[:5], 1):
            print(f"  {idx}. {item.get('title', '无标题')} - ¥{item.get('price', '无价格')}")

        if len(ok_items) > 5:
            print(f"  ... 还有 {len(ok_items) - 5} 个商品")


if __name__ == "__main__":
    main()