#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
拼多多商品数据自动获取工具
支持绕过登录限制，获取商品详细信息
"""

import requests
import json
import time
import random
import re
import os
from datetime import datetime
from urllib.parse import urlparse, parse_qs
from fake_useragent import UserAgent
import cloudscraper
from bs4 import BeautifulSoup
import undetected_chromedriver as uc
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
import asyncio
import aiohttp
from typing import Dict, Any, Optional, List
import logging

# Configure logging: INFO level, timestamped "time - level - message" lines.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

class PDDScraper:
    """Pinduoduo (mobile.yangkeduo.com) product-data scraper.

    Tries several fetch strategies in increasing order of cost — plain
    requests, cloudscraper, undetected-chromedriver, then the JSON API —
    and returns the first product payload successfully extracted.
    """

    def __init__(self):
        # One Session / cloudscraper instance per scraper so cookies set by
        # earlier attempts persist; the User-Agent is randomized per instance.
        self.ua = UserAgent()
        self.session = requests.Session()
        self.scraper = cloudscraper.create_scraper()
        self.base_url = "https://mobile.yangkeduo.com"

        # Directory where scraped JSON payloads are written.
        self.data_dir = "data"
        os.makedirs(self.data_dir, exist_ok=True)

        # Browser-like default headers shared by both HTTP clients.
        self.headers = {
            'User-Agent': self.ua.random,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Cache-Control': 'max-age=0',
            'Referer': 'https://mobile.yangkeduo.com/',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'same-origin',
            'Sec-Fetch-User': '?1'
        }

        self.session.headers.update(self.headers)
        self.scraper.headers.update(self.headers)

    def save_data_to_file(self, data: Dict[str, Any], url: str) -> str:
        """Write *data* plus scrape metadata to a JSON file under ``data/``.

        Returns the path of the written file, or "" on failure (best-effort:
        errors are logged, never raised, so a failed save does not abort a
        scrape).
        """
        try:
            # File name combines goods id and timestamp so repeated scrapes
            # of the same product never collide.
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            goods_id = self.extract_goods_id(url) or "unknown"
            filename = f"pdd_data_{goods_id}_{timestamp}.json"
            filepath = os.path.join(self.data_dir, filename)

            # Wrap the payload with provenance metadata.
            data_with_meta = {
                "metadata": {
                    "scrape_time": datetime.now().isoformat(),
                    "url": url,
                    "goods_id": goods_id,
                    "tool_version": "1.0.0"
                },
                "data": data
            }

            with open(filepath, 'w', encoding='utf-8') as f:
                json.dump(data_with_meta, f, ensure_ascii=False, indent=2)

            logger.info(f"数据已保存到: {filepath}")
            return filepath

        except Exception as e:
            logger.error(f"保存数据失败: {e}")
            return ""

    def extract_goods_id(self, url: str) -> Optional[str]:
        """Extract the goods_id from a product URL, or return None.

        Primarily reads the ``goods_id`` query parameter; falls back to a
        regex search so ids embedded outside the query string (e.g. in a
        URL fragment of a share link) are still found.
        """
        try:
            parsed = urlparse(url)
            query_params = parse_qs(parsed.query)
            goods_id = query_params.get('goods_id', [None])[0]
            if goods_id:
                return goods_id
            match = re.search(r'goods_id=(\d+)', url)
            return match.group(1) if match else None
        except Exception as e:
            logger.error(f"提取商品ID失败: {e}")
            return None

    def get_api_url(self, goods_id: str) -> str:
        """Build the goods-detail API endpoint URL for *goods_id*."""
        return f"https://mobile.yangkeduo.com/proxy/api/search/goods/{goods_id}/detail"

    def method1_direct_request(self, url: str) -> Optional[Dict[str, Any]]:
        """Strategy 1: plain GET through the requests session."""
        try:
            logger.info("尝试方法1: 直接请求页面")

            # Random delay to look less like an automated client.
            time.sleep(random.uniform(1, 3))

            response = self.session.get(url, timeout=10)
            response.raise_for_status()

            soup = BeautifulSoup(response.text, 'html.parser')

            data = self._extract_from_html(soup)
            if data:
                logger.info("方法1成功获取数据")
                return data

        except Exception as e:
            logger.error(f"方法1失败: {e}")

        return None

    def method2_cloudscraper(self, url: str) -> Optional[Dict[str, Any]]:
        """Strategy 2: GET via cloudscraper to pass anti-bot challenges."""
        try:
            logger.info("尝试方法2: 使用cloudscraper")

            time.sleep(random.uniform(2, 4))

            response = self.scraper.get(url, timeout=15)
            response.raise_for_status()

            soup = BeautifulSoup(response.text, 'html.parser')
            data = self._extract_from_html(soup)

            if data:
                logger.info("方法2成功获取数据")
                return data

        except Exception as e:
            logger.error(f"方法2失败: {e}")

        return None

    def method3_selenium_undetected(self, url: str) -> Optional[Dict[str, Any]]:
        """Strategy 3: render the page with undetected-chromedriver."""
        driver = None
        try:
            logger.info("尝试方法3: 使用undetected-chromedriver")

            options = Options()
            options.add_argument('--no-sandbox')
            options.add_argument('--disable-dev-shm-usage')
            options.add_argument('--disable-blink-features=AutomationControlled')
            # NOTE(review): recent undetected-chromedriver releases reject
            # add_experimental_option — confirm against the pinned uc version.
            options.add_experimental_option("excludeSwitches", ["enable-automation"])
            options.add_experimental_option('useAutomationExtension', False)
            options.add_argument(f'--user-agent={self.ua.random}')

            driver = uc.Chrome(options=options)
            # Hide the webdriver flag some sites probe for.
            driver.execute_script("Object.defineProperty(navigator, 'webdriver', {get: () => undefined})")

            driver.get(url)

            # Wait until at least the body element exists.
            WebDriverWait(driver, 10).until(
                EC.presence_of_element_located((By.TAG_NAME, "body"))
            )

            # Scroll to the bottom to trigger lazy-loaded content.
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(2)

            page_source = driver.page_source
            soup = BeautifulSoup(page_source, 'html.parser')

            data = self._extract_from_html(soup)

            if data:
                logger.info("方法3成功获取数据")
                return data

        except Exception as e:
            logger.error(f"方法3失败: {e}")
        finally:
            # Always release the browser, even on failure.
            if driver:
                driver.quit()

        return None

    def method4_api_request(self, goods_id: str) -> Optional[Dict[str, Any]]:
        """Strategy 4: call the goods-detail JSON API directly."""
        try:
            logger.info("尝试方法4: 直接请求API接口")

            api_url = self.get_api_url(goods_id)

            # AJAX-style headers so the request resembles the in-page call.
            api_headers = {
                'User-Agent': self.ua.random,
                'Accept': 'application/json, text/plain, */*',
                'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
                'Referer': f'https://mobile.yangkeduo.com/goods.html?goods_id={goods_id}',
                'Origin': 'https://mobile.yangkeduo.com',
                'X-Requested-With': 'XMLHttpRequest',
                'Content-Type': 'application/json;charset=UTF-8'
            }

            time.sleep(random.uniform(1, 2))

            response = self.session.get(api_url, headers=api_headers, timeout=10)
            response.raise_for_status()

            data = response.json()

            if data and 'goods' in data:
                logger.info("方法4成功获取数据")
                return self._parse_api_response(data)

        except Exception as e:
            logger.error(f"方法4失败: {e}")

        return None

    def _extract_from_html(self, soup: "BeautifulSoup") -> Optional[Dict[str, Any]]:
        """Best-effort extraction of product fields from page HTML.

        Returns a dict with whichever of title/price/images/description/
        sales could be found, or None when nothing was extracted.
        """
        try:
            data: Dict[str, Any] = {}
            # soup.get_text() walks the whole document tree; compute it once
            # instead of once per regex pattern.
            page_text = soup.get_text()

            # Product title: prefer an <h1>, fall back to <title>.
            title_elem = soup.find('h1') or soup.find('title')
            if title_elem:
                data['title'] = title_elem.get_text().strip()

            # Price: the first pattern that matches wins.
            price_patterns = [
                r'¥(\d+\.?\d*)',
                r'(\d+\.?\d*)元',
                r'价格[：:]\s*(\d+\.?\d*)'
            ]
            for pattern in price_patterns:
                matches = re.findall(pattern, page_text)
                if matches:
                    data['price'] = matches[0]
                    break

            # Product images: normalize protocol-relative / site-relative
            # URLs and drop duplicates while preserving order.
            images = []
            seen = set()
            for img in soup.find_all('img'):
                src = img.get('src') or img.get('data-src')
                if src and ('goods' in src.lower() or 'product' in src.lower()):
                    if not src.startswith('http'):
                        src = 'https:' + src if src.startswith('//') else self.base_url + src
                    if src not in seen:
                        seen.add(src)
                        images.append(src)

            if images:
                data['images'] = images[:5]  # cap the number of images kept

            # Description: first element whose class mentions desc/detail/intro.
            desc_elements = soup.find_all(['p', 'div'], class_=re.compile(r'desc|detail|intro'))
            if desc_elements:
                data['description'] = desc_elements[0].get_text().strip()

            # Sales figures: the first pattern that matches wins.
            sales_patterns = [
                r'销量[：:]\s*(\d+)',
                r'已售[：:]\s*(\d+)',
                r'(\d+)\s*人付款'
            ]
            for pattern in sales_patterns:
                matches = re.findall(pattern, page_text)
                if matches:
                    data['sales'] = matches[0]
                    break

            return data if data else None

        except Exception as e:
            logger.error(f"HTML解析失败: {e}")
            return None

    def _parse_api_response(self, api_data: Dict[str, Any]) -> Dict[str, Any]:
        """Normalize a goods-detail API payload into a flat dict.

        Missing fields default to ''; the untouched goods object is kept
        under 'raw_data'. Returns {} on parse failure.
        """
        try:
            goods = api_data.get('goods', {})

            return {
                'title': goods.get('goods_name', ''),
                'price': goods.get('min_group_price', ''),
                'original_price': goods.get('min_normal_price', ''),
                'sales': goods.get('sales_tip', ''),
                # Skip gallery entries without a 'url' key so the list never
                # contains None values.
                'images': [img['url'] for img in goods.get('gallery_urls', []) if img.get('url')],
                'description': goods.get('goods_desc', ''),
                'shop_name': goods.get('mall_name', ''),
                'category': goods.get('cat_id_1', ''),
                'brand': goods.get('brand_name', ''),
                'rating': goods.get('goods_rate', ''),
                'comments_count': goods.get('review_count', ''),
                'stock': goods.get('quantity', ''),
                'shipping': goods.get('shipping_fee', ''),
                'guarantee': goods.get('guarantee_info', ''),
                'raw_data': goods
            }

        except Exception as e:
            logger.error(f"API数据解析失败: {e}")
            return {}

    def scrape_product(self, url: str) -> Optional[Dict[str, Any]]:
        """Scrape one product URL, trying each strategy until one succeeds.

        Returns the extracted data dict, or None when every strategy failed
        or no goods_id could be derived from *url*.
        """
        logger.info(f"开始抓取商品数据: {url}")

        goods_id = self.extract_goods_id(url)
        if not goods_id:
            logger.error("无法提取商品ID")
            return None

        # Strategies ordered cheapest-first; the selenium fallback and the
        # raw API call are only reached when the lighter methods fail.
        methods = [
            lambda: self.method1_direct_request(url),
            lambda: self.method2_cloudscraper(url),
            lambda: self.method3_selenium_undetected(url),
            lambda: self.method4_api_request(goods_id)
        ]

        for i, method in enumerate(methods, 1):
            try:
                logger.info(f"尝试方法 {i}")
                result = method()
                if result:
                    logger.info(f"方法 {i} 成功获取数据")
                    return result
            except Exception as e:
                logger.error(f"方法 {i} 执行异常: {e}")
                continue

        logger.error("所有方法都失败了")
        return None

def main():
    """Entry point: scrape one demo product URL and persist the result."""
    pdd = PDDScraper()

    # Demo product page.
    target = "https://mobile.yangkeduo.com/goods.html?goods_id=429422443186"

    banner = "=" * 50
    print(banner)
    print("拼多多商品数据抓取工具")
    print(banner)

    product = pdd.scrape_product(target)

    if not product:
        print("\n❌ 获取数据失败")
        return

    print("\n✅ 成功获取商品数据:")
    print(json.dumps(product, ensure_ascii=False, indent=2))

    # Persist under data/ and report where it went.
    saved = pdd.save_data_to_file(product, target)
    if saved:
        print(f"\n📁 数据已保存到: {saved}")

# Run the demo scrape only when executed as a script (not on import).
if __name__ == "__main__":
    main()