#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
高级拼多多数据抓取器
包含更多绕过技术和异步处理
"""

import asyncio
import aiohttp
import json
import time
import random
import re
import base64
import hashlib
import os
from datetime import datetime
from urllib.parse import urlparse, parse_qs, urlencode
from fake_useragent import UserAgent
import cloudscraper
from bs4 import BeautifulSoup
import undetected_chromedriver as uc
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from typing import Dict, Any, Optional, List
import logging
import threading
from concurrent.futures import ThreadPoolExecutor
import requests

# Module-wide logging configuration: timestamped INFO-level output.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

class AdvancedPDDScraper:
    """Advanced Pinduoduo (mobile.yangkeduo.com) product scraper.

    Runs up to eight independent fetch strategies -- plain requests,
    cloudscraper, undetected-chromedriver, a direct API call, aiohttp,
    Playwright, a mobile-API imitation and an Android-WebView imitation --
    concurrently on a thread pool, and returns the first strategy that
    yields parseable product data.
    """
    
    def __init__(self) -> None:
        self.ua = UserAgent()                         # random User-Agent provider
        self.session = requests.Session()             # shared synchronous HTTP session
        self.scraper = cloudscraper.create_scraper()  # Cloudflare-aware session
        self.base_url = "https://mobile.yangkeduo.com"
        self.proxy_list = []
        self.current_proxy_index = 0                  # round-robin cursor into proxy_list
        
        # Make sure the output directory for scraped JSON files exists.
        self.data_dir = "data"
        os.makedirs(self.data_dir, exist_ok=True)
        
        # Install the browser-like default headers on both sessions.
        self._init_headers()
        
        # Populate the proxy pool (empty by default).
        self._init_proxy_pool()
    
    def save_data_to_file(self, data: Dict[str, Any], url: str) -> str:
        """Write *data* (wrapped with scrape metadata) as JSON into self.data_dir.

        The file name combines the goods id extracted from *url* with the
        current timestamp.  Returns the path written, or "" on any failure.
        """
        try:
            # File name: current time + goods id.
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            goods_id = self.extract_goods_id(url) or "unknown"
            filename = f"pdd_advanced_{goods_id}_{timestamp}.json"
            filepath = os.path.join(self.data_dir, filename)
            
            # Wrap the payload with provenance metadata.
            data_with_meta = {
                "metadata": {
                    "scrape_time": datetime.now().isoformat(),
                    "url": url,
                    "goods_id": goods_id,
                    "tool_version": "2.0.0",
                    "scraper_type": "advanced"
                },
                "data": data
            }
            
            with open(filepath, 'w', encoding='utf-8') as f:
                json.dump(data_with_meta, f, ensure_ascii=False, indent=2)
            
            logger.info(f"数据已保存到: {filepath}")
            return filepath
            
        except Exception as e:
            logger.error(f"保存数据失败: {e}")
            return ""
    
    def _init_headers(self) -> None:
        """Build browser-like default headers and apply them to both sessions."""
        self.headers = {
            'User-Agent': self.ua.random,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Cache-Control': 'max-age=0',
            'Referer': 'https://mobile.yangkeduo.com/',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'same-origin',
            'Sec-Fetch-User': '?1',
            'DNT': '1',
            'Pragma': 'no-cache'
        }
        
        self.session.headers.update(self.headers)
        self.scraper.headers.update(self.headers)
    
    def _init_proxy_pool(self) -> None:
        """Initialise the proxy pool; free or paid proxies can be listed here."""
        # Example entry format:
        self.proxy_list = [
            # 'http://proxy1:port',
            # 'http://proxy2:port',
        ]
    
    def _get_next_proxy(self):
        """Return the next proxy in round-robin order, or None if the pool is empty."""
        if not self.proxy_list:
            return None
        
        proxy = self.proxy_list[self.current_proxy_index]
        self.current_proxy_index = (self.current_proxy_index + 1) % len(self.proxy_list)
        return proxy
    
    def _generate_signature(self, data: str) -> str:
        """Return an MD5 hex digest of *data* as a simple request signature.

        A real endpoint may require a more elaborate scheme.
        NOTE(review): not called by any method visible in this file.
        """
        return hashlib.md5(data.encode()).hexdigest()
    
    def _add_random_delay(self, min_delay: float = 1.0, max_delay: float = 3.0) -> None:
        """Sleep for a uniformly random interval to make request timing look human."""
        delay = random.uniform(min_delay, max_delay)
        time.sleep(delay)
    
    def extract_goods_id(self, url: str) -> Optional[str]:
        """Extract the ``goods_id`` query parameter from a product URL, or None."""
        try:
            parsed = urlparse(url)
            query_params = parse_qs(parsed.query)
            goods_id = query_params.get('goods_id', [None])[0]
            return goods_id
        except Exception as e:
            logger.error(f"提取商品ID失败: {e}")
            return None
    
    async def method5_async_request(self, url: str) -> Optional[Dict[str, Any]]:
        """Strategy 5: fetch the page with aiohttp and parse the returned HTML."""
        try:
            logger.info("尝试方法5: 异步请求")
            
            timeout = aiohttp.ClientTimeout(total=15)
            connector = aiohttp.TCPConnector(limit=10, limit_per_host=5)
            
            async with aiohttp.ClientSession(
                timeout=timeout,
                connector=connector,
                headers=self.headers
            ) as session:
                async with session.get(url) as response:
                    if response.status == 200:
                        html = await response.text()
                        soup = BeautifulSoup(html, 'html.parser')
                        data = self._extract_from_html(soup)
                        
                        if data:
                            logger.info("方法5成功获取数据")
                            return data
                            
        except Exception as e:
            logger.error(f"方法5失败: {e}")
        
        return None
    
    def method6_playwright(self, url: str) -> Optional[Dict[str, Any]]:
        """Strategy 6: render the page with headless Playwright Chromium.

        Playwright is imported lazily so the rest of the scraper still works
        when it is not installed (the ImportError is caught below).
        """
        try:
            logger.info("尝试方法6: 使用Playwright")
            
            import playwright
            from playwright.sync_api import sync_playwright
            
            with sync_playwright() as p:
                browser = p.chromium.launch(
                    headless=True,
                    args=[
                        '--no-sandbox',
                        '--disable-setuid-sandbox',
                        '--disable-dev-shm-usage',
                        '--disable-accelerated-2d-canvas',
                        '--no-first-run',
                        '--no-zygote',
                        '--disable-gpu'
                    ]
                )
                
                context = browser.new_context(
                    user_agent=self.ua.random,
                    viewport={'width': 1920, 'height': 1080}
                )
                
                page = context.new_page()
                
                # Extra headers on top of the context defaults.
                page.set_extra_http_headers({
                    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
                    'Accept-Encoding': 'gzip, deflate, br',
                    'Cache-Control': 'no-cache',
                    'Pragma': 'no-cache'
                })
                
                # Navigate and wait for the network to go idle.
                page.goto(url, wait_until='networkidle')
                
                # Give client-side rendering time to finish.
                page.wait_for_timeout(3000)
                
                # Scroll to the bottom to trigger lazy-loaded content.
                page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
                page.wait_for_timeout(2000)
                
                # Grab the rendered HTML.
                content = page.content()
                soup = BeautifulSoup(content, 'html.parser')
                
                data = self._extract_from_html(soup)
                
                browser.close()
                
                if data:
                    logger.info("方法6成功获取数据")
                    return data
                    
        except Exception as e:
            logger.error(f"方法6失败: {e}")
        
        return None
    
    def method7_mobile_api(self, goods_id: str) -> Optional[Dict[str, Any]]:
        """Strategy 7: call the goods-detail API with iOS-Safari-style headers.

        NOTE(review): the endpoint path and the expectation of a top-level
        'goods' key are assumed, not documented -- verify against live traffic.
        """
        try:
            logger.info("尝试方法7: 模拟移动端API请求")
            
            # Mobile-site API endpoint.
            api_url = f"https://mobile.yangkeduo.com/proxy/api/search/goods/{goods_id}/detail"
            
            # Headers imitating Safari on iOS.
            mobile_headers = {
                'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.2 Mobile/15E148 Safari/604.1',
                'Accept': 'application/json, text/plain, */*',
                'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
                'Accept-Encoding': 'gzip, deflate, br',
                'Referer': f'https://mobile.yangkeduo.com/goods.html?goods_id={goods_id}',
                'Origin': 'https://mobile.yangkeduo.com',
                'X-Requested-With': 'XMLHttpRequest',
                'Content-Type': 'application/json;charset=UTF-8',
                'Connection': 'keep-alive',
                'Sec-Fetch-Dest': 'empty',
                'Sec-Fetch-Mode': 'cors',
                'Sec-Fetch-Site': 'same-origin'
            }
            
            self._add_random_delay(1, 2)
            
            response = self.session.get(api_url, headers=mobile_headers, timeout=10)
            response.raise_for_status()
            
            data = response.json()
            
            if data and 'goods' in data:
                logger.info("方法7成功获取数据")
                return self._parse_api_response(data)
                
        except Exception as e:
            logger.error(f"方法7失败: {e}")
        
        return None
    
    def method8_webview_simulation(self, url: str) -> Optional[Dict[str, Any]]:
        """Strategy 8: fetch the page with Android-WebView-style headers.

        X-Requested-With is set to the official app's package name to look
        like an in-app WebView rather than a browser.
        """
        try:
            logger.info("尝试方法8: 模拟WebView请求")
            
            # Headers imitating an Android in-app WebView.
            webview_headers = {
                'User-Agent': 'Mozilla/5.0 (Linux; Android 10; SM-G975F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.120 Mobile Safari/537.36',
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
                'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
                'Accept-Encoding': 'gzip, deflate, br',
                'Cache-Control': 'no-cache',
                'Pragma': 'no-cache',
                'X-Requested-With': 'com.pinduoduo.buyer',
                'Sec-Fetch-Dest': 'document',
                'Sec-Fetch-Mode': 'navigate',
                'Sec-Fetch-Site': 'none',
                'Sec-Fetch-User': '?1',
                'Upgrade-Insecure-Requests': '1'
            }
            
            self._add_random_delay(2, 4)
            
            response = self.session.get(url, headers=webview_headers, timeout=15)
            response.raise_for_status()
            
            soup = BeautifulSoup(response.text, 'html.parser')
            data = self._extract_from_html(soup)
            
            if data:
                logger.info("方法8成功获取数据")
                return data
                
        except Exception as e:
            logger.error(f"方法8失败: {e}")
        
        return None
    
    def _extract_from_html(self, soup: BeautifulSoup) -> Optional[Dict[str, Any]]:
        """Best-effort extraction of product fields from a parsed HTML page.

        Tries several CSS selectors / regex patterns per field (title, price,
        images, description, sales, shop, rating) and keeps the first match.
        Returns a dict of whatever was found, or None if nothing matched or
        parsing raised.
        """
        try:
            data = {}
            
            # Product title: first selector whose text is longer than 5 chars wins.
            title_selectors = [
                'h1',
                'title',
                '[class*="title"]',
                '[class*="name"]',
                '[data-testid*="title"]'
            ]
            
            for selector in title_selectors:
                title_elem = soup.select_one(selector)
                if title_elem:
                    title = title_elem.get_text().strip()
                    if title and len(title) > 5:
                        data['title'] = title
                        break
            
            # Price: regexes for the common CNY formats (¥/￥/元/"价格:").
            price_patterns = [
                r'¥(\d+\.?\d*)',
                r'(\d+\.?\d*)元',
                r'价格[：:]\s*(\d+\.?\d*)',
                r'￥(\d+\.?\d*)',
                r'(\d+\.?\d*)\s*元',
                r'(\d+\.?\d*)\s*￥'
            ]
            
            page_text = soup.get_text()
            for pattern in price_patterns:
                matches = re.findall(pattern, page_text)
                if matches:
                    # Keep the first matching price only.
                    data['price'] = matches[0]
                    break
            
            # Product images: check src plus the common lazy-load attributes.
            img_elements = soup.find_all('img')
            images = []
            for img in img_elements:
                src = img.get('src') or img.get('data-src') or img.get('data-original')
                if src:
                    if not src.startswith('http'):
                        src = 'https:' + src if src.startswith('//') else self.base_url + src
                    
                    # Skip icons/ads: require a product-looking URL of some length.
                    if ('goods' in src.lower() or 
                        'product' in src.lower() or 
                        'item' in src.lower()) and len(src) > 50:
                        images.append(src)
            
            if images:
                data['images'] = list(set(images))[:10]  # de-duplicate, cap at 10
            
            # Description: first element with more than 10 chars of text.
            desc_selectors = [
                '[class*="desc"]',
                '[class*="detail"]',
                '[class*="intro"]',
                '[class*="info"]',
                'p'
            ]
            
            for selector in desc_selectors:
                desc_elem = soup.select_one(selector)
                if desc_elem:
                    desc = desc_elem.get_text().strip()
                    if desc and len(desc) > 10:
                        data['description'] = desc
                        break
            
            # Sales count: common Chinese "sold"/"buyers" phrasings.
            sales_patterns = [
                r'销量[：:]\s*(\d+)',
                r'已售[：:]\s*(\d+)',
                r'(\d+)\s*人付款',
                r'(\d+)\s*人购买',
                r'月销[：:]\s*(\d+)',
                r'(\d+)\s*件已售'
            ]
            
            for pattern in sales_patterns:
                matches = re.findall(pattern, page_text)
                if matches:
                    data['sales'] = matches[0]
                    break
            
            # Shop / merchant / brand name.
            shop_patterns = [
                r'店铺[：:]\s*([^\n\r]+)',
                r'商家[：:]\s*([^\n\r]+)',
                r'品牌[：:]\s*([^\n\r]+)'
            ]
            
            for pattern in shop_patterns:
                matches = re.findall(pattern, page_text)
                if matches:
                    data['shop'] = matches[0].strip()
                    break
            
            # Rating ("score"/"stars").
            rating_patterns = [
                r'评分[：:]\s*(\d+\.?\d*)',
                r'(\d+\.?\d*)\s*分',
                r'(\d+\.?\d*)\s*星'
            ]
            
            for pattern in rating_patterns:
                matches = re.findall(pattern, page_text)
                if matches:
                    data['rating'] = matches[0]
                    break
            
            return data if data else None
            
        except Exception as e:
            logger.error(f"HTML解析失败: {e}")
            return None
    
    def _parse_api_response(self, api_data: Dict[str, Any]) -> Dict[str, Any]:
        """Flatten an API response's 'goods' object into the scraper's schema.

        Missing fields default to ''.  The untouched 'goods' object is kept
        under 'raw_data'.  NOTE(review): the source field names (goods_name,
        min_group_price, ...) are assumed from observed responses -- confirm
        before relying on them.  Returns {} if flattening raises.
        """
        try:
            goods = api_data.get('goods', {})
            
            return {
                'title': goods.get('goods_name', ''),
                'price': goods.get('min_group_price', ''),
                'original_price': goods.get('min_normal_price', ''),
                'sales': goods.get('sales_tip', ''),
                'images': [img.get('url') for img in goods.get('gallery_urls', [])],
                'description': goods.get('goods_desc', ''),
                'shop_name': goods.get('mall_name', ''),
                'category': goods.get('cat_id_1', ''),
                'brand': goods.get('brand_name', ''),
                'rating': goods.get('goods_rate', ''),
                'comments_count': goods.get('review_count', ''),
                'stock': goods.get('quantity', ''),
                'shipping': goods.get('shipping_fee', ''),
                'guarantee': goods.get('guarantee_info', ''),
                'raw_data': goods
            }
            
        except Exception as e:
            logger.error(f"API数据解析失败: {e}")
            return {}
    
    def scrape_product_advanced(self, url: str) -> Optional[Dict[str, Any]]:
        """Run all eight strategies on a thread pool; return the first success.

        Returns None when the goods id cannot be extracted or every strategy
        fails.  Results are checked in submission order; strategies still
        running when a result is returned are not cancelled.
        """
        logger.info(f"开始高级抓取商品数据: {url}")
        
        goods_id = self.extract_goods_id(url)
        if not goods_id:
            logger.error("无法提取商品ID")
            return None
        
        # All strategies, wrapped so each can be submitted uniformly.
        # asyncio.run for method 5 is fine here: each lambda runs on a pool
        # worker thread that has no event loop of its own.
        methods = [
            lambda: self.method1_direct_request(url),
            lambda: self.method2_cloudscraper(url),
            lambda: self.method3_selenium_undetected(url),
            lambda: self.method4_api_request(goods_id),
            lambda: asyncio.run(self.method5_async_request(url)),
            lambda: self.method6_playwright(url),
            lambda: self.method7_mobile_api(goods_id),
            lambda: self.method8_webview_simulation(url)
        ]
        
        # Fan the strategies out over a small thread pool.
        with ThreadPoolExecutor(max_workers=4) as executor:
            futures = []
            for i, method in enumerate(methods, 1):
                future = executor.submit(self._safe_execute_method, method, i)
                futures.append(future)
            
            # Take the first non-None result, in submission order.
            for future in futures:
                try:
                    result = future.result(timeout=30)
                    if result:
                        return result
                except Exception as e:
                    logger.error(f"方法执行异常: {e}")
                    continue
        
        logger.error("所有方法都失败了")
        return None
    
    def _safe_execute_method(self, method, method_num: int):
        """Invoke *method*, logging and swallowing any exception; None on failure."""
        try:
            logger.info(f"尝试方法 {method_num}")
            result = method()
            if result:
                logger.info(f"方法 {method_num} 成功获取数据")
                return result
        except Exception as e:
            logger.error(f"方法 {method_num} 执行异常: {e}")
        return None
    
    # Basic fetch strategies (shared with the simpler scraper variant).
    def method1_direct_request(self, url: str) -> Optional[Dict[str, Any]]:
        """Strategy 1: plain GET with the shared requests session."""
        try:
            logger.info("尝试方法1: 直接请求页面")
            
            self._add_random_delay(1, 3)
            
            response = self.session.get(url, timeout=10)
            response.raise_for_status()
            
            soup = BeautifulSoup(response.text, 'html.parser')
            data = self._extract_from_html(soup)
            
            if data:
                logger.info("方法1成功获取数据")
                return data
                
        except Exception as e:
            logger.error(f"方法1失败: {e}")
        
        return None
    
    def method2_cloudscraper(self, url: str) -> Optional[Dict[str, Any]]:
        """Strategy 2: GET via cloudscraper to get past Cloudflare-style checks."""
        try:
            logger.info("尝试方法2: 使用cloudscraper")
            
            self._add_random_delay(2, 4)
            
            response = self.scraper.get(url, timeout=15)
            response.raise_for_status()
            
            soup = BeautifulSoup(response.text, 'html.parser')
            data = self._extract_from_html(soup)
            
            if data:
                logger.info("方法2成功获取数据")
                return data
                
        except Exception as e:
            logger.error(f"方法2失败: {e}")
        
        return None
    
    def method3_selenium_undetected(self, url: str) -> Optional[Dict[str, Any]]:
        """Strategy 3: render with undetected-chromedriver (anti-detection Chrome).

        The driver is always quit in the finally block, success or not.
        """
        driver = None
        try:
            logger.info("尝试方法3: 使用undetected-chromedriver")
            
            options = Options()
            options.add_argument('--no-sandbox')
            options.add_argument('--disable-dev-shm-usage')
            options.add_argument('--disable-blink-features=AutomationControlled')
            options.add_experimental_option("excludeSwitches", ["enable-automation"])
            options.add_experimental_option('useAutomationExtension', False)
            options.add_argument(f'--user-agent={self.ua.random}')
            
            driver = uc.Chrome(options=options)
            # Hide the navigator.webdriver flag that sites use to detect automation.
            driver.execute_script("Object.defineProperty(navigator, 'webdriver', {get: () => undefined})")
            
            driver.get(url)
            
            WebDriverWait(driver, 10).until(
                EC.presence_of_element_located((By.TAG_NAME, "body"))
            )
            
            # Scroll to the bottom to trigger lazy-loaded content.
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(2)
            
            page_source = driver.page_source
            soup = BeautifulSoup(page_source, 'html.parser')
            
            data = self._extract_from_html(soup)
            
            if data:
                logger.info("方法3成功获取数据")
                return data
                
        except Exception as e:
            logger.error(f"方法3失败: {e}")
        finally:
            if driver:
                driver.quit()
        
        return None
    
    def method4_api_request(self, goods_id: str) -> Optional[Dict[str, Any]]:
        """Strategy 4: call the goods-detail API directly with XHR-style headers.

        Same endpoint as strategy 7 but with a random desktop User-Agent.
        NOTE(review): endpoint path and 'goods' key are assumed -- verify.
        """
        try:
            logger.info("尝试方法4: 直接请求API接口")
            
            api_url = f"https://mobile.yangkeduo.com/proxy/api/search/goods/{goods_id}/detail"
            
            api_headers = {
                'User-Agent': self.ua.random,
                'Accept': 'application/json, text/plain, */*',
                'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
                'Referer': f'https://mobile.yangkeduo.com/goods.html?goods_id={goods_id}',
                'Origin': 'https://mobile.yangkeduo.com',
                'X-Requested-With': 'XMLHttpRequest',
                'Content-Type': 'application/json;charset=UTF-8'
            }
            
            self._add_random_delay(1, 2)
            
            response = self.session.get(api_url, headers=api_headers, timeout=10)
            response.raise_for_status()
            
            data = response.json()
            
            if data and 'goods' in data:
                logger.info("方法4成功获取数据")
                return self._parse_api_response(data)
                
        except Exception as e:
            logger.error(f"方法4失败: {e}")
        
        return None

def main():
    """Entry point: scrape one demo product URL, print and save the result."""
    scraper = AdvancedPDDScraper()
    
    # Demo product URL.
    url = "https://mobile.yangkeduo.com/goods.html?goods_id=429422443186"
    
    banner = "=" * 60
    print(banner)
    print("高级拼多多商品数据抓取工具")
    print(banner)
    
    result = scraper.scrape_product_advanced(url)
    
    if not result:
        print("\n❌ 获取数据失败")
        return
    
    print("\n✅ 成功获取商品数据:")
    print(json.dumps(result, ensure_ascii=False, indent=2))
    
    # Persist the result into the data directory.
    saved_path = scraper.save_data_to_file(result, url)
    if saved_path:
        print(f"\n📁 数据已保存到: {saved_path}")

if __name__ == "__main__":
    main()