#!/usr/bin/env python3
"""爬取1688指定URL的商品信息"""

import json
import logging
import re
import time
from typing import Dict, List, Optional
from urllib.parse import parse_qs, urlparse

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait


class Product1688Crawler:
    """Crawler that scrapes product listings from a 1688 search URL.

    Drives a Chromium browser via Selenium to load a paginated search
    result page and extract title / price / sales / link / image for
    each product card found.
    """

    # Candidate CSS selectors for one product card. 1688 varies its
    # markup, so several class-name patterns are tried at once.
    _PRODUCT_SELECTOR = '.offer-item, .sm-offer-item, [class*="offer"][class*="item"]'

    def __init__(self, headless: bool = True):
        self.headless = headless
        # WebDriver handle; set by initialize_driver(), cleared by close().
        self.driver = None
        self.logger = self._setup_logger()

    def _setup_logger(self) -> logging.Logger:
        """Create (or reuse) the crawler's INFO-level stream logger."""
        logger = logging.getLogger('1688_crawler')
        logger.setLevel(logging.INFO)

        # Guard against attaching duplicate handlers when several
        # crawler instances share the named logger.
        if not logger.handlers:
            handler = logging.StreamHandler()
            handler.setFormatter(
                logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
            logger.addHandler(handler)

        return logger

    def initialize_driver(self) -> bool:
        """Start a Chromium WebDriver session.

        Returns:
            True on success, False on failure (the error is logged,
            never raised).
        """
        try:
            options = Options()

            # Explicit path to the Chromium binary on this machine.
            options.binary_location = '/usr/bin/chromium'

            if self.headless:
                options.add_argument('--headless')
            options.add_argument('--no-sandbox')
            options.add_argument('--disable-dev-shm-usage')
            # The next three settings hide common automation fingerprints
            # that trigger bot detection.
            options.add_argument('--disable-blink-features=AutomationControlled')
            options.add_experimental_option("excludeSwitches", ["enable-automation"])
            options.add_experimental_option('useAutomationExtension', False)

            # Present a regular desktop-browser user agent.
            options.add_argument('--user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36')

            # Explicit chromedriver path matching the Chromium install above.
            from selenium.webdriver.chrome.service import Service
            service = Service('/usr/bin/chromedriver')
            self.driver = webdriver.Chrome(service=service, options=options)
            # Remove navigator.webdriver, another automation tell.
            self.driver.execute_script("Object.defineProperty(navigator, 'webdriver', {get: () => undefined})")

            self.logger.info("浏览器驱动初始化成功")
            return True

        except Exception as e:
            self.logger.error("初始化浏览器驱动失败: %s", e)
            return False

    def crawl_products(self, url: str, max_products: int = 100) -> List[Dict]:
        """Crawl up to *max_products* products starting from *url*.

        Pages through the listing via a ``page`` query parameter until
        enough products are collected, a page yields nothing, or no
        next-page control exists. The browser is always closed before
        returning.
        """
        # Reuse a live driver if one exists; starting a fresh one here
        # would leak the previous session. Bail out if startup fails.
        if self.driver is None and not self.initialize_driver():
            return []

        products = []
        page = 1

        try:
            while len(products) < max_products:
                self.logger.info("正在爬取第 %s 页...", page)

                # Navigate to the current results page.
                current_url = self._build_page_url(url, page)
                self.driver.get(current_url)

                # Give dynamically rendered content time to appear.
                time.sleep(3)

                page_products = self._extract_products_from_page()

                if not page_products:
                    self.logger.info("当前页面没有找到商品，停止爬取")
                    break

                products.extend(page_products)
                self.logger.info("第 %s 页找到 %s 个商品，总计 %s 个",
                                 page, len(page_products), len(products))

                # Stop (and trim any overshoot) once the target is reached.
                if len(products) >= max_products:
                    products = products[:max_products]
                    break

                if not self._has_next_page():
                    self.logger.info("已到达最后一页")
                    break

                page += 1
                time.sleep(2)  # throttle to avoid hammering the site

        except Exception as e:
            self.logger.error("爬取过程中出错: %s", e)

        finally:
            self.close()

        return products

    def _build_page_url(self, base_url: str, page: int) -> str:
        """Return *base_url* with its ``page`` query parameter set to *page*."""
        if 'page=' in base_url:
            # Replace the existing page number in place.
            return re.sub(r'page=\d+', f'page={page}', base_url)
        # Otherwise append the parameter with the correct separator.
        separator = '&' if '?' in base_url else '?'
        return f"{base_url}{separator}page={page}"

    def _extract_products_from_page(self) -> List[Dict]:
        """Extract all product dicts from the currently loaded page."""
        products = []

        try:
            # Wait until at least one product card is present.
            WebDriverWait(self.driver, 10).until(
                EC.presence_of_element_located((By.CSS_SELECTOR, self._PRODUCT_SELECTOR))
            )

            product_elements = self.driver.find_elements(
                By.CSS_SELECTOR, self._PRODUCT_SELECTOR
            )

            self.logger.info("找到 %s 个商品元素", len(product_elements))

            for element in product_elements:
                try:
                    product = self._extract_product_info(element)
                    # Only keep entries where at least a title was found.
                    if product and product.get('title'):
                        products.append(product)
                except Exception as e:
                    self.logger.debug("提取单个商品信息时出错: %s", e)
                    continue

        except TimeoutException:
            self.logger.warning("等待商品加载超时")
        except Exception as e:
            self.logger.error("提取商品信息时出错: %s", e)

        return products

    def _extract_product_info(self, element) -> Optional[Dict]:
        """Extract title/price/sales/link/image from one product card.

        Returns:
            A dict with whichever fields could be found, or None when no
            title was extracted (a card without a title is useless).
        """
        product = {}

        try:
            # Title: try selectors from most to least specific.
            title_selectors = [
                '.offer-title',
                '[class*="title"]',
                '.title',
                'a[title]'
            ]

            for selector in title_selectors:
                try:
                    title_element = element.find_element(By.CSS_SELECTOR, selector)
                    title_text = title_element.text.strip()
                    # Skip the generic "chat with seller" prompt text.
                    if title_text and '点此可以直接和卖家交流' not in title_text:
                        product['title'] = title_text
                        break
                except NoSuchElementException:
                    continue

            # Price: candidate selectors for the price node.
            price_selectors = [
                '.offer-price',
                '.price',
                '[class*="price"]',
                '.price-num',
                '[class*="Price"]'
            ]

            for selector in price_selectors:
                try:
                    price_element = element.find_element(By.CSS_SELECTOR, selector)
                    price_text = price_element.text.strip()
                    # Accept only text containing digits or a currency sign.
                    if price_text and ('￥' in price_text or any(char.isdigit() for char in price_text)):
                        # Reject sales-volume text that matched a price class.
                        if '成交' not in price_text and '万+' not in price_text:
                            product['price'] = self._clean_price(price_text)
                            break
                except NoSuchElementException:
                    continue

            # Sales: scan every descendant for "成交 ..." volume text.
            try:
                all_text_elements = element.find_elements(By.CSS_SELECTOR, '*')
                for text_element in all_text_elements:
                    text = text_element.text.strip()
                    if '成交' in text and ('万+' in text or '件' in text):
                        product['sales'] = self._clean_sales(text)
                        break
            except Exception:
                # Best-effort only; sales is an optional field.
                pass

            # Detail-page link: prefer anchors that point at a detail URL.
            link_selectors = [
                'a[href*="detail"]',
                '.offer-title a',
                '.title a',
                'a[href]'
            ]

            for selector in link_selectors:
                try:
                    link_element = element.find_element(By.CSS_SELECTOR, selector)
                    href = link_element.get_attribute('href')
                    if href and 'detail' in href:
                        product['link'] = href
                        break
                except NoSuchElementException:
                    continue

            # Image: lazy-loaded images may carry the URL in data-src.
            try:
                img_element = element.find_element(By.CSS_SELECTOR, 'img')
                src = img_element.get_attribute('src') or img_element.get_attribute('data-src')
                if src:
                    product['image'] = src
            except NoSuchElementException:
                pass

            return product if product.get('title') else None

        except Exception as e:
            self.logger.debug("提取商品信息时出错: %s", e)
            return None

    def _clean_price(self, price_text: str) -> str:
        """Return the first numeric run in *price_text*, or the raw text.

        Full-width commas are normalized first so "1，234.5" parses.
        """
        price_match = re.search(r'[\d,]+\.?\d*', price_text.replace('，', ','))
        return price_match.group() if price_match else price_text

    def _clean_sales(self, sales_text: str) -> str:
        """Return the first digit run in *sales_text*, or the raw text."""
        sales_match = re.search(r'\d+', sales_text)
        return sales_match.group() if sales_match else sales_text

    def _has_next_page(self) -> bool:
        """Return True when an enabled next-page control is present."""
        try:
            # Known next-page button patterns, excluding disabled ones.
            next_selectors = [
                '.next:not(.disabled)',
                '.fui-paging-next:not(.disabled)',
                'a[title*="下一页"]:not(.disabled)',
                '.page-next:not(.disabled)'
            ]

            for selector in next_selectors:
                try:
                    next_element = self.driver.find_element(By.CSS_SELECTOR, selector)
                    if next_element.is_enabled():
                        return True
                except NoSuchElementException:
                    continue

            return False

        except Exception as e:
            self.logger.debug("检查下一页时出错: %s", e)
            return False

    def close(self):
        """Quit the browser and drop the driver handle (idempotent)."""
        if self.driver:
            self.driver.quit()
            self.driver = None


# Default crawl target: 1688 search results for "酒店用方巾纸"
# (hotel square tissue), URL supplied by the user.
DEFAULT_URL = "https://www.1688.com/zw/page.html?hpageId=old-sem-pc-list&keywords=%E9%85%92%E5%BA%97%E7%94%A8%E6%96%B9%E5%B7%BE%E7%BA%B8&cosite=&location=&trackid=&spm=a312h.2018_new_sem.dh_004.submit&keywordid=&bt=&exp=pcSemFumian%3AC%3BpcCpxGuessExp%3AB%3Bbangdan%3AB%3Bqztf%3AE%3BpcSemWwClick%3AA%3Basst%3AE%3BpcSemDownloadPlugin%3AA&ptid=&sortType=&descendOrder=&province=&city=&priceStart=&priceEnd=&dis=&provinceValue=%E6%89%80%E5%9C%A8%E5%9C%B0%E5%8C%BA&p_rs=true"


def main(url: str = DEFAULT_URL, max_products: int = 100) -> None:
    """Crawl products from *url*, print a summary, and save them as JSON.

    Args:
        url: 1688 listing/search URL to crawl (defaults to DEFAULT_URL).
        max_products: upper bound on the number of products to collect.
    """
    # headless=False keeps the browser window visible for observation.
    crawler = Product1688Crawler(headless=False)

    print("开始爬取1688商品信息...")
    print(f"目标URL: {url}")
    print(f"目标数量: {max_products}个商品")
    print("-" * 50)

    products = crawler.crawl_products(url, max_products=max_products)

    print(f"\n爬取完成！共获取 {len(products)} 个商品信息")
    print("-" * 50)

    # Show details for the first five products only.
    for i, product in enumerate(products[:5], 1):
        print(f"\n商品 {i}:")
        print(f"  标题: {product.get('title', 'N/A')}")
        print(f"  价格: {product.get('price', 'N/A')}")
        print(f"  销量: {product.get('sales', 'N/A')}")
        print(f"  链接: {product.get('link', 'N/A')}")

    if len(products) > 5:
        print(f"\n... 还有 {len(products) - 5} 个商品")

    # Persist the full result set to disk.
    output_file = "1688_products.json"
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(products, f, ensure_ascii=False, indent=2)

    print(f"\n结果已保存到: {output_file}")

    # Per-field coverage statistics.
    print("\n统计信息:")
    print(f"  总商品数: {len(products)}")
    print(f"  有价格信息的商品: {len([p for p in products if p.get('price')])}")
    print(f"  有销量信息的商品: {len([p for p in products if p.get('sales')])}")
    print(f"  有链接的商品: {len([p for p in products if p.get('link')])}")


if __name__ == "__main__":
    main()