#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：Amazon Scraper
@File    ：amazon_scraper.py
@Author  ：Harvey
@Date    ：2025/3/25
'''

import json
import csv
import os
import time
import signal
import sys
from bs4 import BeautifulSoup
from DrissionPage import ChromiumPage, ChromiumOptions

# Shutdown flag polled by long-running loops: set to True by the signal
# handler so the program can finish its current task before exiting.
should_exit = False

def signal_handler(sig, frame):
    """Flip the global shutdown flag so the main loop can exit gracefully."""
    global should_exit
    print("\nReceived interrupt signal. Finishing current task before exiting...")
    should_exit = True

# Route both Ctrl-C and termination requests through the same handler.
for _sig in (signal.SIGINT, signal.SIGTERM):
    signal.signal(_sig, signal_handler)

def log(level, message):
    """Print *message* prefixed with a severity label (0=ERROR, 1=INFO, 2=DEBUG).

    Unknown levels fall back to the INFO label.
    """
    label = {0: "ERROR", 1: "INFO", 2: "DEBUG"}.get(level, "INFO")
    print(f"[{label}] {message}")

def create_dp():
    """Build and return a configured ChromiumPage (DrissionPage) browser.

    The browser runs with a visible window, in incognito mode, routed
    through a local proxy, with a fixed desktop Chrome user agent.
    """
    options = ChromiumOptions()
    options.headless(False)                       # keep the window visible
    options.incognito(True)                       # incognito: no saved credentials/history
    options.set_proxy('http://127.0.0.1:7890')    # route traffic through the local proxy
    options.set_user_agent(
        user_agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36')
    options.set_argument('--no-sandbox')          # disable the Chromium sandbox
    options.set_argument("--disable-gpu")         # disable GPU acceleration
    options.set_argument('--start-maximized')
    options.set_argument('--window-size', '1200,1000')
    return ChromiumPage(options)

def _collect_table_pairs(tables, key_selector, value_selector, skip_empty=False):
    """Collect key/value text pairs from the <tr> rows of table elements.

    tables: iterable of BeautifulSoup table elements.
    key_selector/value_selector: CSS selectors applied per row.
    skip_empty: when True, drop pairs where either text is empty.
    Returns a dict of stripped-text pairs (later tables/rows overwrite
    earlier duplicates, matching the original accumulation order).
    """
    pairs = {}
    for table in tables:
        for row in table.select('tr'):
            key_element = row.select_one(key_selector)
            value_element = row.select_one(value_selector)
            if not (key_element and value_element):
                continue
            key = key_element.text.strip()
            value = value_element.text.strip()
            if skip_empty and not (key and value):
                continue
            pairs[key] = value
    return pairs


def _extract_warranty_text(soup):
    """Return the Warranty & Support section as one string, or None."""
    section = soup.find(id='productSpecifications_dp_warranty_and_support')
    if not section:
        return None
    parts = [span.text.strip()
             for span in section.select('.a-section.table-padding > span')
             if span.text.strip()]
    # The section may also carry a key/value table.
    table = section.select_one('table#productDetails_warranty_support_sections')
    if table:
        for row in table.select('tr'):
            key_element = row.select_one('th')
            value_element = row.select_one('td')
            if key_element and value_element:
                parts.append(f"{key_element.text.strip()}: {value_element.text.strip()}")
    return ' '.join(parts) if parts else None


def _extract_about_items(soup, feature_bullets):
    """Return the "About this item" bullet texts as a list (may be empty)."""
    items = []
    if feature_bullets:
        for item in feature_bullets.select('li span.a-list-item'):
            text = item.text.strip()
            if text:
                items.append(text)
    # Fallback: alternative bullet-list layouts, de-duplicated.
    if not items:
        for bullet_list in soup.select('.feature-bullets ul, .product-facts-list, .a-unordered-list'):
            for item in bullet_list.select('li'):
                text = item.text.strip()
                if text and text not in items:
                    items.append(text)
    return items


def _extract_description(soup, product_description):
    """Return the product description joined from <p> tags, or None."""
    if product_description:
        texts = [p.text.strip() for p in product_description.select('p') if p.text.strip()]
        if texts:
            return ' '.join(texts)
    # Fallback: alternative description/A+ content sections.
    for section in soup.select('.product-description, .aplus-v2, .aplus'):
        texts = [p.text.strip() for p in section.select('p') if p.text.strip()]
        if texts:
            return ' '.join(texts)
    return None


def extract_product_info(html, url):
    """Parse an Amazon product page and return a dict of product fields.

    html: full page HTML; url: the page URL (echoed into the result).
    Fields that are absent or empty come back as None (empty dicts/lists
    are normalized to None, matching the original cleanup pass).
    'ingredients' and 'directions' are placeholders, never populated here.
    """
    soup = BeautifulSoup(html, 'lxml')  # lxml parser for speed

    # Fetch all key elements up front to minimize DOM queries.
    title_element = soup.find(id='productTitle')
    price_whole = soup.select_one('.a-price .a-price-whole')
    price_symbol = soup.select_one('.a-price .a-price-symbol')
    feature_table = soup.select_one('.a-section.a-spacing-small.a-spacing-top-small table')
    feature_bullets = soup.find(id='feature-bullets')
    product_description = soup.find(id='productDescription')
    tech_tables = soup.select('#productDetails_techSpec_section_1, #technicalSpecifications_section_1, .tech-details table')
    add_tables = soup.select('#productDetails_detailBullets_sections1, #additionalInformation, .additional-info table')

    # Product feature table, with a fallback over generic table layouts.
    features = {}
    if feature_table:
        features = _collect_table_pairs([feature_table], '.a-text-bold', '.po-break-word')
    if not features:
        fallback_tables = soup.select('table.a-normal, table.a-keyvalue, table.product-facts-table')
        features = _collect_table_pairs(
            fallback_tables,
            'th, td:first-child, .label, .a-text-bold',
            'td:last-child, .value, .a-size-base:not(.a-text-bold)',
            skip_empty=True)

    technical_details = _collect_table_pairs(
        tech_tables, 'th, .label, .prodDetSectionEntry', 'td, .value, .prodDetAttrValue',
        skip_empty=True)
    additional_info = _collect_table_pairs(
        add_tables, 'th, .label, .prodDetSectionEntry', 'td, .value')

    # Both price parts must be present for price/currency to be set.
    has_price = bool(price_whole and price_symbol)

    return {
        'url': url,
        'title': title_element.text.strip() if title_element else None,
        'price': price_whole.text.strip().replace(',', '') if has_price else None,
        'currency': price_symbol.text.strip() if has_price else None,
        'features': features or None,
        'about_items': _extract_about_items(soup, feature_bullets) or None,
        'technical_details': technical_details or None,
        'additional_info': additional_info or None,
        'ingredients': None,
        'directions': None,
        'description': _extract_description(soup, product_description),
        'warranty_and_support': _extract_warranty_text(soup),
    }

def extract_product_images(html, url):
    """Extract the main product image and gallery image URLs from page HTML.

    Returns {'main_image': str | None, 'gallery_images': list[str]}.
    Gallery entries are de-duplicated and never repeat the main image.
    Extraction is best-effort: any parsing error is logged and whatever
    was collected so far is returned. *url* is accepted for signature
    parity with extract_product_info but is not used.
    """
    soup = BeautifulSoup(html, 'lxml')  # lxml parser for speed
    images = {
        'main_image': None,
        'gallery_images': []
    }

    try:
        # Fetch the relevant image elements in one pass.
        main_img = soup.find(id='landingImage')
        thumb_images = soup.select('#altImages .imageThumbnail img')
        alt_img_selected = soup.select('.selected .imgTagWrapper img')

        if main_img:
            # Prefer the high-resolution URL when available.
            images['main_image'] = (main_img.get('data-old-hires')
                                    or main_img.get('src')
                                    or None)

            # data-a-dynamic-image holds a JSON object keyed by image URL.
            dynamic_attr = main_img.get('data-a-dynamic-image')
            if dynamic_attr:
                try:
                    dynamic_images = json.loads(dynamic_attr)
                except ValueError:
                    # Malformed JSON in the attribute — skip, best-effort.
                    # (Was a bare `except:` that also swallowed KeyboardInterrupt.)
                    pass
                else:
                    for img_url in dynamic_images.keys():
                        if img_url != images['main_image'] and img_url not in images['gallery_images']:
                            images['gallery_images'].append(img_url)

        # Thumbnails: upgrade the size token in the URL to the large variant.
        for img in thumb_images:
            if img.get('src'):
                hd_url = img['src'].replace('_AC_SR38,50_', '_AC_SL1500_')
                if hd_url not in images['gallery_images'] and hd_url != images['main_image']:
                    images['gallery_images'].append(hd_url)

        # Currently-selected variant images.
        for img in alt_img_selected:
            img_url = img.get('data-old-hires') or img.get('src')
            if img_url and img_url not in images['gallery_images'] and img_url != images['main_image']:
                images['gallery_images'].append(img_url)

    except Exception as e:
        log(0, f"[图片提取] 提取图片时出错: {str(e)}")

    return images

def load_urls(json_file_path):
    """Read the URL/status list from a JSON file.

    Returns the parsed data, or an empty list (after logging) when the
    file cannot be read or parsed.
    """
    try:
        with open(json_file_path, 'r', encoding='utf-8') as fp:
            return json.load(fp)
    except Exception as e:
        log(0, f"加载URL文件失败: {str(e)}")
        return []

def save_urls(json_file_path, urls_data):
    """Write the URL/status list back to disk as pretty-printed JSON.

    Returns True on success, False (after logging) on any write failure.
    """
    try:
        with open(json_file_path, 'w', encoding='utf-8') as fp:
            json.dump(urls_data, fp, ensure_ascii=False, indent=4)
    except Exception as e:
        log(0, f"保存URL状态失败: {str(e)}")
        return False
    return True

def append_to_csv(csv_file_path, data):
    """Append one scraped product as a CSV row, writing the header on demand.

    data: dict with 'url', 'product_info', 'images'; the nested dicts are
    serialized as JSON strings so each product fits in a single row.
    Returns True on success, False (after logging) on any I/O error.
    """
    # Write the header when the file is missing OR exists but is empty
    # (the original existence-only check left empty files without a header).
    needs_header = (not os.path.exists(csv_file_path)
                    or os.path.getsize(csv_file_path) == 0)

    try:
        with open(csv_file_path, 'a', newline='', encoding='utf-8') as f:
            writer = csv.writer(f)

            if needs_header:
                writer.writerow(['url', 'product_info', 'images'])

            writer.writerow([
                data['url'],
                json.dumps(data['product_info'], ensure_ascii=False),
                json.dumps(data['images'], ensure_ascii=False)
            ])
        return True
    except Exception as e:
        log(0, f"写入CSV失败: {str(e)}")
        return False

def scrape_amazon_products(json_file_path, csv_file_path):
    """Scrape product data for every pending URL listed in *json_file_path*.

    Each entry in the JSON file is a dict with 'url' and 'status'
    (0 = pending, 1 = done, 2 = error). Results are appended to
    *csv_file_path*, and the status file is rewritten after every URL so
    progress survives interruption. Honors the global `should_exit` flag
    between URLs for graceful shutdown.
    """
    urls_data = load_urls(json_file_path)
    if not urls_data:
        log(0, "没有URL可处理或加载失败")
        return

    browser = create_dp()

    try:
        for i, item in enumerate(urls_data):
            # Stop between URLs if an interrupt signal was received.
            if should_exit:
                log(1, "正在退出程序...")
                break

            url = item['url']
            status = item['status']

            # Skip URLs already processed (1) or previously failed (2).
            if status != 0:
                log(1, f"跳过URL ({i+1}/{len(urls_data)}): {url}, 状态: {status}")
                continue

            log(1, f"正在处理 ({i+1}/{len(urls_data)}): {url}")

            try:
                browser.get(url)

                # Wait for the product title to render; on timeout, still
                # try to parse whatever HTML we have.
                try:
                    browser.wait.ele_display('#productTitle', timeout=30)
                except Exception:
                    # Was a bare `except:` — must not swallow KeyboardInterrupt.
                    log(1, "等待页面加载超时，尝试继续处理...")

                html = browser.html

                product_info = extract_product_info(html, url)
                images = extract_product_images(html, url)

                # A missing title means the page likely failed to load.
                if not product_info['title']:
                    log(0, f"没有找到产品标题，可能加载失败: {url}")
                    item['status'] = 2  # mark as error
                    save_urls(json_file_path, urls_data)
                    continue

                # Console summary of what was extracted.
                print("\n" + "="*80)
                print(f"URL: {url}")
                print(f"标题: {product_info['title']}")
                print(f"价格: {product_info['currency'] or ''}{product_info['price'] or '未知'}")
                print(f"\n主图: {images['main_image'] or '未找到'}")
                print(f"\n画廊图片({len(images['gallery_images']) if images['gallery_images'] else 0}):")
                if images['gallery_images']:
                    # Distinct index name: the original shadowed the outer
                    # loop counter `i` here.
                    for img_idx, img_url in enumerate(images['gallery_images']):
                        print(f"{img_idx+1}. {img_url}")
                else:
                    print("无画廊图片")
                print("="*80 + "\n")

                data = {
                    'url': url,
                    'product_info': product_info,
                    'images': images
                }

                if append_to_csv(csv_file_path, data):
                    item['status'] = 1  # mark as success
                    save_urls(json_file_path, urls_data)

                # Small delay between requests to avoid rate limiting.
                time.sleep(1)

            except Exception as e:
                log(0, f"处理URL失败: {url}, 错误: {str(e)}")
                item['status'] = 2  # mark as error
                save_urls(json_file_path, urls_data)

    finally:
        # Best-effort browser shutdown; don't mask the original error.
        try:
            browser.quit()
        except Exception:
            pass

        log(1, "爬取完成")

def _main():
    """Script entry point: scrape all pending URLs from p1.json into p1.csv."""
    json_file_path = 'p1.json'  # JSON file holding the URL/status list
    csv_file_path = 'p1.csv'    # CSV file results are appended to

    log(1, f"开始爬取亚马逊商品数据")
    log(1, f"URL文件: {json_file_path}")
    log(1, f"输出文件: {csv_file_path}")

    scrape_amazon_products(json_file_path, csv_file_path)


if __name__ == '__main__':
    _main()