from datetime import datetime
import random
import time
import requests
import json
import re
from bs4 import BeautifulSoup
import logging
import traceback

from proxy_zly import getproxy_zly
# Module-level proxy state: grab one proxy at import time and track how often
# it has been reused (presumably mutated by request code elsewhere — the
# counter is only initialised here).
lastproxy   =getproxy_zly()
proxyusetimes2=0

class amazonproductXue:
    """Plain data holder for one scraped Amazon product.

    Every field starts at an empty default and is populated later by the
    page-parsing functions in this module.
    """

    def __init__(self):
        # Scalar fields that are unknown until parsed.
        for scalar_field in ('product_name', 'asin', 'price', 'availability'):
            setattr(self, scalar_field, None)
        # Prices on amazon.com are quoted in US dollars.
        self.currency = 'USD'
        # Collection fields — fresh containers per instance.
        self.description = []
        self.product_details = {}
        self.store_info = {}
        self.sku_list = []
        self.image_list = []
        self.videos = []

# Parse a raw product page into an amazonproductXue instance.
def parse_amazon_page(html_content):
    """Build an ``amazonproductXue`` from raw product-page HTML.

    FIX: the ASIN candidate list was the literal placeholder ``[...]``; the
    loop then iterated over ``Ellipsis`` and ``source.get`` raised
    AttributeError on the first pass.  It now collects the tags that actually
    carry a ``data-asin`` attribute.

    :param html_content: raw HTML string of a product page
    :return: partially-populated amazonproductXue (ASIN set when found)
    """
    soup = BeautifulSoup(html_content, 'html.parser')
    result = amazonproductXue()

    # First tag with a non-empty data-asin attribute wins.
    for source in soup.select('[data-asin]'):
        if source.get('data-asin'):
            result.asin = source.get('data-asin')
            break

    # Other fields are filled in by the dedicated parsers in this module.
    return result
# Session cookies captured from a real browser session.
# NOTE(review): these are hard-coded, expiring session tokens (session-id,
# session-token, captcha markers). They should be rotated / loaded from
# configuration rather than committed to source control.
cookies = {
    'csm-sid': '994-6662062-1139818',
    'x-amz-captcha-1': '1741408998605935',
    'x-amz-captcha-2': 'V53IuydljPCsRfk7YAu/Zg==',
    'session-id': '142-5234308-1535544',
    'session-id-time': '2082787201l',
    'i18n-prefs': 'USD',
    'skin': 'noskin',
    'ubid-main': '134-7577819-5158835',
    'session-token': '97qNARecL6AhRlU72y0BSdIeD97hbfak7iRaihX0Q2ZkOxdgUk7+oDN0E2kaZrHJKFDFTNSW/KgkwVYDP4YOX2wVByazzzcZlzLxDQTbnplWIyvEhHcE5NsTpy8DWTjo5HwF43KwRjlR+pHmmj3jmZCJErceLAXazal9kTcE6qUgw9PWbWEsCL+HuUH7FZ90Pl+X+OVre5IWkVUyZB3TladvSlgLoHDiRsnkeMhK3wljPwFySr7zeBcfW8en3p7b3zQ8niMsen/nwLyRSCDjyg6UdbLceyWleC/uFJ8d8LDma1nXkQOQlVOcGoOukiHcr3hIbpXDh9UJvoo+QHhQLi5oIzYO8suR',
    'csm-hit': 'tb:s-KRGMEW5JVMEVK3N8NDM0|1741401940558&t:1741401945930&adb:adblk_no',
}

# Request headers mimicking a desktop Edge/Chromium browser so the pages are
# served as a normal navigation. Cookies are passed separately via the
# `cookies` dict above (the commented-out raw Cookie header is kept for
# reference only).
headers = {
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
    'cache-control': 'max-age=0',
    'device-memory': '4',
    'downlink': '10',
    'dpr': '1',
    'ect': '4g',
    'priority': 'u=0, i',
    'referer': 'https://www.amazon.com/s?k=TREELIGHT&crid=24FYLBYMFX1AY&sprefix=treelight%2Caps%2C1311&ref=nb_sb_noss_2',
    'rtt': '100',
    'sec-ch-device-memory': '4',
    'sec-ch-dpr': '1',
    'sec-ch-ua': '"Not(A:Brand";v="99", "Microsoft Edge";v="133", "Chromium";v="133"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
    'sec-ch-ua-platform-version': '"12.0.0"',
    'sec-ch-viewport-width': '1912',
    'sec-fetch-dest': 'document',
    'sec-fetch-mode': 'navigate',
    'sec-fetch-site': 'same-origin',
    'sec-fetch-user': '?1',
    'upgrade-insecure-requests': '1',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36 Edg/133.0.0.0',
    'viewport-width': '1912',
    # 'cookie': 'csm-sid=994-6662062-1139818; x-amz-captcha-1=1741408998605935; x-amz-captcha-2=V53IuydljPCsRfk7YAu/Zg==; session-id=142-5234308-1535544; session-id-time=2082787201l; i18n-prefs=USD; skin=noskin; ubid-main=134-7577819-5158835; session-token=97qNARecL6AhRlU72y0BSdIeD97hbfak7iRaihX0Q2ZkOxdgUk7+oDN0E2kaZrHJKFDFTNSW/KgkwVYDP4YOX2wVByazzzcZlzLxDQTbnplWIyvEhHcE5NsTpy8DWTjo5HwF43KwRjlR+pHmmj3jmZCJErceLAXazal9kTcE6qUgw9PWbWEsCL+HuUH7FZ90Pl+X+OVre5IWkVUyZB3TladvSlgLoHDiRsnkeMhK3wljPwFySr7zeBcfW8en3p7b3zQ8niMsen/nwLyRSCDjyg6UdbLceyWleC/uFJ8d8LDma1nXkQOQlVOcGoOukiHcr3hIbpXDh9UJvoo+QHhQLi5oIzYO8suR; csm-hit=tb:s-KRGMEW5JVMEVK3N8NDM0|1741401940558&t:1741401945930&adb:adblk_no',
}





# Configure module logging: timestamped, level-tagged output at INFO level.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

def amazon_scraper(html_content):
    """Parse an Amazon search-results page into a list of product dicts.

    Each dict carries: productid, title, price (float), currency, image_url,
    link, rating, reviews_count, seller, sales_info, delivery_info, in_stock,
    brand and category.  Result items whose ``data-asin`` is not a
    well-formed 10-character ASIN (ad slots, placeholders) are skipped.

    :param html_content: raw HTML of a search-results page
    :return: list of product dicts (possibly partially filled on parse errors)
    """
    soup = BeautifulSoup(html_content, 'html.parser')
    product_list = []

    for item in soup.select('div.s-result-item[data-asin]'):
        asin = item['data-asin']
        # Only accept real ASINs: exactly 10 uppercase alphanumerics.
        if not re.match(r'^[A-Z0-9]{10}$', asin):
            continue

        try:
            product_data = {
                'productid': asin,
                'title': '',
                'price': '',
                # FIX: was 'US' (a country code, not a currency); the rest of
                # the module (class default, i18n-prefs cookie) uses USD.
                'currency': 'USD',
                'image_url': '',
                'link': '',
                'rating': '',
                'reviews_count': '',
                'seller': '',
                'sales_info': '',
                'delivery_info': '',
                'in_stock': True,
                'brand': '',
                'category': ''
            }

            # ---- title ----
            title_elem = item.select_one('h2 span')
            if title_elem:
                product_data['title'] = title_elem.text.strip()

            # ---- price: primary source is the hidden .a-offscreen span ----
            price_text = ''
            price_elem = item.select_one('.a-price .a-offscreen')
            product_data['price'] = 0.0
            if price_elem:
                price_text = price_elem.text.strip()
                digits = re.sub(r'[^\d.]', '', price_text)
                if re.match(r'^\d+(\.\d+)?$', digits):
                    product_data['price'] = float(digits)
                else:
                    product_data['price'] = 0.0

            # Fallback 1: separate whole/fraction spans.
            # NOTE(review): the two parts are concatenated without a '.', so
            # this relies on the whole-part text already carrying the decimal
            # separator — confirm against live markup.
            if not product_data['price']:
                price_whole = item.select_one('span.a-price-whole').get_text(strip=True) if item.select_one('span.a-price-whole') else ''
                price_fraction = item.select_one('span.a-price-fraction').get_text(strip=True) if item.select_one('span.a-price-fraction') else ''
                price_text = f"{price_whole}{price_fraction}"

            # Fallback 2: scan generic spans for a '$'-prefixed amount.
            # FIX: the original guard compared ``re.match(...) == False``,
            # which is never true (re.match returns a Match object or None),
            # so this fallback never executed.
            if not product_data['price'] and not re.match(r'^\d+(\.\d+)?$', price_text):
                for candidate in item.find_all('span', class_='a-color-base'):
                    price_text = candidate.get_text(strip=True)
                    if price_text.startswith('$'):
                        price_text = price_text.replace('$', '').replace(',', '').strip()
                        logger.debug('price candidate: %s', price_text)

            if price_text != '' and product_data['price'] == 0 and re.match(r'^\d+(\.\d+)?$', price_text):
                product_data['price'] = float(price_text.replace('$', '').replace(',', '').strip())
            logger.debug('price=%s price_text=%s', product_data['price'], price_text)

            # ---- image ----
            img_elem = item.select_one('img.s-image')
            if img_elem and img_elem.get('src'):
                product_data['image_url'] = img_elem.get('src')

            # ---- product link ----
            link_elem = item.select_one('a.a-link-normal.s-line-clamp-2.s-link-style[href*="/dp/"]')
            if link_elem and link_elem.get('href'):
                href = link_elem.get('href', '')
                # Guard against tracking-parameter-bloated hrefs.
                if len(href) > 750:
                    href = href[:750]
                product_data['link'] = 'https://www.amazon.com' + href

            # Fallback: any anchor whose href contains /dp/<asin>.
            # FIX: the original used select_one('a', href=...) — select_one()
            # does not accept attribute filters; find() does.
            if product_data['link'] == '':
                link = item.find('a', href=lambda href: href and f'/dp/{asin}' in href)
                if link:
                    logger.debug('%s list found link: %s', asin, link['href'])
                    product_data['link'] = 'https://www.amazon.com' + link.get('href')

            # ---- rating ----
            rating_elem = item.select_one('.a-icon-alt')
            if rating_elem:
                rating_match = re.search(r'(\d+\.?\d*)', rating_elem.text.strip())
                if rating_match:
                    product_data['rating'] = rating_match.group(1)
                else:
                    logger.debug('no rating')

            # ---- review count ----
            reviews_elem = item.select_one('a[href*="customerReviews"] span')
            if reviews_elem:
                product_data['reviews_count'] = reviews_elem.text.strip().replace(',', '')
            else:
                logger.debug('no reviews_count')

            # ---- seller / brand heuristics (first hit wins) ----
            # NOTE(review): ':contains()' is a deprecated soupsieve alias for
            # ':-soup-contains()' — kept for compatibility with the installed
            # version.
            seller_elem = item.select_one('span.a-size-base:contains("by")')
            if seller_elem:
                seller_match = re.search(r'by (.+)', seller_elem.text.strip())
                if seller_match:
                    product_data['seller'] = seller_match.group(1).strip()
                    product_data['brand'] = seller_match.group(1).strip()
            else:
                store_elem = item.select_one('a:contains("Visit the")')
                if store_elem:
                    store_match = re.search(r'Visit the (.+) Store', store_elem.text.strip())
                    if store_match:
                        product_data['seller'] = store_match.group(1).strip()
                        product_data['brand'] = store_match.group(1).strip()
                elif product_data['title']:
                    # An all-caps or TM-suffixed first title word is very
                    # likely the brand/seller.
                    title_parts = product_data['title'].split(' ')
                    if title_parts:
                        potential_brand = title_parts[0]
                        if (potential_brand.isupper() or
                                potential_brand.endswith('TM') or
                                all(c.isupper() for c in potential_brand if c.isalpha())):
                            product_data['seller'] = potential_brand
                            product_data['brand'] = potential_brand

            # ---- prefer the (longer) thumbnail alt text as title ----
            # FIX: select_one('img', class_=...) — select_one() takes no
            # class_ filter; use find().
            img_tag = item.find('img', class_='s-image')
            title2 = img_tag['alt'] if img_tag and 'alt' in img_tag.attrs else 'No title found'
            if len(title2) > 50:
                product_data['title'] = title2
                product_data['brand'] = title2

            badge_area = item.select_one('.a-row.a-size-base span:contains("by Amazon")')
            if badge_area:
                product_data['seller'] = "Amazon"

            # Last resort: derive seller/brand from the link slug.
            if not product_data['seller'] and product_data['link']:
                url_parts = product_data['link'].split('/')
                if len(url_parts) > 3:
                    for part in url_parts:
                        if part and part not in ['dp', 'ref', 'keywords', 'sr']:
                            potential_seller = part.replace('-', ' ').title()
                            if len(potential_seller) > 3 and potential_seller not in ['Amazon', 'Com', 'Www']:
                                product_data['seller'] = potential_seller
                                if not product_data['brand']:
                                    product_data['brand'] = potential_seller
                                # Cap overly long brand names at two words.
                                if len(product_data['brand']) > 30:
                                    words1 = product_data['brand'].split()
                                    product_data['brand'] = ' '.join(words1[:2])
                                break

            # ---- sales volume ("... bought in past month") ----
            sales_elem = item.select_one('.a-color-secondary:contains("bought in past month")')
            if sales_elem:
                product_data['sales_info'] = sales_elem.text.strip()
            else:
                logger.debug('no sales_info')

            # Brand fallback from the title's first word.
            if not product_data['brand']:
                title_parts = product_data['title'].split(' ')
                if title_parts:
                    potential_brand = title_parts[0]
                    if potential_brand.isupper() or potential_brand.endswith('TM'):
                        product_data['brand'] = potential_brand

            # ---- delivery info ----
            delivery_info = []
            for elem in item.select('.a-color-base'):
                if 'delivery' in elem.text.lower() or 'fastest' in elem.text.lower():
                    delivery_info.append(elem.text.strip())
            if delivery_info:
                product_data['delivery_info'] = ' '.join(delivery_info)

            # ---- authoritative brand/category from structured markup ----
            # NOTE(review): this deliberately resets brand/category and
            # discards every heuristic above — preserved because downstream
            # code may rely on it, but worth revisiting.
            brand = ''
            product_data['brand'] = ''
            product_data['category'] = ''
            product_type = ''
            brand_elem = item.select_one('h2.a-size-mini.s-line-clamp-1 span.a-size-base-plus')
            if brand_elem:
                brand = brand_elem.get_text(strip=True)
            type_elem = item.select_one('div[data-csa-c-product-type]')
            if type_elem:
                product_type = type_elem['data-csa-c-product-type']
            if not product_data['brand']:
                product_data['brand'] = brand if brand else ''
            if not product_data['category']:
                product_data['category'] = product_type if product_type else ''

            product_list.append(product_data)

        except Exception as e:
            # Keep the partially-parsed item so one bad field does not lose
            # the whole product entry.
            print(f"解析产品 {asin} 时出错，0: {str(e)}")
            traceback.print_exc()
            product_list.append(product_data)
            continue

    return product_list

# Helper functions
def get_text(element, selector):
    """Return the stripped text of the first *selector* match under *element*.

    Returns None when nothing matches.
    FIX: narrowed the bare ``except`` (which swallowed everything, including
    KeyboardInterrupt) to the AttributeError raised when select_one() finds
    no match.
    """
    try:
        return element.select_one(selector).get_text(strip=True)
    except AttributeError:
        return None

def get_attr(element, selector, attr):
    """Return attribute *attr* of the first *selector* match under *element*.

    Returns None when the selector matches nothing (TypeError from indexing
    None) or the matched tag lacks the attribute (KeyError).
    FIX: narrowed the bare ``except`` to the exceptions this lookup can
    actually raise.
    """
    try:
        return element.select_one(selector)[attr]
    except (AttributeError, KeyError, TypeError):
        return None

def clean_price(price):
    """Convert a price string such as "$1,234.56" to a float.

    Returns None for None input (AttributeError on .replace) or non-numeric
    text (ValueError on float()).
    FIX: narrowed the bare ``except`` to those two exceptions.
    """
    try:
        return float(price.replace('$', '').replace(',', '').strip())
    except (AttributeError, ValueError):
        return None

def format_url(href):
    """Prefix a relative Amazon href with the site origin; falsy input -> None."""
    if not href:
        return None
    return f"https://amazon.com{href}"
    
def parse_rating(element):
    """Extract the star rating (e.g. 4.5 from "4.5 out of 5 stars").

    Returns None when the star icon is missing (AttributeError) or its text
    does not start with a number (ValueError).
    FIX: narrowed the bare ``except``.
    """
    try:
        rating_text = element.select_one('i.a-icon-star').get_text()
        return float(rating_text.split(' ')[0])
    except (AttributeError, ValueError):
        return None

def parse_reviews(element):
    """Extract the review count (e.g. 1234 from "1,234").

    Returns None when the count span is missing (AttributeError) or its text
    is not an integer (ValueError).
    FIX: narrowed the bare ``except``.
    """
    try:
        reviews_text = element.select_one('span.a-size-base').get_text()
        return int(reviews_text.replace(',', ''))
    except (AttributeError, ValueError):
        return None
def _find_price_for_asin(soup, asin):
    # 从价格表中匹配ASIN对应价格
    price_cells = soup.select(f'tr[data-asin="{asin}"] td.a-color-price')
    return float(price_cells[0].text.strip('$')) if price_cells else None

# 尺寸库存检查
def _check_size_availability(soup, asin):
    # 通过API或隐藏字段检查库存
    stock_indicator = soup.find('div', attrs={'data-asin': asin, 'class': 'a-section'})
    return 'In Stock' in stock_indicator.text if stock_indicator else False
def clean_spec_number(raw_str):
    """Clean special characters out of a product spec string.

    Removes LEFT-TO-RIGHT MARK (U+200E), RIGHT-TO-LEFT MARK (U+200F) and
    newlines, then collapses whitespace runs to single spaces and strips
    the ends.

    FIX: the original computed two intermediate cleanups (str.translate and
    a split/join) whose results were immediately discarded — the final
    regexes restarted from ``raw_str``.  Only the effective regex path is
    kept; output is identical.

    :param raw_str: raw spec string
    :return: cleaned string
    """
    cleaned = re.sub(r'[\u200e\u200f\n]', '', raw_str)   # drop invisible marks / newlines
    return re.sub(r'\s+', ' ', cleaned).strip()          # squeeze whitespace
def clean_category(category):
    """Normalise a category name.

    Decodes '&amp;' back to '&', then one regex pass replaces any
    parenthesised suffix or whitespace run with a single space before
    stripping the ends.
    """
    decoded = category.replace('&amp;', '&')
    collapsed = re.sub(r'\s*\(.*?\)|\s+', ' ', decoded)
    return collapsed.strip()
def parse_rankings(text):
    """Parse a string holding one main ranking plus optional sub-rankings.

    Example input: "#990 in Clothing (...) #3 in Women's Loafers".
    Returns {"main_ranking": {...} | None, "sub_rankings": [...]} where the
    first "#<n> in <category>" hit becomes the main ranking and the rest
    become sub-rankings.
    """
    # "#<digits> in <category up to the next '#' or '('>"
    found = re.findall(r'#(\d+)\s*in\s*([^#(]+)', text)

    parsed = {
        "main_ranking": None,
        "sub_rankings": []
    }

    if not found:
        return parsed

    first_rank, first_category = found[0]
    parsed["main_ranking"] = {
        "mainrank": int(first_rank),
        "maincategory": clean_category(first_category)
    }

    for rank, category in found[1:]:
        parsed["sub_rankings"].append({
            "subrank": int(rank),
            "subcategory": clean_category(category)
        })

    return parsed
def parse_product_details(soup):
    """Parse the "Product details" section of an Amazon product page.

    Handles both page layouts: the bullet layout
    (``ul.detail-bullet-list li``) and the table layout
    (``table#productDetails_detailBullets_sections1``), which is only
    consulted when the bullet list is empty.  Returns a flat dict pre-seeded
    with defaults so every expected key exists even when absent from the
    page.  Weight is converted to kilograms; "Date First Available" is
    reformatted to Y-M-D.
    """
    result={}
    # Pre-seed defaults.  NOTE(review): several keys are initialised twice
    # (item_model_number, product_dimensions, package_dimensions,
    # best_sellers_rank) — harmless copy/paste residue.
    result['item_model_number']=''
    result['Department']=''
    result['publishdate']='1980-1-1'
    result['Manufacturer']=''
    result['item_model_number']=''
    result['ASIN']=''
    result['weight']=0
    result['product_dimensions']=''
    result['package_dimensions']=''
    result['best_sellers_rank']=''
    result['product_dimensions']=''
    result['package_dimensions']=''
    result['best_sellers_rank']=''
    result['brand']=''
    # Only used in the error message below; never assigned a real value here.
    asin=''
    base_items = soup.select('ul.detail-bullet-list li')

    # Debug pass: dump each bullet's raw text.
    for row in base_items:
        # Full text of the bullet row.
        content=row.get_text(strip=True)

        print(content)
    # Extract base info from the bullet layout.
    base_items = soup.select('ul.detail-bullet-list li')
    # NOTE(review): product_info is populated here but the final
    # ``result.update(product_info)`` is commented out at the bottom, so this
    # whole pass currently has no effect on the return value.
    product_info={}
    try:
        for item in base_items:
            # Bullet text, e.g. "ASIN : B0XXXXXXXXX".
            item_text = item.get_text(strip=True)

            # Only "label : value" bullets are parsed here.
            if ':' in item_text:
                # Split label from value.
                # NOTE(review): split(':')[1] drops anything after a second
                # ':' inside the value — confirm acceptable for these fields.
                title_part = item_text.split(':')[0].strip()
                value_part = item_text.split(':')[1].strip()

                # Store under a normalised key; U+200E marks are stripped.
                if 'Package Dimensions' in title_part:
                    product_info['package_dimensions'] = value_part.replace('\u200e', '')
                elif 'Department' in title_part:
                    product_info['department'] = value_part.replace('\u200e', '')
                elif 'Date First Available' in title_part:
                    product_info['date_first_available'] = value_part.replace('\u200e', '')
                elif 'Manufacturer' in title_part:
                    product_info['manufacturer'] = value_part.replace('\u200e', '')
                elif 'ASIN' in title_part:
                    product_info['asin'] = value_part.replace('\u200e', '')
                elif 'Best Sellers Rank' in title_part:
                    # Main rank, e.g. "#1,234 in ...".
                    main_rank_match = re.search(r'#([\d,]+) in', value_part)
                    if main_rank_match:
                        product_info['main_rank'] = int(main_rank_match.group(1).replace(',', ''))

                    # Sub-category ranks from the nested list.
                    sub_categories = []
                    sub_ranks = item.select('ul li')
                    for sub_rank in sub_ranks:
                        rank_text = sub_rank.get_text(strip=True)
                        sub_rank_match = re.search(r'#(\d+) in (.*)', rank_text)
                        if sub_rank_match:
                            sub_categories.append({
                                'rank': int(sub_rank_match.group(1)),
                                'category': sub_rank_match.group(2).strip()
                            })
                    product_info['sub_categories'] = sub_categories
                elif 'Customer Reviews' in title_part:
                    # Star rating.
                    rating_elem = item.select_one('span.a-size-base')
                    if rating_elem:
                        product_info['rating'] = float(rating_elem.get_text(strip=True))

                    # Review count, e.g. "1,234 ratings".
                    reviews_elem = item.select_one('span#acrCustomerReviewText')
                    if reviews_elem:
                        reviews_text = reviews_elem.get_text(strip=True)
                        reviews_count = re.search(r'([\d,]+)', reviews_text)
                        if reviews_count:
                            product_info['reviews_count'] = int(reviews_count.group(1).replace(',', ''))

    except Exception as e:
        print(f"解析产品 {asin} 时出错，0: {str(e)}")
        traceback.print_exc()
    # tempdict caches raw key/value pairs; NOTE(review): it is only filled in
    # the table-layout branch below, so the ASIN/weight/manufacturer lookups
    # after it are no-ops for bullet-layout pages.
    tempdict={}
    if len(base_items)==0:
        # NOTE(review): base_items2 and product_details are assigned but
        # never used.
        base_items2 = soup.select('ul.a-unordered-list.a-vertical.a-spacing-small li')

        table = soup.find('table', id='productDetails_detailBullets_sections1')

        product_details = {}

        # Walk every row of the details table.
        if table:
            for row in table.find_all('tr'):
                # NOTE(review): content is assigned but unused here.
                content=row.get_text(strip=True)
                # Header cell is the key, data cell the value — both
                # lower-cased up front.
                key = row.find('th').text.strip().lower()
                value = row.find('td').text.strip().lower()
                key=key.replace('item weight', 'weight')
                tempdict[key]=value
                # Rows where key == value hold the Best-Sellers-Rank blob.
                if key==value:
                    result.update(parse_rankings(value))
                else:
                    key=clean_spec_number(key)
                    # NOTE(review): dead branch — key was lower-cased above,
                    # so it can never equal 'Product Dimensions'.
                    if key=='Product Dimensions':
                        key='product_dimensions'
                    key=key.replace('Package Dimensions', 'package_dimensions')
                    # Any "... package/product dimensions" key is funnelled
                    # into product_dimensions.
                    if 'package' in key.lower() and 'dimensions' in key.lower():
                        key='product_dimensions'
                    if 'product' in key.lower() and 'dimensions' in key.lower():
                        key='product_dimensions'
                    value=clean_spec_number(value)
                    if key and value:
                        if key=='item model number'.lower():
                            key='item_model_number'
                        if key=='date first available'.lower():
                            key='publishdate'

                            # NOTE(review): strptime raises ValueError for any
                            # format other than "Month D, YYYY" — confirm the
                            # outer caller tolerates that.
                            date_obj = datetime.strptime(value, "%B %d, %Y")
                            formatted_date = f"{date_obj.year}-{date_obj.month}-{date_obj.day}"
                            value=formatted_date
                            print('publishdate',value)
                        if key=='best sellers rank'.lower():
                            key='best_sellers_rank'
                # Only keys already seeded in result are stored (lower-cased).
                if key.lower() in result.keys():
                    result[key.lower()] = value
                    print(key, 'in result')
                else:
                    print(key, 'not in result')
                print(key,value)
    result['ASIN']=tempdict['asin'] if  tempdict!={} and'asin' in tempdict else ''
    if  tempdict!={} and 'weight' in tempdict and tempdict['weight']!=None:
        weightinfo=tempdict['weight']
        match = re.match(r"(\d+\.?\d*)\s*(ounces|pounds)", weightinfo, re.IGNORECASE)
        if match:
            weight = float(match.group(1))
            unit = match.group(2).lower()

            # Convert imperial weight to kilograms.
            if unit == "ounces":
                result['weight']  = weight * 0.0283495
            elif unit == "pounds":
                result['weight']  = weight * 0.453592

    result['weightunit']='kg'
    result['Manufacturer']=tempdict['manufacturer'] if 'manufacturer' in tempdict else ''
    # NOTE(review): self-assignment — a no-op, probably leftover.
    result= result
    # Second pass over the bullet layout, writing straight into result this
    # time (keys keep their original capitalisation here, unlike the table
    # branch above).
    for index,item in enumerate(base_items):
        content=item.get_text(strip=True)
        key=content.split(':')[0].replace('\u200f', '').replace('\u200e', '').strip(':').replace('\n', '')
        if ':' in content:
            value=content.split(':')[1].replace('\u200f', '').replace('\u200e', '').strip(':').replace('\n', '')
        else:
            value=content
        # Rows without a "key: value" split carry the rankings blob.
        if key==value:
            result.update(parse_rankings(value))
        else:
            key=clean_spec_number(key)
            if key=='Product Dimensions':
                key='product_dimensions'
            key=key.replace('Package Dimensions', 'package_dimensions')
            if 'package' in key.lower() and 'dimensions' in key.lower():
                key='product_dimensions'
            if 'product' in key.lower() and 'dimensions' in key.lower():
                key='product_dimensions'
            value=clean_spec_number(value)
            if key and value:
                if key=='Item model number':
                    key='item_model_number'
                if key=='Date First Available':
                    key='publishdate'

                    # Same strptime caveat as the table branch above.
                    date_obj = datetime.strptime(value, "%B %d, %Y")
                    formatted_date = f"{date_obj.year}-{date_obj.month}-{date_obj.day}"
                    value=formatted_date
                    print('publishdate',value)
                if key=='Best Sellers Rank':
                    key='best_sellers_rank'
                result[key] = value


    # result.update(product_info)
    return result
def parse_ranking1(text):
    """Parse a single Amazon-style ranking string.

    Example: "#4 inWomen's Fashion Sneakers" ->
    {"rank": 4, "category": "Women's Fashion Sneakers"}.
    Returns None when the string does not start with "#<digits> in".
    """
    # '#' + digits, then 'in' with optional surrounding spaces, then the
    # remainder as the category name.
    hit = re.match(r'^#(\d+)\s*in\s*(.*)', text.strip())
    if hit is None:
        return None
    return {
        "rank": int(hit.group(1)),
        "category": hit.group(2).strip(),
    }
def parse_product_features(soup):
    """Collect the bullet-point feature texts of a product page.

    Tries the standard feature list first and falls back to the compact
    'a-spacing-mini' layout when it is empty.
    """
    bullets = soup.select('ul.a-unordered-list.a-vertical.a-spacing-small li')
    if not bullets:
        # Alternative layout used on some product pages.
        bullets = soup.select('li.a-spacing-mini')

    return [bullet.get_text(strip=True) for bullet in bullets]
def parse_sku_variant_details(soup):
    """Parse the colour/size variant swatches of a product page.

    FIX: removed a random ``time.sleep`` from the per-swatch loop — the HTML
    is already downloaded, so sleeping during pure parsing served no
    anti-bot purpose and only added up to ~0.4 s per variant.  Also removed
    the unused ``variants`` local.

    :param soup: BeautifulSoup of the product page
    :return: list of dicts, one per available/selected swatch, carrying the
             variant ASIN, a 500px image URL, the raw swatch attributes and
             optional image metadata
    """
    items = soup.find_all('li', class_=lambda x: x and ('swatchAvailable' in x or 'swatchSelect' in x))

    result = []

    for item in items:
        try:
            # Keep every raw HTML attribute for downstream consumers.
            attributes = dict(item.attrs)

            asin = attributes.get('data-defaultasin', '')
            img_tag = item.find('img', class_='imgSwatch')

            variant_data = {
                "asin": asin,
                # Swap the 47px thumbnail suffix for the 500px one.
                "image_url": img_tag['src'].replace('_SS47_', '_SL500_') if img_tag else '',
                "attributes": {
                    # Core attributes
                    "data-csa-c-item-id": attributes.get('data-csa-c-item-id', ''),
                    "data-defaultasin": asin,
                    "data-dp-url": attributes.get('data-dp-url', ''),
                    "id": attributes.get('id', ''),
                    "title": attributes.get('title', '').replace('Click to select ', ''),

                    # Extended attributes
                    "class": attributes.get('class', []),
                    "data-csa-c-type": attributes.get('data-csa-c-type', ''),
                    "data-csa-c-slot-id": attributes.get('data-csa-c-slot-id', ''),
                    "data-csa-c-content-id": attributes.get('data-csa-c-content-id', '')
                },
                # Image metadata (None when the swatch has no image)
                "image_attributes": {
                    "alt": img_tag.get('alt', '') if img_tag else '',
                    "dimensions": img_tag.get('style', '') if img_tag else ''
                } if img_tag else None
            }

            result.append(variant_data)
        except Exception as e:
            print(f"解析错误: {str(e)}")
            continue

    return result
def amazon_scraper_images(soup):
    """Extract the main image, hi-res image, thumbnail gallery and video cover.

    Returns a dict with 'main_image', 'hires_image', 'image_list' and — only
    when a video thumbnail exists — 'video_cover'.  On any parse failure the
    error is logged and 'image_list' is forced to an empty list.
    """
    product_data = {}

    try:
        # Main product image (query strings stripped from URLs throughout).
        main_img = soup.select_one('div#imgTagWrapperId img')
        if main_img:
            product_data['main_image'] = main_img.get('src', '').split('?')[0]
            product_data['hires_image'] = main_img.get('data-old-hires', '').split('?')[0]
        else:
            product_data['main_image'] = ''
            product_data['hires_image'] = ''

        # Thumbnail gallery: skip template rows, video rows and placeholders.
        gallery = []
        for entry in soup.select('ul.a-unordered-list li:not(.template):not(.videoThumbnail)'):
            if entry.select_one('.placeHolder'):
                continue
            thumb = entry.find('img')
            if thumb and thumb.get('src'):
                # Upgrade the 40px thumbnail suffix to the 500px one.
                gallery.append(thumb['src'].replace('_AC_US40_', '_AC_SL500_').split('?')[0])

        # Video cover thumbnail, when present.
        video_thumb = soup.select_one('li.videoThumbnail img')
        if video_thumb and video_thumb.get('src'):
            product_data['video_cover'] = video_thumb['src'].replace('_SS40_', '_SL500_').split('?')[0]

        product_data['image_list'] = gallery

    except Exception as e:
        logger.error(f"图片解析失败: {str(e)}")
        product_data['image_list'] = []
    return product_data
 # Anti-scraping countermeasure handling
def _get_jsondata(soup):
    """Extract the ``var dataToReturn = {...}`` JSON blob from an inline script.

    NOTE(review): the ``text=`` argument is deprecated in recent
    BeautifulSoup versions (use ``string=``), and both ``soup.find`` and
    ``re.search`` can return None here, which would raise AttributeError —
    confirm callers expect that failure mode before changing it.
    """
    # Parse the JavaScript data blob.
    script = soup.find('script', text=re.compile('var dataToReturn'))
    return json.loads(re.search(r'({.*})', script.text.replace('\n','')).group(1))

def parse_product_rank(rank_str):
    """Parse an Amazon "Best Sellers Rank" string by splitting on '#'.

    Input example: "#990 in Clothing...#3 inWomen's Loafers...".
    The returned dict always carries the same four keys; values that cannot
    be parsed keep their zero/empty-string defaults.
    """
    parsed = {
        'main_rank': 0,
        'maincategory': '',
        'sub_rank': 0,
        'sun_subcategory': ''   # key name kept as-is for caller compatibility
    }

    # Normalise " in " to " in" so spaced and fused variants split alike,
    # then squeeze all whitespace runs to single spaces.
    normalised = ' '.join(rank_str.replace(' in ', ' in').strip().split())

    try:
        # ---- main ranking: text between the first '#' and the next one ----
        after_hash = normalised.split('#', 1)[1]
        main_section = after_hash.split('#')[0] if '#' in after_hash else after_hash

        if ' in' in main_section:
            rank_text, category_text = main_section.split(' in', 1)
            parsed['main_rank'] = int(rank_text.replace(',', '').strip())
            # Drop any parenthesised remainder and squeeze spaces.
            parsed['maincategory'] = ' '.join(category_text.split('(')[0].strip().split())

        # ---- sub ranking: everything from the second '#' onward ----
        if '#' in normalised:
            sub_section = '#'.join(normalised.split('#')[2:])
            if ' in' in sub_section:
                sub_rank_text, sub_category_text = sub_section.split(' in', 1)
                parsed['sub_rank'] = int(sub_rank_text.strip())
                parsed['sun_subcategory'] = sub_category_text
    except (IndexError, ValueError):
        pass  # keep the zero/empty defaults

    return parsed
def parse_dimensions(raw_str):
    """Parse an Amazon dimension/weight string and convert to metric units.

    Input examples:
        "10.12 x 7.09 x 3.54 inches; 8.8 ounces"
        '10"D X 7"W X 3"H'

    Returns (always the same six keys; 0 for anything that failed to parse):
        {'length': cm, 'width': cm, 'height': cm,
         'weight': kg,
         'lengthunit': 'cm', 'weightunit': 'kg'}
    (Docstring fix: the weight key is 'weight', not 'weight_kg'.)
    """
    INCH_TO_CM = 2.54
    OUNCE_TO_KG = 0.0283495
    POUND_TO_KG = 0.453592

    result = {
        'length': 0,
        'width': 0,
        'height': 0,
        'weight': 0,
        'lengthunit': 'cm',
        'weightunit': 'kg'
    }

    try:
        # Dimensions and weight are separated by ';'
        parts = [p.strip() for p in raw_str.split(';')]

        # ========= dimensions: "<L> x <W> x <H> inches" =========
        if parts and 'x' in parts[0]:
            # Lower-case, strip the unit word and the D/W/H/L marker letters
            # plus inch quotes, leaving only numbers and separators.
            dim_part = (
                parts[0].lower()
                .replace('inches', '').replace('inch', '')  # singular too
                .replace('d', '').replace('w', '').replace('h', '')
                .replace('l', '').replace('"', '')
                .strip()
            )
            dimensions = []
            try:
                # accept 'x' and '×' as separators
                dimensions = [float(n) for n in dim_part.replace('×', 'x').replace(' ', '').split('x')]
            except Exception:
                traceback.print_exc()

            if len(dimensions) >= 3:
                result['length'] = round(dimensions[0] * INCH_TO_CM, 2)
                result['width'] = round(dimensions[1] * INCH_TO_CM, 2)
                result['height'] = round(dimensions[2] * INCH_TO_CM, 2)

        # ========= weight: "... ounces" or "... pounds" =========
        if len(parts) > 1:
            weight_part = parts[1].lower()
            # Bug fix: singular "ounce"/"pound" are now recognized as well.
            if 'ounce' in weight_part:
                weight_str = weight_part.replace('ounces', '').replace('ounce', '').strip()
                result['weight'] = round(float(weight_str) * OUNCE_TO_KG, 2)
            elif 'pound' in weight_part:
                weight_str = weight_part.replace('pounds', '').replace('pound', '').strip()
                result['weight'] = round(float(weight_str) * POUND_TO_KG, 2)
    except Exception:
        traceback.print_exc()

    # Fallback for the '10"D X 7"W X 3"H' style (uppercase ' X ' separators,
    # which the first branch's lowercase 'x' test does not match).
    if result['height'] == 0:
        try:
            parts2 = raw_str.replace('d', '').replace('w', '').replace('h', '').replace('l', '').upper().split(' X ')
            if len(parts2) >= 3:  # bug fix: was >= 2, which crashed on parts2[2]
                depth = float(parts2[0].replace('"D', ''))
                width = float(parts2[1].replace('"W', ''))
                height = float(parts2[2].replace('"H', ''))
                result['length'] = depth * INCH_TO_CM
                result['width'] = width * INCH_TO_CM
                result['height'] = height * INCH_TO_CM
        except Exception:
            traceback.print_exc()
    return result

 
# Example output: {'length': 25.7, 'width': 18.01, 'height': 8.99, 'weight': 0.25}
def captureamz(keywordsdict,page=1,AmazonCrawler_instance=None):
    """Fetch one Amazon search-results page and return its parsed product list.

    Args:
        keywordsdict: dict carrying 'keywords' (search term) and
            'downloaddetailatonce' (detail-download flag; see NOTE below).
        page: 1-based search-results page number.
        AmazonCrawler_instance: object providing request_page(url); only used
            by the (currently disabled) per-product detail branch.

    Returns:
        The list produced by amazon_scraper() for the search page. The
        enriched ``newproductdatas`` list is built but NOT returned.
    """
    product_data_all=[]
    params = {
        'k': keywordsdict['keywords'],
        'crid': '24FYLBYMFX1AY',
        'refresh': '1',
        'sprefix': f"{keywordsdict['keywords']},aps,1311",
        'ref': 'glow_cls',
        'page': f'{page}',
        }
    # Rotate the shared proxy every 100 calls.
    # NOTE(review): lastproxy is refreshed here but never handed to
    # requests.get below, so the proxy appears unused — confirm intent.
    global proxyusetimes2,lastproxy
    proxyusetimes2=proxyusetimes2+1
    if proxyusetimes2   >100:
        lastproxy=getproxy_zly()
        proxyusetimes2=0
    response = requests.get('https://www.amazon.com/s', params=params, cookies=cookies, headers=headers)
    print(response.status_code)
    product_data = amazon_scraper(response.text)
    newproductdatas=[]
    downloaddetailatonce=keywordsdict['downloaddetailatonce']
    # NOTE(review): the caller-supplied flag is immediately overridden to 0,
    # permanently disabling the detail-download branch below — looks like a
    # debug leftover; confirm before removing.
    downloaddetailatonce=0
    if downloaddetailatonce==1:
        for index,iasindata in enumerate(product_data):

            print(index,time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),index,iasindata) 
            newproductdata=AmazonCrawler_instance.request_page(iasindata['link'])
            soup = BeautifulSoup(newproductdata.text,'html.parser')
            
            # a: product details, b: feature bullets, c: SKU variants, d: images
            a=None
            b=None
            c=None
            d=None
            try:
                a=parse_product_details(soup)
            except Exception as e:
                print(f"解析产品a {iasindata['link']} 时出错: {str(e)}")
                traceback.print_exc()
                # One blind retry on the same soup (same input, so it will
                # usually fail identically).
                try:
                    a=parse_product_details(soup)
                except Exception as e:
                    print(f"解析产品a {iasindata['link']} 时出错: {str(e)}")
                    traceback.print_exc()
            try:
                b=parse_product_features(soup)
                # Pad the feature list to at least 4 entries.
                # NOTE(review): this inner 'index' shadows the enumerate loop
                # variable above.
                for index in range(len(b)+1,5):
                    b.append('')
            except Exception as e:
                print(f"解析产品b {iasindata['link']} 时出错: {str(e)}")
                traceback.print_exc()
            try:
                # NOTE(review): 'c' (SKU variants) is parsed but never merged
                # into iasindata below.
                c=parse_sku_variant_details(soup)
            except Exception as e:
                print(f"解析产品c {iasindata['link']} 时出错: {str(e)}")
                traceback.print_exc()
            try:
                d=amazon_scraper_images(soup)
            except Exception as e:
                print(f"解析产品d {iasindata['link']} 时出错: {str(e)}")
                traceback.print_exc()
            mainrank={}
            try:
                mainrank=a['main_ranking']
            except Exception as e:
                print(f"解析产品 {iasindata['link']} 时出错: {str(e)}")
                traceback.print_exc()
            subrank={}
            try:
                subrank=a['sub_rankings']
            except Exception as e:
                print(f"解析产品 {iasindata['link']} 时出错: {str(e)}")
                traceback.print_exc()
            # a.update(mainrank)
            # a.update(subrank)
            # NOTE(review): unguarded access — if 'best_sellers_rank' is
            # missing from a (or a is None after failed parsing) this raises
            # and aborts the whole page.
            ranks=parse_product_rank(a['best_sellers_rank'])
            a.update(ranks)
            dimensions={}
            try:
                dimensions=parse_dimensions(a['product_dimensions'])
            except Exception as e:
                print(f"解析产品 {iasindata['link']} 时出错: {str(e)}")
                traceback.print_exc()
            if 'product_dimensions' not in a:
                a['product_dimensions']=''
            a.update(dimensions)
            iasindata.update(a)

            # Flatten feature bullets into detail1..detail4 columns.
            dictb={}
            for iindex,  itemx in enumerate(b,start=1):
                dictb[f'detail{iindex}'] = itemx
            for iindex in range (len(b)+1,5):
                dictb[f'detail{iindex}'] = ''
            iasindata.update(dictb)
            iasindata.update(d)
            newproductdatas.append(iasindata)
            

    # time.sleep(random.randint(1, 2) )
    # return product_data 
    return product_data
#     for index, item in enumerate(product_data):
#         print(index,time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),item) 
#     time.sleep(random.randint(5,   8) )
# for inxx in range(3):
#     product_data_all=captureamz('power bank',inxx+1)
#     print(product_data_all)

