import requests
import random
import time
from fake_useragent import UserAgent

class AmazonCrawler:
    """Crawler for Amazon product pages.

    Combines a rotating proxy, a pool of session cookies, randomized
    browser-like headers, and retry-with-backoff handling for anti-bot
    responses (403/429/503) and captcha pages.
    """

    def __init__(self):
        self.session = requests.Session()
        self.ua = UserAgent()
        # Placeholder proxy credentials — replace with a real endpoint
        # before use.
        self.proxies = {
            'http': 'http://user:pass@proxy_ip:port',
            'https': 'http://user:pass@proxy_ip:port'
        }
        # Pool of cookie sets; one is picked at random per request to
        # spread the session fingerprint.
        self.cookie_pool = [
            {
                'session-id': '138-6891760-8093012',
                'session-id-time': '2082787201l',
                'i18n-prefs': 'USD',
                'lc-main': 'en_US',   # US/English storefront
                'sp-cdn': 'L5Z9:US',  # US CDN hint
                'skin': 'noskin'
            },
            # Add more fallback cookie sets here.
        ]

    def _get_dynamic_headers(self):
        """Build a randomized, browser-like header set for one request.

        The Chromium version in ``sec-ch-ua`` and the user-agent string
        vary on every call to reduce fingerprinting.
        """
        return {
            'authority': 'www.amazon.com',
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'accept-language': 'en-US,en;q=0.9',  # English-language storefront
            'sec-ch-ua': f'"Chromium";v="{random.randint(90, 110)}", "Not A;Brand";v="{random.randint(8, 99)}"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest': 'document',
            'sec-fetch-mode': 'navigate',
            'sec-fetch-site': 'none',
            'sec-fetch-user': '?1',
            'upgrade-insecure-requests': '1',
            'user-agent': self.ua.random
        }

    def _rotate_cookie(self):
        """Return a randomly chosen cookie dict from the pool."""
        return random.choice(self.cookie_pool)

    def _request_with_retry(self, url, max_retries=3):
        """GET *url* with retries, exponential backoff and block handling.

        Returns the ``requests.Response`` on success, or ``None`` when
        every attempt was blocked. Raises ``CaptchaTriggeredException``
        when a captcha page is detected, and re-raises the last network
        error after the final attempt.
        """
        for attempt in range(max_retries):
            try:
                response = self.session.get(
                    url,
                    headers=self._get_dynamic_headers(),
                    cookies=self._rotate_cookie(),
                    proxies=self.proxies,
                    timeout=(3.05, 27),  # (connect, read) timeouts
                    allow_redirects=False
                )

                # Captcha detection. With allow_redirects=False the
                # final URL never changes on a redirect, so checking
                # response.url alone misses redirect-based captchas —
                # also inspect the Location header of the response.
                redirect_target = response.headers.get('Location', '')
                if 'captcha' in response.url or 'captcha' in redirect_target:
                    raise CaptchaTriggeredException()

                # Anti-bot block detection: back off and retry.
                if response.status_code in (403, 429, 503):
                    self._handle_block(attempt)
                    continue

                return response

            except requests.exceptions.RequestException as e:
                print(f"请求异常: {str(e)}")
                if attempt == max_retries - 1:
                    raise  # out of retries: surface the network error
                time.sleep(2 ** attempt)  # exponential backoff

        return None  # every attempt hit a block response

    def _handle_block(self, attempt):
        """React to an anti-bot block: sleep with jitter, reset cookies.

        The sleep grows linearly with the attempt number on top of a
        random 5–15 s base to avoid a predictable retry cadence.
        """
        sleep_time = random.uniform(5, 15) * (attempt + 1)
        print(f"检测到封禁，等待{sleep_time:.1f}秒后重试")
        time.sleep(sleep_time)

        # TODO: rotate to a different proxy here.

        # Drop any cookies the blocked session accumulated.
        self.session.cookies.clear()

    def crawl_product(self, url):
        """Fetch and parse one product page.

        Returns the parsed field dict, or ``None`` on block, captcha,
        or non-200 response.
        """
        try:
            response = self._request_with_retry(url)
            if response and response.status_code == 200:
                return self.parse_product(response.text)
            return None
        except CaptchaTriggeredException:
            print("触发验证码，需要人工干预")
            return None

    def parse_product(self, html):
        """Extract product fields from page HTML via XPath.

        Returns a dict with 'title', 'price' and 'rating'. Fields that
        are absent from the page yield ``None`` instead of raising
        IndexError (the original code indexed ``[0]`` unconditionally
        and crashed on any layout variation).
        """
        # lxml is used instead of BeautifulSoup for parsing speed.
        from lxml import html as lxml_html
        tree = lxml_html.fromstring(html)

        def first(xpath):
            # Safe first-match: None when the expression finds nothing.
            nodes = tree.xpath(xpath)
            return nodes[0] if nodes else None

        title = first('//span[@id="productTitle"]/text()')
        rating = first('//span[@class="a-icon-alt"]/text()')
        return {
            'title': title.strip() if title else None,
            'price': first('//span[@class="a-price-whole"]/text()'),
            'rating': rating.split()[0] if rating else None,
            # Other fields...
        }

class CaptchaTriggeredException(Exception):
    """Raised when Amazon serves a captcha page instead of content."""

# Usage example
if __name__ == '__main__':
    crawler = AmazonCrawler()
    product = crawler.crawl_product('https://www.amazon.com/dp/B0CWDW9M5V')
    print(product)