import requests
from fake_useragent import UserAgent
import time
import random
import pytz
from datetime import datetime
from proxy_zly import getproxy_zly
# Module-level proxy state shared by AmazonCrawler.request_page:
# `lastproxy` holds the proxy currently obtained from the zly provider;
# `proxyusetimes` counts requests so the proxy can be refreshed after
# every 100 uses (see request_page).
lastproxy=getproxy_zly()
proxyusetimes=0
from excelhelper_xue import excelhelper_xue

class AmazonCrawler:
    """Fetch Amazon pages with randomized browser-like headers, retries,
    and periodic proxy rotation."""

    def __init__(self):
        # Random desktop-Chrome user agents; the matching client-hint
        # headers are generated fresh for every request.
        self.ua = UserAgent(browsers=['chrome'], os='windows')
        self.local_tz = pytz.timezone('America/Los_Angeles')
        self.session = requests.Session()
        self.session.verify = False  # test environments only — do not ship with TLS verification off

    def generate_headers(self):
        """Build request headers matching Amazon's current client-hint expectations.

        Returns:
            dict: header name -> value, with randomized client hints
            (device-memory, downlink, dpr, ect, rtt), a randomized Chrome
            fingerprint, a random user agent, and a Date header in the
            configured local timezone.
        """
        now = datetime.now(self.local_tz)
        return {
            'authority': 'www.amazon.com',
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8',
            'accept-language': 'en-US,en;q=0.9',
            'cache-control': 'max-age=0',
            'device-memory': str(random.choice([4, 8, 16])),
            'downlink': str(round(random.uniform(5.0, 15.0), 1)),
            'dpr': str(round(random.uniform(1.0, 2.0), 1)),
            'ect': random.choice(['4g', '3g']),
            'rtt': str(random.randint(50, 300)),
            'sec-ch-ua': self._get_browser_fingerprint(),
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest': 'document',
            'sec-fetch-mode': 'navigate',
            'sec-fetch-site': 'none',
            'sec-fetch-user': '?1',
            'upgrade-insecure-requests': '1',
            'user-agent': self.ua.random,
            'x-requested-with': 'XMLHttpRequest',
            'date': now.strftime('%a, %d %b %Y %H:%M:%S GMT')
        }

    def _get_browser_fingerprint(self):
        """Return a sec-ch-ua value for a randomly chosen recent Chrome release."""
        versions = [
            ('122', '24'),  # Chrome 122
            ('121', '23'),  # Chrome 121
            ('120', '22')   # Chrome 120
        ]
        version = random.choice(versions)
        return f'"Chromium";v="{version[0]}", "Not?A_Brand";v="{version[1]}", "Google Chrome";v="{version[0]}"'

    def request_page(self, url, max_retries=5):
        """GET `url` with retries, proxy rotation, and block-page detection.

        Args:
            url: absolute URL to fetch.
            max_retries: number of attempts before giving up.

        Returns:
            requests.Response on success, or the string '-1' once every
            retry has failed (sentinel kept for backward compatibility —
            callers must check for it before using the result).
        """
        global proxyusetimes, lastproxy
        for attempt in range(max_retries):
            try:
                # Short random pre-request delay to look less bot-like.
                time.sleep(random.uniform(0.5, 0.8))
                # Rotate to a fresh proxy every 100 requests.
                proxyusetimes += 1
                if proxyusetimes > 100:
                    lastproxy = getproxy_zly()
                    proxyusetimes = 0
                response = self.session.get(
                    url,
                    headers=self.generate_headers(),
                    # Fix: the rotated proxy was fetched but never applied
                    # to the request.  NOTE(review): assumes getproxy_zly()
                    # returns a requests-style proxies mapping — confirm.
                    proxies=lastproxy,
                    timeout=(8, 12),
                    allow_redirects=False,
                    verify=False  # redundant with session.verify; kept explicit for clarity
                )

                # Heuristics for throttled / blocked responses.
                if response.status_code == 503:
                    raise Exception('触发服务不可用状态')
                # NOTE(review): with allow_redirects=False the response URL
                # normally equals the request URL — confirm this check still
                # catches captcha pages in practice.
                if 'captcha' in response.url:
                    raise Exception('检测到验证码页面')
                if len(response.content) < 5000:
                    raise Exception('响应内容过短')

                return response

            except Exception as e:
                print(f"Attempt {attempt+1} failed: {str(e)}")
                if attempt == max_retries - 1:
                    return '-1'
                # True exponential backoff with jitter — the previous code
                # claimed exponential backoff but slept a flat <0.5 s.
                time.sleep((2 ** attempt) * 0.5 + random.random() * 0.5)

if __name__ == '__main__':
    crawler = AmazonCrawler()
    # Only fetch products whose detail1 column has not been filled yet.
    sql = 'select * from app_product_xue where detail1 is null'
    results = excelhelper_xue.execute_sql(sql)
    for row in results:
        try:
            print(row['link'])
            response = crawler.request_page(row['link'])
            # Fix: request_page returns the sentinel string '-1' after all
            # retries fail; the old code called .status_code on it and the
            # resulting AttributeError was masked as "Final failure".
            if response == '-1':
                print(f"Giving up on {row['link']}")
                continue
            print(f"Success! Status code: {response.status_code}")
            print(f"Content length: {len(response.text)}")
        except Exception as e:
            print(f"Final failure: {str(e)}")