
import requests
from bs4 import BeautifulSoup

def amazon_spider():
    """Fetch a fixed list of Amazon product pages and parse them.

    Sends an initial request to the Amazon homepage so the session picks up
    fresh cookies, then requests each product URL with browser-like headers
    and hands a 200 response to BeautifulSoup. Results of parsing are handled
    inline; the function returns None.
    """
    # Browser-like request headers shared by every request.
    headers = {
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'accept-language': 'zh-CN,zh;q=0.9',
        'dpr': '1',
        'ect': '3g',
        'priority': 'u=0, i',
        'sec-ch-ua': '"Chromium";v="134", "Not:A-Brand";v="24", "Google Chrome";v="134"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
    }

    # Product pages to scrape.
    urls = [
        'https://www.amazon.com/Non-Skid-Decorative-Bookshelf-Apartment-Bookshelves/dp/B0CWDW9M5V',
        'https://www.amazon.com/SUNMORY-Bookshelf-Bookcase-Bookshelves-Organizer/dp/B0C88XTSMM',
        'https://www.amazon.com/HuggieGems-Magnetic-Organizer-Refrigerator-Organizers/dp/B0C3H9MJQV'
    ]

    # NOTE: the original hard-coded session cookies were removed — they were
    # built in a loop but never sent, and the session below acquires fresh
    # cookies dynamically from the homepage request instead.

    # Extra headers for the product-page requests, merged into a NEW dict.
    # (The original passed headers.update({...}) directly as the headers=
    # argument, but dict.update() returns None, so every product request was
    # actually sent with no headers at all — and the base dict was mutated.)
    page_headers = {
        **headers,
        'referer': 'https://www.amazon.com/',
        'sec-fetch-site': 'same-origin',
    }

    with requests.Session() as req:
        # Initial homepage request: the session stores whatever cookies
        # Amazon sets here and sends them automatically on later requests.
        init_response = req.get(
            'https://www.amazon.com/',
            headers=headers,
            timeout=10
        )
        print(init_response.cookies)

        for url in urls:
            try:
                response = req.get(
                    url,
                    headers=page_headers,
                    timeout=10
                )
            except requests.RequestException as e:
                # Network-level failure (DNS, timeout, connection reset…):
                # report it and move on to the next URL.
                print(f"请求异常：{str(e)}")
                continue

            if response.status_code == 200:
                soup = BeautifulSoup(response.text, 'html.parser')
                # ... existing parsing logic ...
            else:
                print(f"请求失败，状态码：{response.status_code}")

 

# Run the scraper only when executed directly, not when imported.
if __name__ == '__main__':
    amazon_spider()