import requests
from lxml import etree
import time
from bs4 import BeautifulSoup
from lxml import html
from lxml import etree
from random import randint
import random
import re

# 给定的字符串
# cookie失效，从浏览器中获取
# input_str = '___rl__test__cookies=1732282054562; __uzma=44f16387-7e6e-4e9c-b855-1bbb1af3c12f; __uzmb=1732281950; __uzme=9335; AMP_MKTG_f93443b04c=JTdCJTdE; __ssds=2; __ssuzjsr2=a9be0cd8e; __uzmaj2=9eb0239c-d695-4ae0-979f-55f77a805fa1; __uzmbj2=1732281997; __uzmcj2=882031066368; __uzmdj2=1732281997; __uzmlj2=rZ+Mb7cI5ztRk+XTgJk+y4CKYY+nmL878pJpMdELWK0=; __uzmfj2=7f600069bdd05b-9edb-47f7-ada0-199b6251e7b717322819972780-84b83f464a29541710; AMP_f93443b04c=JTdCJTIyZGV2aWNlSWQlMjIlM0ElMjIzMjUzYTA1Mi0wOTJkLTQxNDEtODAzMi0wMTg1YTFkZjhlOWElMjIlMkMlMjJzZXNzaW9uSWQlMjIlM0ExNzMyMjgxOTU1NTE1JTJDJTIyb3B0T3V0JTIyJTNBZmFsc2UlMkMlMjJsYXN0RXZlbnRUaW1lJTIyJTNBMTczMjI4MjAwMTYzNSUyQyUyMmxhc3RFdmVudElkJTIyJTNBNSUyQyUyMnBhZ2VDb3VudGVyJTIyJTNBNCU3RA==; utag_main__sn=1; utag_main__se=1%3Bexp-session; utag_main__ss=1%3Bexp-session; utag_main__st=1732283803383%3Bexp-session; utag_main_ses_id=1732282003383%3Bexp-session; utag_main__pn=1%3Bexp-session; __gads=ID=108fa724bff92b24:T=1732282002:RT=1732282002:S=ALNI_MY8N3rstbJUoR2XHvxxl-ziOfA9Ew; __gpi=UID=00000f726ae6260e:T=1732282002:RT=1732282002:S=ALNI_MYb-u9jiMUZQS58QP6nlorWUgNusw; __eoi=ID=c492e2f444750121:T=1732282002:RT=1732282002:S=AA-Afja8qCfaKW1mG6oZpGyf0OBu; __deba=wtQ5_Ozclbh8qUugWzo9c_c9SEKcnAWG4Q-uyxwpJpsl3Rgzfd6wnjF4dRDW203c1jV2aI0Nti2tPkn-AJlBVEteYBxR1C1nWImyt8ZyS1uVBIDM83DkQKOYBHm3q-NW-21IU2tfzON5KIgx9sb_ZA==; _gcl_au=1.1.873822329.1732282005; _scid=SUBqT6Wm3ExfWeV91Wea2ENVJsoG4djK; _scid_r=SUBqT6Wm3ExfWeV91Wea2ENVJsoG4djK; __uzmc=841321665701; __uzmd=1732282006; __uzmf=7f600069bdd05b-9edb-47f7-ada0-199b6251e7b7173228195051756475-93f5b887f892bb2d16; cto_bundle=SwYCbl9pdVdYTUlDY1EzSlJQSURJWlVuSEVscGE5dklkcjhQQmZZbXNvTXBmbFpDTHQzUkNySU5hbktjJTJCamp6cU8lMkYxQkthekQ2cXVxUThPVE40S1NvdVIxWjRVemFFa2txOHl0OHZtakxDMUVyMUZaM1pxNnY2NkphWnMyZTZyV3M4eEJJQnpxVnlHYzRhazBBV3hBbXF6bTJSelZyVVU4bkhoVnh6M0wlMkZueXlETDB3UURkQThHeXlaODFKeXdDdUVUcUdweUVZMk1aOVB5bzFZaHkyclREQjNRJTNEJTNE; 
# ak_bmsc=2AE1C93A38C6FD4BD33313C7298D5963~000000000000000000000000000000~YAAQEtgjF8yx8i+TAQAASuwNVBncQnlGBmoxDfkKncJDGA1appOtcalcklt7pvFESRb3doYBXi7FApOVxweKr0Bsyy98GK1+uNmGgnMdfhoDSp2usoy3KUEKCCdnKsLz8bkEKlovwLohk2aPXO6EnxOj1TnZC6lSTW6xlHQEaDiSnfhuSEn6cH5qMbwjg44dD2nn3NyRBQuZDVyQ/be8VRat2CW0JWl2zBfH6Hywb1XLIcr3UCYi4a4cvJ/tMmXF1nkI1DuuHE/KhF24LwbVPIsEHoHh3S2NBsf9U6qHLkhpl53hfHAaH/GuZp/Jnj7PbKuZxq9bewCLfCMVj40ojz//efXDL4gKu6dbE8igfWFSlg0cFtusQaix0tBzNgH9mO5OoW8tc4ZVnOJry7JWFIj2NGAir7GaCHs=; s=CgAD4ACBnQdg0NTQwY2Q0YTAxOTMwYWIxMmJhODYxYWNhZmZmZmIxMTlVM/na; bm_sv=27A555348A097DBABCCD37DE952B3580~YAAQLfAgFxPXwi6TAQAA0TIOVBlFh5ugCENlCdEyg0UOalmWymdjc5TmXWS366hvbYSgnG0JrHyLQ5kRWHKsDIBQ00RXwQuWFOS14vZIZM8P8lV0o/1dRg5quHli22lXVqqfPhbm9DtX7NtN+9ef2edGlwc5tNvHYz+GSv1wgynhmJcW3g6ndy3OwChUs+drpiXAYEVV16/Xdmzs0MQj2WQ9Doyh/IXaOJS0pnbblfdpMatX+lnySys05g8mf4o=~1; dp1=bu1p/QEBfX0BAX19AQA**6b02edb9^pbf/%23e000a000000000000000006921ba39^bl/JPen-US6b02edb9^; ns1=BAQAAAZJU03gwAAaAANgAU2khujljNjl8NjAxXjE3MzIyODE5OTY0NzBeXjFeM3wyfDV8NHw3fDEwfDQyfDQzfDExXl5eNF4zXjEyXjEyXjJeMV4xXjBeMV4wXjFeNjQ0MjQ1OTA3NaTjgdCvrqd/wdeONwre6m3Acy4w; nonsession=BAQAAAZJU03gwAAaAADMACGkhujk1NjItMDAwMQDKACBrAu25NTQwY2Q0YTAxOTMwYWIxMmJhODYxYWNhZmZmZmIxMTkAywACZ0CNwTI0hgbHYpo9V27Q2V0EaT1ewXZOCH4*; OUTFOX_SEARCH_USER_ID_NCOO=355185825.82951874; ds2=sotr/b8_5az10JNfz^; ebay=%5Ejs%3D1%5Esbf%3D%23000000%5Epsi%3DADZSdCLA*%5E'
# Cookie string captured from a logged-in browser session. It expires — when
# requests start failing, re-capture it from the browser's devtools.
input_str = 's=CgAD4ACBnSnOSNzVhZDFhMTIxOTMwYTI0NDY3YjY0Y2VhZmZmZWNlZDTJweci; ak_bmsc=10F35584F1C9191E5F9FDC4C3E7C90F9~000000000000000000000000000000~YAAQz7khF2ZP6DCTAQAAlRqtdRn1KbbOMQ8mht5sDg3N1x0p/NleJAeIa+2uveTSryzRs357/5Kgv/rLr8YZz7JuDPySgyjbCFM2k1aYYyJELPe6MC6npUlz1GFaIy4T7kohKQ41V9ziuZcCtU32EwzBkE4qNjxDTJZM1ZfU80/yg7dy7vQMtdiVhQGZx1fxjPHc5RDdWAE5/IIH39kGayPpZpeZpJv21LsJd4j4cdGyVISNd73D5dMYqMfx9QdWzuqsVIC1auT4qBpcPXtiBOIqPbpJ9RME8ZG1TUk/spgqDc6MGRG8phE9x1uc5HTZXQDAr9Q/unU7JFRrB88CwUp6NRLK/BUMPURaReAqBGTjdZ8Vv/okE5wcJ0xqcdnQsl5evyMGk4E=; __uzma=777face8-807c-4a18-aba1-2e568015d052; __uzmb=1732846101; __uzme=6809; __gads=ID=ad1cb6635a2b5a55:T=1732846102:RT=1732846102:S=ALNI_Maotj73Rnj35cgp50Qh_CBE0b9yPQ; __gpi=UID=00000fa22c83092d:T=1732846102:RT=1732846102:S=ALNI_MZ_Pnz6chs4AbYZEroMkhLKDCNSzA; __eoi=ID=3bbb54b675e2297c:T=1732846102:RT=1732846102:S=AA-AfjbNq91xQeAoB2jwvIMSygMm; AMP_MKTG_f93443b04c=JTdCJTdE; __uzmc=383281324462; __uzmd=1732846106; __uzmf=7f6000bb5cd6e1-05bd-4c3b-8148-7e476ab6a2d917328461015435313-bb78352adf72c04d13; __ssds=2; __ssuzjsr2=a9be0cd8e; __uzmaj2=b7ab5839-58b4-4859-8622-bf94dd7c39d7; __uzmbj2=1732846108; __uzmcj2=832261048067; __uzmdj2=1732846108; __uzmlj2=8LoqUPdqH41ef9bIaKGPzA3jZBQel49hm3b30yXEllw=; __uzmfj2=7f6000bb5cd6e1-05bd-4c3b-8148-7e476ab6a2d917328461084900-beda33f943ffc01a10; bm_sv=15589B438B5B15AC9A4944D39A880DF8~YAAQz7khF85Y6DCTAQAA+letdRm2k1tGSIsM1Z1t5LmlebdZcAX7D6tlHMhCkHG9/D4DCiwHgRR2v165aiOEyRe7WoC2psCAjVo1rS8ezhZWrKGsf93WHK/2mp1RRYTtXC+Khm7R+DqXlYihKnsR1TPknnRsFplw/reFpexc5C6qN1ndyrDYV8j857LOoZXEPes3ccEDfFUq69gtNZ++mnaK2kaP6IR1XzZzP/nLQGnAqTF07+QsEQVAkUfk4w==~1; __deba=WMzBcqGxe3PPgiEQ7Pz2ONCo3iH_w2ezyieYD8NxQgWhdrAmGext7KBpK9J_kAm5XR8UW1J2_XMwSXzXGqPFGEH_kYJni4B5ve61UR6rXjQxxwBSefoGnHjzBbte9m2Bg3-fhuU5imUz1fZJM5Gpbg==; utag_main__sn=1; utag_main__se=1%3Bexp-session; utag_main__ss=1%3Bexp-session; utag_main__st=1732847915204%3Bexp-session; utag_main_ses_id=1732846115204%3Bexp-session; utag_main__pn=1%3Bexp-session; _scid=u6ZLKRtccxkVHkHmZJ-qtLS7B6SdNyN4; _scid_r=u6ZLKRtccxkVHkHmZJ-qtLS7B6SdNyN4; _gcl_au=1.1.1739503463.1732846120; cto_bundle=JG-0p19UTE0lMkZkaXhnY1FLY0xyZyUyRjRQU3BsblNzVGxLV3J1eHJrRHB4SVJTam1rWG8yMHFpSlRUYVVBSzN4MzA0JTJCTUlVUHZWZ3daRDFyQnFtNnpsRTUwZklYTUZZcXRTNzJVWWVINDh6JTJCdSUyQmtQVnFWSVA2akJqSmg2JTJCS28xRTh2VzclMkJPRjZxbFJJSkElMkZISDZrc3VTVTNSWXV3JTNEJTNE; _fbp=fb.1.1732846121152.119953238407666199; _ScCbts=%5B%5D; _pin_unauth=dWlkPU1qZ3lPREZrWTJRdE5qazJaQzAwTlRnd0xXSmxNV1l0WTJNM09XTTNNalpoTXpFMQ; ns1=BAQAAAZJU03gwAAaAANgAU2kqVapjNjl8NjAxXjE3MzI4NDYxMDY5MTleXjFeM3wyfDV8NHw3fDEwfDQyfDQzfDExXl5eNF4zXjEyXjEyXjJeMV4xXjBeMV4wXjFeNjQ0MjQ1OTA3NQPVgwyueMjD2Yt80/stttei22/a; dp1=bpbf/%23e000e00000000000000000692a55aa^bl/US6b0b892a^; nonsession=BAQAAAZJU03gwAAaAADMABWkqVao5MTcwMQDKACBrC4kqNzVhZDFhMTIxOTMwYTI0NDY3YjY0Y2VhZmZmZWNlZDQAywACZ0kpMjEyOdoCN12wgUr9yEmhAMshSIZdKIQ*; AMP_f93443b04c=JTdCJTIyZGV2aWNlSWQlMjIlM0ElMjI3YWY5MjA3My0yN2ZhLTRiZmYtOGU2Yi04OTE0NjVhNGM4NDclMjIlMkMlMjJzZXNzaW9uSWQlMjIlM0ExNzMyODQ2MTAzNzE4JTJDJTIyb3B0T3V0JTIyJTNBZmFsc2UlMkMlMjJsYXN0RXZlbnRUaW1lJTIyJTNBMTczMjg0NjE0ODI5MCUyQyUyMmxhc3RFdmVudElkJTIyJTNBNCUyQyUyMnBhZ2VDb3VudGVyJTIyJTNBMyU3RA==; ds2=sotr/b8_5azzzzzzz^; ebay=%5Ejs%3D1%5Esbf%3D%23000000%5Epsi%3DArTkWcT8*%5E'
def parse_cookies(input_str):
    """Parse a raw browser cookie string ('k=v; k2=v2; ...') into a dict.

    Values may themselves contain '=' (only the first '=' splits a pair),
    and fragments without '=' are skipped. Keys and values are stripped of
    surrounding whitespace — browser exports put a space after each ';',
    which would otherwise leak into the cookie names.

    Args:
        input_str: the cookie header string copied from the browser.

    Returns:
        dict mapping cookie name -> cookie value.
    """
    result_dict = {}
    for pair in input_str.split(';'):
        if '=' in pair:
            key, value = pair.split('=', 1)
            # strip() removes the ' ' that follows every ';' in the export
            result_dict[key.strip()] = value.strip()
    return result_dict


# Module-level cookie jar parsed from the captured browser cookie string.
cookies = parse_cookies(input_str)

# Request headers copied from a real Edge-on-Windows browser session so the
# requests look like ordinary browser traffic to eBay. The referer points at
# an eBay search page; content-type is JSON for the preference-setting POST.
headers = {
    'accept': '*/*',
    'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
    'bx-v': '2.5.22',
    'content-type': 'application/json;charset=UTF-8',
    'origin': 'https://www.ebay.com',
    'priority': 'u=1, i',
    'referer': 'https://www.ebay.com/sch/i.html?_from=R40&_trksid=p4432023.m570.l1313&_nkw=cat&_sacat=0',
    'sec-ch-ua': '"Chromium";v="130", "Microsoft Edge";v="130", "Not?A_Brand";v="99"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'same-origin',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 Edg/130.0.0.0',
}
def set_shipping_country(country_code="USA", cookie_str=None, country_name="United States"):
    """Set eBay's "ship to" location preference for the current cookies.

    Args:
        country_code: eBay ship-to country code (default "USA").
        cookie_str: raw browser cookie string; falls back to the
            module-level ``input_str`` capture when omitted.
        country_name: display name matching ``country_code`` (new optional
            parameter; previously hard-coded to "United States").

    Side effects:
        Sends a POST to eBay's setuserpreference endpoint and prints the
        outcome; nothing is returned.
    """
    if cookie_str is not None:
        cookies = parse_cookies(cookie_str)
    else:
        cookies = parse_cookies(input_str)

    base_url = "https://www.ebay.com"
    api_endpoint = "/gh/setuserpreference"
    # Payload shape mirrors the request eBay's own ship-to widget sends.
    payload = {
        "operation": "setShipToLocation",
        "countryCode": country_code,
        "countryName": country_name,
        "moduleContext": {
            "moduleId": 45019,
            "instanceId": 1,
            "uxComponentGroup": "SHIP_TO_LOCATION"
        }
    }

    try:
        response = requests.post(
            base_url + api_endpoint,
            headers=headers,
            json=payload,
            timeout=10,
            cookies=cookies
        )
        if response.status_code == 200:
            print(f"✅ 配送地址已设置为美国")
        else:
            print(f"❌ 设置失败: HTTP {response.status_code}")
    except requests.RequestException as e:
        # Only network/HTTP-level failures can occur above.
        print(f"网络请求失败: {str(e)}")

def get_ebay_detail(item_url, cookie_str=None):
    """Fetch a single eBay listing page and parse it into a dict.

    Args:
        item_url: full listing URL (https://www.ebay.com/itm/<id>...).
        cookie_str: raw browser cookie string; defaults to the module-level
            ``input_str`` capture.

    Returns:
        dict with keys storename / images / title / price / stock /
        productId / skus. On any failure, whatever was parsed so far
        (possibly an empty dict) is returned and the error is printed.
    """
    if cookie_str is not None:
        cookies = parse_cookies(cookie_str)
    else:
        cookies = parse_cookies(input_str)

    item = {}
    try:
        response = requests.get(item_url, headers=headers, cookies=cookies)
        tree = html.fromstring(response.text)
        page_text = response.text

        # Seller-store link: take the first occurrence of the store URL and
        # trim the '?_trksid...' tracking query string.
        target_link = ''
        start_index = page_text.find('https://www.ebay.com/str/')
        if start_index != -1:
            target_link = page_text[start_index:start_index +
                                    len('https://www.ebay.com/str/') + 200]
            print(target_link)
            end_index = target_link.find('?_trksid', 0)
            if end_index != -1:
                target_link = target_link[0:end_index]
                print(target_link)
        item['storename'] = target_link

        # Gallery images: zoom-size sources inside the image carousel.
        # NOTE: './/' keeps the search scoped to the carousel element; a bare
        # '//img' would search the whole document from the root.
        carousel = tree.xpath(
            '//div[@class="ux-image-carousel-container image-container"]')[0]
        item['images'] = list(set(carousel.xpath('.//img/@data-zoom-src')))

        item['title'] = tree.xpath(
            '//h1[@class="x-item-title__mainTitle"]/span/text()')[0].strip()
        item['price'] = tree.xpath(
            '//div[@class="x-price-primary"]/span/text()')[0].strip()
        item['stock'] = ' '.join(
            txt.strip() for txt in tree.xpath(
                '//div[@class="x-quantity__availability"]/span/text()')
        ).strip()

        # Product id is the first run of digits in the /itm/<id> URL.
        item['productId'] = ''
        match = re.search(r'\d+', item_url)
        if match:
            item['productId'] = match.group().strip()

        # SKU options (e.g. colour/size): one [label, values] pair per listbox.
        skus = []
        sku_eles = tree.xpath(
            '//span[@class="listbox-button mar-t-16 listbox-button--fluid"]')
        for sku in sku_eles:
            sku_type = sku.xpath(
                './/span[@class="btn__label"]/text()')[0].replace(':', '')
            # './/' scopes the values to THIS listbox; the previous '//'
            # collected every listbox value on the page for each SKU type.
            sku_vals = [
                txt.strip()
                for txt in sku.xpath('.//span[@class="listbox__value"]/text()')
                if txt.strip() != 'Select'
            ]
            skus.append([sku_type, sku_vals])
        item['skus'] = skus
    except Exception as e:
        # Best-effort scrape: report and return the partial result.
        print('crawl', item_url, 'failed', e)
    return item


def get_ebay_data(keyword, max_page=1, cookie_str=None):
    """Search eBay for ``keyword`` and scrape the result cards.

    Args:
        keyword: search term.
        max_page: number of result pages to request (240 items per page).
        cookie_str: raw browser cookie string; defaults to the module-level
            ``input_str`` capture.

    Returns:
        List of dicts, one per result card, with whichever of the keys
        url / productId / image_url / title / condition / price / shipping /
        seller / location / discount / best_offer were present on the card.
    """
    items = []
    if cookie_str is not None:
        cookies = parse_cookies(cookie_str)
    else:
        cookies = parse_cookies(input_str)

    for page in range(1, max_page + 1):
        # Initialized before the try so the early-exit check below can never
        # hit a NameError when the request/parse fails on the first page.
        links = []
        try:
            print(page, keyword)
            url = f'https://www.ebay.com/sch/i.html?_from=R40&_nkw={keyword}&_sacat=0&_pgn={page}&_ipg=240&shipToCountry=USA'
            response = requests.get(url, headers=headers, cookies=cookies)
            soup = BeautifulSoup(response.text, 'html.parser')

            # De-duplicated item links; used only to gauge how many results
            # this page actually holds.
            links = list(set(re.findall(r'https://www\.ebay\.com/itm/\d+',
                                        response.text)))
            print(page, keyword, '查询记录数量:', len(links))

            # Each result card is an <li data-viewport=...> element.
            for li in soup.find_all('li', attrs={'data-viewport': True}):
                try:
                    item = {}

                    # Item link and numeric product id from the /itm/<id> URL.
                    link_elem = li.find('a', class_='s-item__link')
                    if link_elem:
                        item['url'] = link_elem.get('href')
                        item_id_match = re.search(r'/itm/(\d+)', item['url'])
                        if item_id_match:
                            item['productId'] = item_id_match.group(1)

                    # Thumbnail; the alt text doubles as a fallback title.
                    img_elem = li.find('img')
                    if img_elem:
                        item['image_url'] = img_elem.get('src')
                        item['title'] = img_elem.get('alt', '')

                    # Heading text overrides the img alt when present.
                    title_elem = li.find('span', role='heading')
                    if title_elem:
                        item['title'] = title_elem.text.strip()

                    # Condition (new / pre-owned).
                    condition_elem = li.find('span', class_='SECONDARY_INFO')
                    if condition_elem:
                        item['condition'] = condition_elem.text.strip()

                    price_elem = li.find('span', class_='s-item__price')
                    if price_elem:
                        item['price'] = price_elem.text.strip()

                    shipping_elem = li.find('span', class_='s-item__shipping')
                    if shipping_elem:
                        item['shipping'] = shipping_elem.text.strip()

                    seller_elem = li.find('span', class_='s-item__seller-info-text')
                    if seller_elem:
                        item['seller'] = seller_elem.text.strip()

                    # Location; drop the localized "ships from:" prefix.
                    location_elem = li.find('span', class_='s-item__location')
                    if location_elem:
                        item['location'] = location_elem.text.replace('发货地：', '').strip()

                    discount_elem = li.find('span', class_='s-item__discount')
                    if discount_elem:
                        item['discount'] = discount_elem.text.strip()

                    # "Best offer" flag (localized marker text).
                    best_offer_elem = li.find('span', class_='s-item__dynamic')
                    if best_offer_elem and '议价' in best_offer_elem.text:
                        item['best_offer'] = True

                    items.append(item)

                except Exception as e:
                    # Skip a malformed card; keep scraping the rest.
                    print(f"Error parsing item: {e}")
                    continue

        except Exception as e:
            print(e)

        # Fewer than 50 links on the first page means the result set is
        # already exhausted — don't request further pages.
        if page == 1 and len(links) < 50:
            break
    return items

# Script entry point: set the ship-to country from the captured cookies,
# run one search, and print each scraped result (a progress index every 10).
set_shipping_country(cookie_str=input_str)
items = get_ebay_data('women shoes', max_page=1)
for index, item in enumerate(items):
    if index % 10 == 0:
        print(index)
    print(f'第{index+1}个商品:{item}')
# Small random pause to look less like automated traffic.
time.sleep(random.random()*2)
# # items = get_ebay_data('women shoes', max_page=1)
# # for item in items:
# #     print(item)

# # print('item size:', len(items))

# # url = 'https://ebay.com/itm/123456?itmmeta=012DEW30YG0MEEKND7NH&hash=item123546:g:acwAA9KNiJowH:sc:ShippingMethodStandard!95008!US!-1&itmprp=enc%3AbgepL1tlUHjMGCVfSTGJh%2BzsVKeJ3CQk7NizDI4BZeppuFnmyS6Ijyp8lh%2FnEw%2BWqO7uTV1Q6izE1R0T54aV8j71F4xlWfVcGft4%2FiOQhtqVXA1rW6M1atPARQRmhqUxtEPJKhKtSFgI%2Bvwlzb0GwVCtkp%3ABlBMUObkmabpYw'

# # response = requests.get(url, headers=headers, cookies=cookies)

# # with open('ebay.html', 'w') as f:
# #     f.write(response.text)
