import time
import random
import hashlib
import requests
import json
from bs4 import BeautifulSoup
import re
from walmartupc import get_upc
# from data5u_dynamic import get_proxyip
import traceback
from urllib.parse import urlparse

class WalmartScraper:
    """Scraper for Walmart Canada: keyword search results plus per-product
    price/UPC lookup from product pages.

    NOTE(review): relies on Walmart.ca's current HTML class names and embedded
    JSON shapes; those are fragile and may change without notice.
    """

    def __init__(self):
        # Search endpoint and browser-like headers reused for every request.
        self.base_url = "https://www.walmart.ca/search"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36",
            "Accept": "text/html,application/xhtml+xml,application/json",
            "Accept-Language": "en-US,en;q=0.9",
            "Referer": "https://www.walmart.ca/",
            "authority": "www.walmart.ca"
        }

    def _generate_nonce(self):
        """Return a random 16-character URL-safe token.

        BUG FIX (docs): the previous docstring claimed this "extracts the
        nonce from the response"; it actually generates one locally.
        """
        chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_"
        return ''.join(random.choice(chars) for _ in range(16))

    def _generate_params(self, keyword, page):
        """Build the query-string parameters for one search-results page.

        Args:
            keyword: search term (free text or a UPC string).
            page: 1-based results page number.

        Returns:
            dict of str->str query parameters. ``t``/``v`` are millisecond
            timestamps (``v`` slightly in the future of ``t``) and ``H`` is a
            pseudo session hash — presumably mimicking browser telemetry
            fields; TODO confirm which of these the endpoint actually checks.
        """
        current_time = int(time.time() * 1000)
        t = str(current_time)
        v = str(current_time + random.randint(200000, 400000))
        session_id = hashlib.md5(str(random.random()).encode()).hexdigest()
        session_hash = hashlib.md5(session_id.encode()).hexdigest()[:24]

        return {
            "q": keyword,
            "page": str(page),
            "affinityOverride": "default",
            "t": t,
            "v": v,
            "H": session_hash,
            "s": "f00a1d7edcef633ab2f1e3e26b253040",
            "Q": "2",
            "S": str(random.randint(7000, 12000)),
            "N": str(random.randint(20, 50)),
            "z": "1"
        }

    def _parse_search_item(self, item):
        """Extract one product record from a single search-result tile.

        Args:
            item: BeautifulSoup Tag for one product tile.

        Returns:
            dict with the keys the search pipeline has always produced:
            title, brand, original_price, current_price, clearance_deal,
            inventory_status, image, link, SKU, rating, reviews, price, upc.
        """
        # Product title.
        name_elem = item.find('span', {'data-automation-id': 'product-title'})
        product_name = name_elem.text.strip() if name_elem else None

        # Brand line (Walmart's utility-class soup; fragile selector).
        brand_elem = item.find('div', class_='mb1 mt2 b f6 black mr1 lh-copy')
        brand = brand_elem.text.strip() if brand_elem else None

        # Price block. BUG FIX: the original dereferenced price_div without a
        # None check (AttributeError on tiles with no price, silently caught
        # by the page-level except), and parsed prices twice with the first
        # result discarded. Single, guarded pass here.
        current_price = None
        original_price = None
        price_text = ''
        price_div = item.find('div', {'data-automation-id': 'product-price'})
        if price_div:
            current_price_elem = price_div.find('span', {'data-automation-id': 'product-price'})
            if not current_price_elem:
                current_price_elem = price_div.find('span', class_='w_q67L')
            if current_price_elem:
                price_text = current_price_elem.text.strip()
                # Strip screen-reader prefix, currency symbol and thousands
                # separators; value stays a string (original behavior).
                current_price = (price_text.replace('current price $', '')
                                 .replace('$', '')
                                 .replace(',', '')
                                 .strip())
            # Strike-through "was" price, as a float when parseable.
            original_price_elem = (price_div.find('div', {'data-automation-id': 'was-price'})
                                   or price_div.find('div', class_='gray mr1 strike f7 f6-l')
                                   or price_div.find('div', class_='strike'))
            if original_price_elem:
                try:
                    original_price = float(re.sub(r'[^\d.]', '', original_price_elem.text.strip()))
                except (ValueError, TypeError):
                    original_price = None

        # Price ranges such as "$10.00 - $15.00" are averaged (kept from the
        # original implementation).
        if current_price and '-' in price_text:
            bounds = [float(re.sub(r'[^\d.]', '', part)) for part in price_text.split('-')]
            current_price = sum(bounds) / len(bounds)

        # Clearance badge present on the tile?
        clearance_deal = bool(item.find('span', class_='w_SrYk w_2ioM w_O_Dq w_OAJ4 tag-leading-badge absolute'))

        # Stock status; defaults to the in-stock label when absent.
        inventory_elem = item.find('div', {'data-automation-id': 'inventory-status'})
        inventory_status = inventory_elem.text.strip() if inventory_elem else '有货'

        # Image and product link.
        image_elem = item.find('img', {'data-testid': 'productTileImage'})
        image_url = image_elem.get('src') if image_elem else None
        link_elem = item.find('a', {'link-identifier': True})
        product_link = link_elem.get('href') if link_elem else None

        # Tile-level SKU attribute.
        sku = item.get('data-item-id')

        # BUG FIX: the original looked ratings/reviews up with soup.find(...)
        # on the WHOLE page, so every tile received the first rating found on
        # the page. Search within this tile instead.
        rating = ''
        rating_span = item.find('span', {'data-testid': 'product-ratings'})
        if rating_span and rating_span.has_attr('data-value'):
            rating = float(rating_span['data-value'])

        reviews = ''
        reviews_span = item.find('span', {'data-testid': 'product-reviews'})
        if reviews_span and reviews_span.has_attr('data-value'):
            reviews = int(reviews_span['data-value'])

        return {
            'title': product_name,
            'brand': brand,
            'original_price': original_price,
            'current_price': current_price,
            'clearance_deal': clearance_deal,
            'inventory_status': inventory_status,
            'image': image_url,
            'link': product_link,
            'SKU': sku,
            'rating': rating,
            'reviews': reviews,
            'price': current_price,
            'upc': ''
        }

    def search_products(self, keyword, max_pages=5):
        """Search Walmart.ca for *keyword* and scrape up to *max_pages* pages.

        Args:
            keyword: search term.
            max_pages: number of result pages to fetch (default 5).

        Returns:
            list of product dicts (see _parse_search_item). Pages that fail
            are logged and skipped; a best-effort partial list is returned.
        """
        all_products = []

        for page in range(1, max_pages + 1):
            try:
                params = self._generate_params(keyword, page)
                # BUG FIX: added a timeout so a stalled connection cannot
                # hang the whole crawl.
                response = requests.get(
                    self.base_url,
                    params=params,
                    headers=self.headers,
                    timeout=15
                )
                if response.status_code == 200:
                    soup = BeautifulSoup(response.text, 'html.parser')
                    # Product tiles are identified by this utility-class run.
                    product_items = soup.find_all(
                        'div',
                        class_=lambda x: x and 'sans-serif mid-gray relative flex flex-column w-100 hide-child-opacity' in x
                    )
                    products = []
                    for item in product_items:
                        product_info = self._parse_search_item(item)
                        print(product_info)
                        products.append(product_info)
                    all_products.extend(products)
                    print(f"第{page}页: 获取到{len(products)}个产品")
                else:
                    print(f"第{page}页请求失败: {response.status_code}")

                # Polite crawl delay between pages.
                time.sleep(random.uniform(6, 8))

            except Exception as e:
                traceback.print_exc()
                print(f"处理第{page}页时出错: {str(e)}")
                continue
        return all_products

    def save_results(self, products, filename="walmart_products.json"):
        """Write *products* to *filename* as pretty-printed UTF-8 JSON.

        Does nothing (and says so) when the list is empty.
        """
        if products:
            with open(filename, 'w', encoding='utf-8') as f:
                json.dump(products, f, ensure_ascii=False, indent=2)
            # BUG FIX: the message printed the literal "(unknown)" instead of
            # the actual target filename.
            print(f"保存了{len(products)}个产品到{filename}")
        else:
            print("没有数据可保存")

    def _extract_product_id(self, url):
        """Extract the product ID from a Walmart.ca product URL.

        Handles both URL shapes:
          1. https://www.walmart.ca/en/ip/5RDQWVKR8E9U
          2. https://www.walmart.ca/en/ip/Some-Product-Slug/5RDQWVKR8E9U

        Raises:
            ValueError: if the URL does not contain a recognizable product ID.
        """
        match = re.search(r'/ip/(?:[^/]+/)?([A-Z0-9]{10,14})(?:[/?]|$)', url)
        if not match:
            raise ValueError(f"无效的Walmart产品链接: {url}")
        return match.group(1)

    def _extract_price_and_upc(self, soup, response_text):
        """Pull currency/price/UPC from the product page.

        Tries JSON-LD structured data first, then falls back to regexes over
        the raw HTML for each field that is still missing.

        Returns:
            dict with keys 'currency' (default 'CAD'), 'price', 'upc'
            ('price'/'upc' are None when not found).
        """
        result = {'currency': 'CAD', 'price': None, 'upc': None}

        script_data = soup.find('script', type='application/ld+json')
        if script_data:
            try:
                product_data = json.loads(script_data.text)
                if 'offers' in product_data:
                    offers = product_data['offers']
                    if isinstance(offers, list) and len(offers) > 0:
                        result['price'] = offers[0].get('price')
                        result['currency'] = offers[0].get('priceCurrency', 'CAD')
                    elif isinstance(offers, dict):
                        result['price'] = offers.get('price')
                        result['currency'] = offers.get('priceCurrency', 'CAD')
                # BUG FIX: UPC lookup no longer depends on 'offers' being
                # present — the identifier keys are top-level in JSON-LD.
                for key in ('gtin13', 'upc', 'productID'):
                    if value := product_data.get(key):
                        result['upc'] = str(value).strip()
                        break
            except Exception as e:
                print(f"JSON-LD解析异常: {str(e)}")

        if not result['price']:
            price_match = re.search(r'"currentPrice"\s*:\s*{\s*"value"\s*:\s*(\d+\.\d+)', response_text)
            if price_match:
                result['price'] = price_match.group(1)

        if not result['upc']:
            upc_match = re.search(r'"upc"\s*:\s*"(\d{12})"', response_text)
            if upc_match:
                result['upc'] = upc_match.group(1)

        return result

    def _parse_specifications_upc(self, response_text):
        """Parse the embedded "specifications" JSON array and return the UPC.

        Returns:
            the UPC string, or None when the array is absent/unparseable or
            contains no UPC entry.
        """
        specs_json = None
        try:
            # Anchor on the "specifications" array; lookahead stops at the
            # next top-level key.
            pattern = r'"specifications"\s*:\s*(\[.*?\])(?=,\s*["\']?[a-zA-Z]+["\']?:)'
            match = re.search(pattern, response_text, re.DOTALL)
            if not match:
                return None

            specs_json = match.group(1)
            # Pre-clean the snippet: keep escapes, unify quotes, drop
            # trailing commas before '}'.
            specs_json = specs_json.replace('\\"', '\\\\"')
            specs_json = specs_json.replace("'", '"')
            specs_json = re.sub(r',\s*}', '}', specs_json)

            try:
                specs_data = json.loads(specs_json)
            except json.JSONDecodeError:
                # Second attempt: fix trailing comma at end and quote bare keys.
                fixed_json = re.sub(r'(,)\s*}(?=\s*$)', r'}', specs_json)
                fixed_json = re.sub(r"(\w+)\s*:", r'"\1":', fixed_json)
                specs_data = json.loads(fixed_json)

            for spec in specs_data:
                if isinstance(spec, dict) and spec.get('name') == 'Universal Product Code (UPC check)':
                    # BUG FIX: the original computed this value but then fell
                    # through and always returned None.
                    value = str(spec.get('value', '')).strip()
                    if value:
                        return value
            return None

        except json.JSONDecodeError as e:
            context = specs_json[max(0, e.pos - 30):e.pos + 30] if specs_json else ''
            print(f"JSON解析失败（位置{e.pos}）：...{context}...")
            return None
        except Exception as e:
            print(f"解析异常：{str(e)}")
            return None

    def get_walmart_upc(self, product_url):
        """Fetch a product page and return its price/currency/UPC info.

        Args:
            product_url: full Walmart.ca product-page URL.

        Returns:
            dict {'currency', 'price', 'upc'} on success ('upc' is '' when
            not found), or None when the request/parsing fails entirely.
        """
        # NOTE(review): hard-coded session cookies — WM_SEC.AUTH_TOKEN is a
        # short-lived credential and will expire; move these to configuration
        # instead of source code.
        cookies = {
            'DYN_USER_ID': '15c86b35-bc98-4a0f-933f-d01fccb2f7ef',
            'WM_SEC.AUTH_TOKEN': 'MTAyOTYyMDE4z7NeczN3OHYci9NyR9hsf2wncgzO2yDoRa71j9BMZtBTY7CrtBNTeJ82kMlzdsxIuI9cALgQOxAKh6Q5IaSBmjlsVS7Sp6cEBVZOZps8KPwHNz8VKeBnkYzjxAyV%2FAQAj8OFN4dileb20bpDLeCIlSFd%2FHsc7bnSe4%2BTLU2zbj2jBfhPfnjWPC0bf44%2BJqoR%2Fz153owF1bW0uFmzCVnOm60zOyVe5O3N8cHt%2F%2BJ0G66WRXS%2FVM7dhoO5fDBksK28pGd2w9VMvEOMsmi01MtA%2F%2B3%2BUrCYfDaYliuXSs21LmUpa1eIn%2Fi%2B7sEWWPQta0pemB9YZyXjC1SHLXitVuD7rUkQ6afgtMY6UUD71jr2TVkDB8%2BsM1swFHwp56JFH0qi2HWpwkdv%2F%2FB4GwJfdfdZRiCFU7YNy%2B%2B7Og%2FK1CpgYd8%3D'
        }

        try:
            html_response = requests.get(
                product_url,
                headers=self.headers,
                cookies=cookies,
                timeout=10
            )
            soup = BeautifulSoup(html_response.text, 'html.parser')

            # Primary pass: JSON-LD + raw-HTML regex fallbacks.
            product_info = self._extract_price_and_upc(soup, html_response.text)

            # BUG FIX: the original unconditionally overwrote the UPC found
            # above with parse_json_data's result — which always returned
            # None — discarding a successfully extracted UPC. Only fall back
            # when no UPC was found yet.
            if not product_info['upc']:
                product_info['upc'] = self._parse_specifications_upc(html_response.text) or ''

            if not product_info['upc']:
                # Last resort: scan the raw HTML for a name/value pair that
                # mentions UPC and carries an 11-14 digit value.
                pattern = r'''
                    "name":\s*"      # 匹配字段名开始
                    [^"]*?           # 非贪婪匹配任意字符
                    (?:UPC|Universal\s+Product\s+Code)  # 匹配UPC关键词
                    [^"]*?           # 匹配可能的后缀
                    "\s*,\s*         # 结束字段名
                    "value":\s*      # 开始值部分
                    "                # 值引号
                    (\d{11,14})      # 捕获12-14位数字（标准UPC长度）
                    "                # 结束值
                '''
                matches = re.findall(pattern, html_response.text, re.VERBOSE | re.IGNORECASE)
                print("找到的UPC码:", matches)
                if matches:
                    product_info['upc'] = matches[0]

            return product_info

        except Exception as e:
            print(f"解析异常: {str(e)}")
            traceback.print_exc()
            return None

if __name__ == "__main__":
    # Manual smoke test: run a one-page keyword search, then resolve the
    # price/UPC details for a single known product page.
    scraper = WalmartScraper()
    found = scraper.search_products('00682698728833', max_pages=1)
    details = scraper.get_walmart_upc('https://www.walmart.ca/en/ip/VOLTZ-TOYS-1-14-Scale-RC-Car-Licensed-Ferrari-LaFerrari-Aperta-Remote-Control-Toy-Car-Model-Kids-Adults-Doors-Lights-Drift-Official-Merchandise-Best/752UWH85DV6U?from=/search')
    print(details)
