from DrissionPage import ChromiumPage, ChromiumOptions
from DrissionPage.errors import ContextLostError


co = ChromiumOptions().set_paths(browser_path=r"C:\Program Files (x86)\Microsoft\Edge\Application\msedge.exe") # Point DrissionPage at the local Edge executable
co.set_argument('--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36 Edg/122.0.2365.52')  # Spoof a desktop Chrome/Edge User-Agent
page = ChromiumPage(co)

# Open the Walmart beauty-deals listing; all product links/names are scraped from here.
page.get('https://www.walmart.com/shop/deals/beauty-and-grooming?povid=GlobalNav_rWeb_Beauty_FeaturedShops_BeautyDeals_051123')


# Accumulates one generated fine-tuning prompt string per scraped product.
all_names = []

def get_name(all_names):
    """Scrape every product card on the current page and append one
    fine-tuning prompt per product to ``all_names``.

    For each card the product name and detail-page URL are extracted and
    interpolated into a prompt template. Products whose name contains a
    double quote are skipped, since the quote would corrupt the quoted
    JSON-like template. Returns the (mutated) ``all_names`` list.
    """
    # One element per product card on the deals grid.
    links = page.eles('x://div[@class="mb0 ph0-xl pt0-xl bb b--near-white w-25 pb3-m ph1"]')

    for link in links:

        try:
            name = link.ele('x://span[@class="w_iUH7"]')
            goods_name = name.text
            # Skip products whose name contains '"' — it would break the
            # quoted fields of the template below.
            if '"' in goods_name:
                continue
            url = link.ele('x://a[@class="w-100 h-100 z-1 hide-sibling-opacity  absolute"]').attr('href')

            # The template uses '$' / '&' / '*' as stand-ins for literal
            # braces so they do not clash with the f-string's own {}.
            text = f"""
            $
    "instruction": "Validate the product name and generate a TikTok video script based on FastMoss data analysis. Follow these steps: 1) Check if $$product_name&& is valid. If invalid, output '请输入一个有效的产品名称。' 2) Use {{product_id}} and {{product_url}} to retrieve FastMoss/Amazon data about features, reviews, USPs, and US market needs. 3) Create a script with a 3-second pop-culture hook (e.g., movie/TV references), pain points, USPs, timed script framework, bilingual script (EN/CN), localized idioms, and FTC-compliant claims. 4) Validate data across 3+ US platforms (Amazon/Walmart/Target) and document verification sources.",
    "input": $
      "product_name": "{goods_name}",
      "product_id": "$$product_id**",
      "product_url": "{url}",
      "requirements": $
        "seasonal_timeline": "Focus on events within 2 months (e.g., Labor Day, back-to-school)",
        "compliance_checks": [ "FTC广告规定", "无夸大节省金额", "可验证功效声明" ],
        "cultural_references": "美国影视经典开场（如《老友记》《复仇者联盟》等）+ 地域差异（如东西海岸需求）"
      *
    *,
    "output": ""
*
这是我的模型微调模板，instruction和input都已经固定好，现在需要你根据这两个内容帮我生成可以用于训练模型的output数据，请你保证你的输出可以直接用于output内容和用于模型微调。
"""
            # Map the placeholders back to braces: '$' -> '{' and both
            # '&' and '*' -> '}'. The template uses '&&' (product_name) as
            # well as '**' (product_id), so both closers must be replaced —
            # the original code only replaced '*', leaving a mismatched
            # '{{product_name&&' in the output.
            text = text.replace('$', '{').replace('&', '}').replace('*', '}')

            all_names.append(text)

        except ContextLostError:
            print("未找到商品名...")
            continue

    return all_names

# Walk through every result page: scrape the current page, then advance
# via the "next" chevron until it no longer exists.
while True:
    all_names = get_name(all_names)
    page.wait(2)

    next_btn = page.ele('x://i[@class="ld ld-ChevronRight pv1 primary"]')
    if not next_btn:
        print("没有下一页了")
        break

    next_btn.click()
    print("点击下一页")
    page.wait(2)  # give the next page time to finish loading

# Drop duplicate prompts while preserving first-seen order.
# dict.fromkeys is O(n) vs the original O(n^2) list-membership scan.
unique_names = list(dict.fromkeys(all_names))

# Write one prompt per line.
# NOTE(review): encoding='gbk' with errors='ignore' silently drops any
# character gbk cannot encode — confirm downstream tooling requires gbk,
# otherwise prefer utf-8.
with open(r'D:\subtitle\goods_name\all_names.txt', 'w', encoding='gbk', errors='ignore') as f:
    for item in unique_names:  # renamed from 'id', which shadowed the builtin
        f.write(str(item) + '\n')
print("保存成功")