from DrissionPage import ChromiumPage, ChromiumOptions
import os
import re
import json
import time
import random
from DrissionPage.errors import ContextLostError

from chat_dify_blocking import get_text
import pyautogui
from clean_json_1 import process_json_1
from clean_json_2 import process_json_2


# Configure and launch the browser session used by the scraping loop below.
co = ChromiumOptions().set_paths(browser_path=r"C:\Program Files (x86)\Microsoft\Edge\Application\msedge.exe") # browser executable path (Microsoft Edge)
co.set_argument('--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36 Edg/122.0.2365.52')  # spoof a fixed desktop Chrome/Edge User-Agent to look like a normal visitor
page = ChromiumPage(co)  # launch (or attach to) the browser with the options above


all_ids = []    # Walmart product ids, one per line in the input file
all_goods = []  # accumulated instruction/input/output records, flushed in batches of 10
num = 808       # starting index used in the output file names (walmart_goods_{num}.json)

# Read product ids from D:\subtitle\goods\all_ids.txt.
# Blank lines are skipped so an empty id never produces a request to
# the bare URL https://www.walmart.com/ip/ in the loop below.
with open(r'D:\subtitle\goods\all_ids.txt', 'r', encoding='utf-8') as f:
    for line in f:
        stripped = line.strip()
        if stripped:
            all_ids.append(stripped)

def _flush_goods(goods, batch_num):
    """Write one batch of records to walmart_goods_{batch_num}.json and run both cleaning passes."""
    json_file_path = os.path.join(r"D:\subtitle\goods", f'walmart_goods_{batch_num}.json')
    with open(json_file_path, 'w', encoding='utf-8') as f:
        json.dump(goods, f, ensure_ascii=False, indent=4)
    process_json_1(batch_num)
    process_json_2(batch_num)


# Main loop: for each product id, scrape name + description bullets,
# summarize via the LLM helper, and accumulate training records.
for goods_id in all_ids:  # renamed from `id` to avoid shadowing the builtin
    page.get(f'https://www.walmart.com/ip/{goods_id}')

    try:
        goods_name = page.ele('x://h1[@id="main-title"]').text
    except Exception:  # narrowed from bare except: don't swallow KeyboardInterrupt/SystemExit
        print("未找到商品名称，跳过此商品")
        continue

    # Fetch the description <li> elements, retrying up to 3 times when the
    # page context is lost mid-navigation (ContextLostError).
    lis = None  # pre-initialize so the check below is safe even if every attempt fails
    for _ in range(3):
        try:
            lis = page.eles('x://div[@class="expand-collapse-content dangerous-html w_YUC7"]//li')
            if lis:
                break
        except ContextLostError:
            lis = None
            print("描述，等待重新加载...")
            time.sleep(1)

    # `not lis` covers both "no elements found" and "all retries failed" (lis is None);
    # the original len(lis) == 0 raised TypeError in the latter case.
    if not lis:
        print("未找到描述信息，跳过此商品")
        continue

    # Join at most the first five bullet texts into one paragraph.
    # (Slicing already returns the whole list when it has 5 or fewer items.)
    li_texts = [li.text for li in lis]
    texts_all = '. '.join(li_texts[:5]) + '.'

    time.sleep(5)  # throttle between the description fetch and the LLM calls

    if texts_all:
        # Summarize selling points / target groups / usage scenarios from the description.
        target_text = get_text('Use short sentences to summarize the selling points, target groups, and usage scenarios from this paragraph', texts_all)

        # Normalize the LLM's markdown headings into bracketed labels and
        # flatten newlines/dashes. Renamed from `input` to avoid shadowing the builtin.
        cleaned_input = re.sub(r"\*\*Selling Points:\*\s*", "[Selling points]: ", target_text)
        cleaned_input = re.sub(r"\*\*Target Groups:\*\s*", "[Target groups]: ", cleaned_input)
        cleaned_input = re.sub(r"\*\*Usage Scenarios:\*\s*", "[Usage scenarios]: ", cleaned_input)
        cleaned_input = re.sub(r"[\n-]+", " ", cleaned_input).replace("\\n", "").replace("\\", "")

        # Generate the sales-script output; skip the product if the LLM returned nothing.
        out_put = get_text('Generate a sales script for this product targeting a US audience of up to 400 words [product]:', goods_name)
        if out_put is None:  # identity check, not == None
            continue
        out_put = out_put.replace("\n", "").replace("\\n", "").replace("\\", "")

        goods_dict = {
            'instruction': f'Generate a sales script for [{goods_name}] targeting American audiences',
            'input': cleaned_input,
            'output': out_put
        }
        all_goods.append(goods_dict)

        # Flush every 10 records: save, clean, then reset the batch.
        if len(all_goods) >= 10:
            _flush_goods(all_goods, num)
            all_goods.clear()
            num += 1

        page.wait(5)
        print(goods_dict)

# Flush any trailing partial batch — the original silently discarded
# fewer than 10 leftover records when the id list was exhausted.
if all_goods:
    _flush_goods(all_goods, num)
    all_goods.clear()
    num += 1