import json
import keyword
import random
import time

import pymysql
from selenium import webdriver
from selenium.common import NoSuchElementException
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Search term embedded in the Taobao search URL (and matched by the "#q" box wait below).
KEYWORD = 'iPad'


def random_delay(time_start, time_end):
    """Sleep for a duration drawn uniformly from [time_start, time_end] seconds."""
    pause = random.uniform(time_start, time_end)
    print("等待时间：", pause)
    time.sleep(pause)


def _css_text(card, selector, missing_msg=None):
    """Return the .text of the first descendant of `card` matching the CSS
    `selector`, or None when it is absent.

    On failure prints `missing_msg` when given, otherwise the exception —
    mirroring the script's original best-effort logging style.
    """
    try:
        return card.find_element(By.CSS_SELECTOR, selector).text
    except Exception as e:
        print(missing_msg if missing_msg is not None else e)
        return None


def fetch_taobao_goods(page=1):
    """Scrape one Taobao search-result page for KEYWORD and append each
    product's fields to 淘宝商品.txt.

    page: 1-based result-page number to request.
          BUG FIX: this argument was previously ignored — the URL
          hard-coded page=4 — it is now honored (default 1 unchanged).

    Side effects: drives a Chrome browser, reads taobao_cookies.json,
    appends to 淘宝商品.txt.
    """
    # Hide the "automation controlled" blink feature so the site is less
    # likely to detect Selenium.
    options = Options()
    options.add_argument("--disable-blink-features=AutomationControlled")
    driver = webdriver.Chrome(options=options)
    try:
        driver.maximize_window()
        driver.get(f"https://s.taobao.com/search?page={page}&q={KEYWORD}&tab=all")

        random_delay(5, 10)  # wait for the initial page load
        # Load previously saved session cookies and inject them.
        with open("taobao_cookies.json", "r") as file:
            cookies = json.load(file)
        for cookie in cookies:
            driver.add_cookie(cookie)
        random_delay(2, 6)
        driver.refresh()  # reload so the injected cookies take effect
        random_delay(2, 6)
        # Explicit wait until the search box is present (page rendered).
        WebDriverWait(driver, 5).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "#q"))
        )
        goods = driver.find_elements(By.CSS_SELECTOR, 'div .tbpc-row .search-content-col')

        # Open the output file once instead of re-opening it per item.
        with open('淘宝商品.txt', 'a', encoding='utf-8') as out:
            for card in goods:
                # Guard the link lookup too: previously a card without an
                # <a> raised and aborted the whole loop.
                href = None
                try:
                    href = card.find_element(By.TAG_NAME, 'a').get_attribute('href')
                except Exception as e:
                    print(e)
                data = {
                    "title": _css_text(card, '.title--qJ7Xg_90 span'),
                    "href": href,
                    "price": _css_text(card, 'span.priceInt--yqqZMJ5a'),
                    "real_sales": _css_text(card, 'span.realSales--XZJiepmt', "No realSales"),
                    "shop_name": _css_text(card, 'span.shopNameText--DmtlsDKm'),
                    "location": _css_text(card, '.procity--wlcT2xH9', "No location"),
                }
                out.write(
                    f"Title: {data['title']}\n"
                    f"Href: {data['href']}\n"
                    f"Price: {data['price']}\n"
                    f"Real Sales: {data['real_sales']}\n"
                    f"Shop Name: {data['shop_name']}\n"
                    f"Location: {data['location']}\n"
                    f"-----------------------------\n"
                )
                print("数据已追加到文件中")
    finally:
        # Always release the browser process, even if scraping fails part-way.
        driver.quit()


if __name__ == '__main__':
    # Script entry point: scrape the first result page for KEYWORD.
    fetch_taobao_goods(1)
