import json
import random
import time

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

# Path to the ChromeDriver binary.
# NOTE(review): this constant (and the imported Service) is currently unused —
# the driver below is created without a Service; confirm whether it should be
# wired in via Service(chrome_driver_path).
chrome_driver_path = "/usr/local/bin/chromedriver"

MAX_ITEMS_BEFORE_WRITING = 20  # flush collected items to disk every 20 entries


def write_to_file(collected_items):
    """Append *collected_items* to result.json, one JSON object per line.

    Items are serialized with ``ensure_ascii=False`` so non-ASCII text
    (e.g. Chinese titles) stays human-readable in the JSON Lines output.

    Args:
        collected_items: list of dicts to persist. An empty/falsy list is a
            no-op — the original wrote a stray blank line and printed a
            success message even when there was nothing to save.
    """
    if not collected_items:
        return
    with open('result.json', 'a', encoding='utf-8') as file:
        json_lines = (json.dumps(item, ensure_ascii=False) for item in collected_items)
        file.write('\n'.join(json_lines) + '\n')
    print("Results saved successfully.")


def random_delay(time_start, time_end):
    """Sleep for a random duration in [time_start, time_end] seconds.

    Used to pace browser interactions so the automated clicks look more
    like a human user and less like a bot.
    """
    time.sleep(random.uniform(time_start, time_end))


# Configure Chrome options.
options = Options()
# Disable the navigator.webdriver automation flag to reduce bot detection.
options.add_argument("--disable-blink-features=AutomationControlled")
driver = webdriver.Chrome( options=options)
driver.maximize_window()


def scroll_and_collect(driver, num_items):
    """Scroll the search-result feed, scraping note cards until num_items are collected.

    For every note card the cover image URL, title, author avatar URL and
    author nickname are extracted. Results are flushed to result.json in
    batches of MAX_ITEMS_BEFORE_WRITING via write_to_file().

    Args:
        driver: a logged-in selenium WebDriver positioned on a search-result page.
        num_items: stop once this many cards have been collected.

    Returns:
        The items collected since the last flush (possibly empty); earlier
        batches have already been written to disk.
    """
    collected_items = []
    collected_count = 0
    result_count = 0
    # BUG FIX: the feed re-lists every earlier card after each scroll, so the
    # original re-scraped (and double-counted) the same cards on every pass.
    # Track how many cards have already been handled and skip past them.
    processed = 0
    while result_count < num_items:
        # All note cards currently in the DOM (includes previously seen ones).
        items = driver.find_elements(By.XPATH, "//section[@class='note-item']")
        for item in items[processed:]:
            processed += 1
            try:
                # Extract cover, title, author avatar and nickname from the card.
                cover = item.find_element(By.XPATH, ".//div/a[@class='cover ld mask']/img").get_attribute("src")
                title = item.find_element(By.XPATH, ".//div[@class='footer']/a[@class='title']/span").text
                author_avatar = item.find_element(
                    By.XPATH,
                    ".//div[@class='author-wrapper']/a[@class='author']/img").get_attribute("src")
                author_name = item.find_element(
                    By.XPATH,
                    ".//div[@class='author-wrapper']/a[@class='author']/span[@class='name']").text
            except NoSuchElementException:
                # Card lacks one of the expected elements (e.g. an ad slot) — skip it.
                continue
            collected_items.append({
                "cover": cover,
                "title": title,
                "author_avatar": author_avatar,
                "author_name": author_name
            })
            result_count += 1
            collected_count += 1
            # Flush a full batch to disk and reset the in-memory buffer.
            if collected_count >= MAX_ITEMS_BEFORE_WRITING:
                write_to_file(collected_items)
                collected_items = []
                collected_count = 0
            if result_count >= num_items:
                break  # target reached — don't over-collect from this page
        # Scroll to the bottom to trigger lazy-loading of the next page.
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        random_delay(4, 8)  # give the new content time to load

        # Wait for note cards to be present again.
        # BUG FIX: WebDriverWait.until raises TimeoutException, not
        # NoSuchElementException, so the original except clause could never
        # fire and the loop could spin forever when no new content loaded.
        try:
            WebDriverWait(driver, 10).until(
                EC.presence_of_element_located((By.XPATH, "//section[@class='note-item']"))
            )
        except TimeoutException:
            break
    # Flush whatever remains of the final partial batch.
    if collected_count > 0:
        write_to_file(collected_items)
    return collected_items


try:
    # Open the Xiaohongshu home page.
    driver.get("https://www.xiaohongshu.com")
    random_delay(5, 10)  # wait for the page to load

    # Load previously saved session cookies from disk.
    with open("cookies.json", "r") as file:
        cookies = json.load(file)

    # Inject the cookies so the session is authenticated.
    for cookie in cookies:
        driver.add_cookie(cookie)
    random_delay(2, 6)
    driver.refresh()  # refresh so the injected cookies take effect
    random_delay(5, 10)

    # Explicit wait until the search box is present.
    wait = WebDriverWait(driver, 10)
    search_box = wait.until(EC.presence_of_element_located((By.XPATH, "//input[@placeholder='搜索小红书']")))
    search_box.send_keys("原神")
    random_delay(2, 5)
    search_button = wait.until(EC.element_to_be_clickable((By.XPATH, "//div[@class='input-button']")))
    search_button.click()
    random_delay(2, 5)
    wait.until(EC.presence_of_element_located((By.XPATH, "//section[@class='note-item']")))

    # Collect cover, title, author avatar and nickname for the first 100 results.
    num_items = 100
    scroll_and_collect(driver, num_items)

    # Pause so the search results remain visible before the browser closes.
    time.sleep(60)
finally:
    driver.quit()