from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import pickle
import time
import os

# -------------------------------
# Configure the Edge browser
# -------------------------------
driver = webdriver.Edge()

# -------------------------------
# Open the Xiaohongshu login page
# -------------------------------
url = "https://www.xiaohongshu.com/"
driver.get(url)

# Wait for the user to log in manually (QR-code scan or username/password).
print("请在浏览器中手动登录小红书...")
time.sleep(30)  # 30 seconds for the manual login; adjust as needed

# -------------------------------
# Save cookies after login
# -------------------------------
cookies_file = "xiaohongshu_cookies.pkl"
# Use a context manager so the handle is closed even if pickling fails
# (the original `pickle.dump(..., open(...))` leaked the file handle).
with open(cookies_file, "wb") as cookie_fh:
    pickle.dump(driver.get_cookies(), cookie_fh)
print(f"Cookies 已保存到 {cookies_file}")

# -------------------------------
# Open the target page and load cookies
# -------------------------------
ftxt = r"C:\pro\IT\Python\Py1\日语\temp\videos\Books"
t1 = os.path.join(ftxt, "文章.txt")

driver.get("https://www.xiaohongshu.com/user/profile/5f9c3e370000000001001a72?tab=note")
print("过验证")
time.sleep(40)

if os.path.exists(cookies_file):
    # Context manager closes the handle deterministically (original leaked
    # it via `pickle.load(open(...))`).
    with open(cookies_file, "rb") as cookie_fh:
        cookies = pickle.load(cookie_fh)
    for cookie in cookies:
        # Selenium's add_cookie may reject a "sameSite" value it does not
        # recognize, so strip the key before adding the cookie.
        cookie.pop("sameSite", None)
        driver.add_cookie(cookie)
    driver.refresh()  # refresh so the page picks up the logged-in session
    time.sleep(3)

# -------------------------------
# Scroll the feed and collect note links
# -------------------------------
print("过验证成功，开始操作...")
start = time.time()

# `sethref` keeps discovery order; `seen` gives O(1) duplicate checks
# (the original tested membership on the list itself, O(n) per href, and
# initialized it through a pointless `list(dict.fromkeys([]))` round-trip).
sethref = []
seen = set()
no_new_count = 0  # consecutive scroll passes that yielded no new link
max_no_new = 5    # give up after this many empty passes

while True:
    # Scroll down 300px and give lazily-loaded cards time to render.
    driver.execute_script("window.scrollBy(0, 300);")
    time.sleep(1.5)

    container = driver.find_element(By.ID, "userPostedFeeds")
    sections = container.find_elements(By.TAG_NAME, "section")

    new_found = False  # did this pass discover any new link?

    for section in sections:
        try:
            # Each card: the first <div> wraps the note; its second <a>
            # carries the note URL.
            first_div = section.find_elements(By.TAG_NAME, "div")[0]
            a_tags = first_div.find_elements(By.TAG_NAME, "a")
            if len(a_tags) >= 2:
                href = a_tags[1].get_attribute("href")
                if href and href not in seen:
                    seen.add(href)
                    sethref.append(href)
                    new_found = True
        except Exception as e:
            # Stale/partial cards are expected while the feed re-renders.
            print(f"section 出错：{e}")

    print(f"当前已收集 href 数量: {len(sethref)}")
    if len(sethref) > 40:
        print(sethref)
        break
    if new_found:
        no_new_count = 0  # progress made, reset the give-up counter
    else:
        no_new_count += 1
        if no_new_count >= max_no_new:
            print("连续多次没有新内容，结束。")
            break

end = time.time()
print(f"执行时间: {end - start:.2f} 秒,开始写入:")

# Append the collected note URLs, one per line, in a single batched call
# instead of one tiny f.write per href.
with open(t1, "a", encoding="utf-8") as f:
    f.writelines(s + "\n" for s in sethref)
print("href写入完成")


time.sleep(40)



print("开始下载当前已有作品内容:")
# -------------------------------
# 观察效果
# -------------------------------
time.sleep(20000)

# -------------------------------
# 关闭浏览器
# -------------------------------
driver.quit()
