import os
import requests
from selenium import webdriver
from selenium.webdriver.common.by import By
from time import sleep
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.chrome.service import Service

# Create the directory where downloaded cover images are stored
# (exist_ok avoids an error when the script is re-run).
os.makedirs("images", exist_ok=True)

# Launch Chrome through a local chromedriver binary and open bilibili.
# NOTE(review): 'chromedriver.exe' is a hard-coded Windows-style relative
# path — adjust for other platforms / driver locations.
service = Service(executable_path='chromedriver.exe')
browser = webdriver.Chrome(service=service)
browser.maximize_window()
browser.get("https://www.bilibili.com/")
# Wait until the home-page search box is present before interacting with it.
# BUG FIX: the original waited on the literal placeholder XPath '路径'
# ("path"), which can never match any element, so the script always died
# with a TimeoutException after 20 s. Wait for the real search input
# (inside the #nav-searchform form that the lines below also target).
WebDriverWait(browser, 20).until(EC.presence_of_element_located(
    (By.XPATH, '//*[@id="nav-searchform"]//input')))
# Type the query "萌娘百科" (Moegirlpedia) into the search box.
search = browser.find_element(By.XPATH, '/html[1]/body[1]/div[2]/div[2]/div[1]/div[1]/div[1]/div[1]/form[1]/div[1]/input[1]')
search.send_keys('萌娘百科')
# Click the search button to submit.
browser.find_element(By.XPATH, '//*[@id="nav-searchform"]/div[2]').click()
sleep(5)
browser.implicitly_wait(10)
# Bilibili opens search results in a new tab; switch to the newest handle.
if len(browser.window_handles) > 1:
    browser.switch_to.window(browser.window_handles[-1])

# True until the first "next page" click; the results page uses a
# different pagination DOM layout after leaving page 1.
is_first_page = True
# Markdown output collecting one "title + ![cover]" entry per video.
# NOTE(review): opened without a context manager — if the loop below
# raises, the file is only flushed/closed at interpreter shutdown.
output_file = open("video_titles_and_covers.md", "w", encoding="utf-8")
# 1-based counter used to name downloaded cover files ("images/第N张.jpg").
img_count = 1

# Pagination loop: scrape up to 10 result pages.
for page in range(10):
    # Scroll down in small steps so lazily-loaded covers get fetched.
    for step in range(20):
        browser.execute_script(f"window.scrollTo(0, {step * 300});")
        sleep(0.2)
    sleep(3)  # give the last batch of images time to finish loading

    try:
        # Collect all non-empty <h3> texts as video titles.
        titles = [t.text.strip() for t in browser.find_elements(By.XPATH, '//h3') if t.text.strip()]
        print(f"抓到标题 {len(titles)} 个")

        # Cover <img> elements carry no class attribute on this page;
        # keep only those with a real http(s) src.
        all_imgs = browser.find_elements(By.XPATH, '//img[not(@class)]')
        valid_imgs = []
        for img in all_imgs:
            src = img.get_attribute('src')
            if src and src.startswith("http"):
                valid_imgs.append(src)

        # The first 4 matches are unrelated page chrome; skip them.
        valid_imgs = valid_imgs[4:]
        print(f"抓到可用封面 {len(valid_imgs)} 张（已跳过前4张）")

        # Pair titles with covers positionally; zip stops at the shorter list.
        for title, img_url in zip(titles, valid_imgs):
            local_img_path = f"images/第{img_count}张.jpg"
            try:
                # Download the cover image.
                resp = requests.get(img_url, timeout=10)
                # BUG FIX: verify the HTTP status so a 4xx/5xx error page
                # is not silently saved to disk as a .jpg. A failure raises
                # and is reported by the existing except branch below.
                resp.raise_for_status()
                with open(local_img_path, 'wb') as f:
                    f.write(resp.content)

                # Append a markdown entry: title, local cover, separator.
                output_file.write(f"{title}\n\n")
                output_file.write(f"![封面]({local_img_path})\n\n")
                output_file.write("---\n\n")

                print(f"保存第{img_count}张：{title}")
                img_count += 1

            except Exception as e:
                # Best-effort: log the failed download and move on.
                print(f"下载失败：{img_url}，错误：{e}")

    except Exception as e:
        print("提取失败：", e)

    # Move to the next page of results.
    try:
        wait = WebDriverWait(browser, 5)
        next_page = None

        if is_first_page:
            try:
                # On page 1 the "next page" button sits at a different index
                # than on subsequent pages.
                next_page = wait.until(EC.element_to_be_clickable(
                    (By.XPATH, '/html/body/div[3]/div/div[2]/div[2]/div/div/div/div[4]/div/div/button[10]')))
            # BUG FIX: bare `except:` also swallowed KeyboardInterrupt and
            # SystemExit; narrow it to Exception.
            except Exception:
                print("第一页未找到下一页按钮，退出")
                break
            is_first_page = False
        else:
            # From page 2 on, the button index depends on how many page
            # numbers are rendered; try the known candidates in order.
            possible_xpaths = [
                '/html/body/div[3]/div/div[2]/div[2]/div/div/div[2]/div/div/button[10]',
                '/html/body/div[3]/div/div[2]/div[2]/div/div/div[2]/div/div/button[9]',
            ]
            for xpath in possible_xpaths:
                try:
                    next_page = wait.until(EC.element_to_be_clickable((By.XPATH, xpath)))
                    break
                except Exception:  # candidate not clickable — try the next one
                    continue

            if not next_page:
                print("未找到下一页按钮，可能已到最后一页")
                break

        print("点击下一页")
        next_page.click()
        sleep(3)

    except Exception as e:
        print("翻页异常：", e)
        break

# Cleanup: flush/close the markdown output and shut down the browser.
output_file.close()
browser.quit()
