from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
import pickle
import time
import os
import re
import json
import sys
import tempfile
import shutil


def write_error_log(message):
    """Append *message* as one line to article_errorlist.txt.

    The file is opened with an explicit UTF-8 encoding: every message this
    script logs contains Chinese text, which the platform's default codec
    (e.g. cp1252 on some Windows locales) may be unable to encode.
    """
    with open("article_errorlist.txt", "a", encoding="utf-8") as file:
        file.write(message + "\n")


def save_progress(progress):
    """Persist the progress dict to progress.txt as JSON.

    Retries up to 50 times, 10 seconds apart, when a PermissionError occurs
    (the file may be held open by another program or lack write permission).
    Exits the whole process with status 1 if every retry fails.
    """
    max_retries = 50
    for attempt in range(1, max_retries + 1):
        try:
            with open("progress.txt", "w", encoding='utf-8') as f:
                json.dump(progress, f)
            return  # saved successfully
        except PermissionError as e:
            print(f"进度存档时，遇到权限错误Permission denied，文件可能被占用或无写入权限: {e}")
            print(f"等待10s后重试，将会重试50次... (尝试 {attempt}/{max_retries})")
            time.sleep(10)
    # All retries exhausted: give up and stop the program.
    print("进度存档时遇到权限错误，且已达到最大重试次数50次，退出程序")
    sys.exit(1)


def save_cookies(driver, cookies_file):
    """Pickle the driver's current cookies into *cookies_file*."""
    cookie_jar = driver.get_cookies()
    with open(cookies_file, 'wb') as fh:
        pickle.dump(cookie_jar, fh)


def load_cookies(driver, cookies_file):
    """Load pickled cookies from *cookies_file* into the driver.

    Returns True when the file exists and its cookies were added to the
    session; prints a notice and returns False when the file is missing.
    """
    if not os.path.exists(cookies_file):
        print("cookie不存在")
        return False
    with open(cookies_file, 'rb') as fh:
        saved = pickle.load(fh)
    for item in saved:
        driver.add_cookie(item)
    return True


def manual_login(driver, cookies_file):
    """Prompt the user to log in by hand in the open browser window, then
    persist the authenticated session's cookies to *cookies_file*.

    Blocks on input() until the user confirms the login redirect finished.
    """
    print("进行手动登录")
    input("请登录，登录成功跳转后，按回车键继续...")
    # After login, save the cookies locally so future runs skip this step.
    save_cookies(driver, cookies_file)
    print("程序正在继续运行")


def check_page_status(driver):
    """Probe the page by executing a no-op script.

    Returns True when the page responds normally. On any failure, prints the
    error, triggers a page refresh, waits 5 seconds, and returns False so the
    caller knows the page had to be reloaded.
    """
    try:
        driver.execute_script('javascript:void(0);')
    except Exception as e:
        print(f"检测页面状态时出错，尝试刷新页面重新加载: {e}")
        driver.refresh()
        time.sleep(5)  # give the reload a moment to settle
        return False
    return True


def close_mini_player(driver):
    """Dismiss Bilibili's floating mini player so it cannot overlap buttons.

    Best-effort: waits up to 30 s for the close control, clicks it, and
    swallows any failure (the player may already be closed).
    """
    try:
        WebDriverWait(driver, 30).until(
            EC.presence_of_element_located((By.XPATH, '//div[@title="点击关闭迷你播放器"]'))
        ).click()
    except Exception as e:
        print(
            f"[这不影响程序正常运行，可能悬浮小窗已被关闭（加这段只是因为悬浮小窗可能遮挡按钮，把浏览器拉宽可以避免按钮被遮挡）]未找到关闭按钮或无法关闭悬浮小窗: {e}")


def restart_browser(driver):
    """Tear down the current browser session and start the scraper over.

    Quits the driver, deletes the module-global temporary Chrome profile
    directory, and re-enters main() (which recreates temp_dir).

    NOTE(review): this recurses into main() instead of looping, so repeated
    restarts grow the call stack — acceptable only for occasional crashes.
    """
    driver.quit()
    # Best-effort removal: Chrome can leave locked files behind (especially
    # on Windows), and a failed rmtree here would abort the very restart
    # this function exists to perform.
    shutil.rmtree(temp_dir, ignore_errors=True)
    main()


def check_next_page_button(driver):
    """Return True when any pagination button on the page reads "下一页"."""
    buttons = driver.find_elements(By.CSS_SELECTOR, ".pagination-btn")
    return any("下一页" in btn.text for btn in buttons)


def _sanitize_title(title):
    """Strip *title* and replace characters invalid in filenames with spaces.

    Covers the Windows-forbidden set plus the full-width quotes and ideographic
    comma that appear in Bilibili titles.
    """
    return title.strip().translate(str.maketrans({c: ' ' for c in '?、/\\*“”<>|'}))


def main():
    """Scrape the Bilibili article ("read") pages listed in article_list.txt.

    Flow: obtain login cookies (headful browser, manual login on first run),
    then re-launch Chrome headless and, for each article URL, extract the
    article body with BeautifulSoup and write it to a per-article .txt file.
    Progress is persisted to progress.txt so an interrupted run resumes where
    it left off. Sets module globals temp_dir and mini_flag (used by
    restart_browser / close_mini_player).
    """
    global temp_dir
    # Create a fresh temp dir next to this script as Chrome's profile/cache
    # directory; change the dir= argument to relocate it.
    current_folder = os.path.dirname(os.path.abspath(__file__))
    temp_dir = tempfile.mkdtemp(dir=current_folder)

    # Output directory for scraped articles, created on demand.
    # NOTE(review): this is an absolute path — confirm it is intentional.
    output_directory = "/bilibiliCreeper/output"
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)

    # First (headful) session: reuse saved cookies, or log in manually once.
    cookies_file = 'cookies.pkl'
    print("测试cookies文件是否已获取。若无，请在弹出的窗口中登录b站账号，登录完成后，窗口将关闭；若有，窗口会立即关闭")
    # ChromeDriverManager().install() fetches a matching chromedriver, so no
    # manual driver download/setup is needed.
    driver = webdriver.Chrome(service=Service(executable_path=ChromeDriverManager().install()))
    driver.get('https://space.bilibili.com/')
    if not load_cookies(driver, cookies_file):
        manual_login(driver, cookies_file)
    driver.quit()

    # Second (headless) session for the actual scraping, configured to keep
    # memory low: no images, muted audio, incognito, GPU disabled.
    chrome_options = Options()
    chrome_options.add_argument(f'--user-data-dir={temp_dir}')
    chrome_options.add_argument('--disable-plugins-discovery')
    chrome_options.add_argument('--mute-audio')
    chrome_options.add_argument('--headless')  # older Chrome builds may not support headless mode
    chrome_options.add_experimental_option("prefs", {"profile.managed_default_content_settings.images": 2})
    chrome_options.add_argument("--incognito")
    chrome_options.add_argument("--disable-gpu")  # avoid GPU-related browser crashes
    driver = webdriver.Chrome(service=Service(executable_path=ChromeDriverManager().install()), options=chrome_options)
    driver.get('https://space.bilibili.com/')
    load_cookies(driver, cookies_file)

    # Resume from saved progress when available.
    if os.path.exists("progress.txt"):
        with open("progress.txt", "r", encoding='utf-8') as f:
            progress = json.load(f)
    else:
        progress = {"article_count": 0, "first_comment_index": 0, "sub_page": 0, "write_parent": 0}

    with open('article_list.txt', 'r', encoding='utf-8') as f:
        article_urls = f.read().splitlines()

    # Number of already-scraped articles to skip when resuming.
    skip_count = progress["article_count"]
    global mini_flag
    mini_flag = True

    for url in article_urls:
        try:
            if skip_count > 0:
                skip_count -= 1
                continue
            # Extract the article id (e.g. cv12345) from the URL.
            article_id_search = re.search(r'https://www\.bilibili\.com/read/([^/?]+)', url)
            if article_id_search:
                article_id = article_id_search.group(1)
                print(
                    f'开始爬取第{progress["article_count"] + 1}个专栏{article_id}：请耐心等待')
            else:
                error_message = f'第{progress["article_count"] + 1}个专栏被跳过：无法从 URL {url}中提取 article_id'
                print(error_message)
                write_error_log(error_message)
                progress["article_count"] += 1
                continue

            driver.get(url)

            try:
                # Wait until the article container is attached to the DOM
                # (presence does not imply visibility).
                WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.CSS_SELECTOR, "#article-content")))
            except TimeoutException:
                error_message = f'第{progress["article_count"] + 1}个专栏被跳过：ID {article_id} URL {url}没有找到专栏内容属性或等了30秒还没加载出来'
                print(error_message)
                write_error_log(error_message)
                progress["article_count"] += 1
                continue

            title = driver.title
            soup = BeautifulSoup(driver.page_source, "html.parser")
            article_content_item = soup.find("div", class_="article-content")
            # Guard: without this, a missing container raised AttributeError,
            # which the broad except below swallowed WITHOUT advancing
            # progress — the same bad page was then re-scraped on resume.
            if article_content_item is None:
                error_message = f'第{progress["article_count"] + 1}个专栏被跳过：ID {article_id} URL {url}页面中未找到article-content内容节点'
                print(error_message)
                write_error_log(error_message)
                progress["article_count"] += 1
                continue
            # The article text lives in a nested holder div; fall back to an
            # empty string when it is absent.
            article_content_element = article_content_item.find(
                "div", class_="normal-article-holder read-article-holder")
            article_content = article_content_element.text if article_content_element is not None else ''
            formatted_text = article_content.strip().replace('  ', ' ')
            # Sanitize the title so it is usable as a filename.
            title = _sanitize_title(title)
            print(formatted_text)
            file_path = os.path.join(output_directory, title + ".txt")
            with open(file_path, 'w', encoding='utf-8') as file:
                # Prepend the article title to the saved text.
                file.write(title + '\n\n' + formatted_text)

            progress["article_count"] += 1
            save_progress(progress)

        except WebDriverException as e:
            print(f"可能网页崩溃或网络连接中断，正在尝试重新启动浏览器: {e}")
            # restart_browser(driver) temporarily disabled
        except Exception as e:
            print(f"[若这条报错反复发生，请终止程序并检查]发生其他未知异常，尝试重新启动浏览器: {e}")
            # restart_browser(driver) temporarily disabled
    driver.quit()
    # Remove the temporary Chrome profile dir; best-effort because Chrome may
    # still hold lock files (previously this directory leaked on every run).
    shutil.rmtree(temp_dir, ignore_errors=True)


if __name__ == "__main__":
    main()
