import json
import os
import random
import time

from fake_useragent import UserAgent
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.edge.options import Options
from selenium.webdriver.edge.service import Service
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

from ip_list import get_random_ip

# Proxy pool and WebDriver configuration.
# proxy_pool starts empty and is filled lazily by load_proxy_pool().
proxy_pool = []

# Filesystem path to the Edge WebDriver executable (msedgedriver.exe).
edge_driver_path = "C:/xiaoyh/edge_driver/msedgedriver.exe"

def load_proxy_pool():
    """Rebuild the module-level proxy pool with 10 fresh proxies."""
    global proxy_pool
    fresh_pool = []
    for _ in range(10):  # pool size: 10 proxies per refill
        fresh_pool.append(get_random_ip())
    proxy_pool = fresh_pool


def get_random_proxy():
    """Return one proxy chosen at random, refilling the pool first if empty."""
    if not proxy_pool:
        load_proxy_pool()
    return random.choice(proxy_pool)


def configure_driver_with_proxy(proxy_ip):
    """Build a headless Edge WebDriver that routes traffic through
    *proxy_ip* and presents a randomized User-Agent string."""
    browser_options = Options()
    flags = (
        '--headless',                        # run without a visible window
        '--disable-gpu',                     # disable GPU acceleration
        '--no-sandbox',                      # work around permission issues
        f'--proxy-server={proxy_ip}',        # route requests via the proxy
        f"user-agent={UserAgent().random}",  # randomized UA per driver
    )
    for flag in flags:
        browser_options.add_argument(flag)
    service = Service(edge_driver_path)
    return webdriver.Edge(service=service, options=browser_options)


def restart_driver():
    """Create and return a replacement WebDriver behind a freshly chosen proxy."""
    fresh_proxy = get_random_proxy()
    print(f"正在切换到新的代理: {fresh_proxy}")
    return configure_driver_with_proxy(fresh_proxy)


# Persist scraping progress to disk
def save_progress(progress_file, data):
    """Overwrite *progress_file* with *data* as pretty-printed UTF-8 JSON."""
    serialized = json.dumps(data, ensure_ascii=False, indent=4)
    with open(progress_file, "w", encoding="utf-8") as fh:
        fh.write(serialized)

# Dynamic-wait helper
def wait_for_elements(context, by, value, timeout=10):
    """Block until every element matching (by, value) is present in
    *context*, then return the list of matched elements.

    Raises selenium's TimeoutException if nothing appears within
    *timeout* seconds.
    """
    locator = (by, value)
    waiter = WebDriverWait(context, timeout)
    return waiter.until(EC.presence_of_all_elements_located(locator))

# Load scraping progress from disk
def load_progress(progress_file):
    """Return previously saved progress parsed from *progress_file*.

    Returns:
        The parsed progress dict, or {} when the file does not exist
        or contains invalid JSON.
    """
    try:
        with open(progress_file, "r", encoding="utf-8") as file:
            return json.load(file)
    except FileNotFoundError:
        return {}
    except json.JSONDecodeError:
        # A truncated/corrupt progress file (e.g. the process died
        # mid-write) should restart from scratch, not crash the run.
        return {}


# Fetch the article list page by page, resuming from saved progress
def get_list(driver, url, target_count, name, progress_file):
    """Scrape article entries from paginated listing pages until
    *target_count* items have been collected, persisting progress after
    every page so an interrupted run can resume where it left off.

    Args:
        driver: Selenium WebDriver instance; always quit on exit.
        url: base listing URL; "&page=N" is appended for each page.
        target_count: stop once this many articles are collected.
        name: unused here; kept for caller compatibility — TODO confirm.
        progress_file: JSON path used by load_progress/save_progress.

    Returns:
        The accumulated list of article dicts. May be shorter than
        target_count if an error aborted the run early.
    """
    progress = load_progress(progress_file)
    start_page = progress.get("last_page", 1)  # resume from last page
    data_list = progress.get("data", [])

    try:
        # Already done on a previous run — nothing to scrape.
        if len(data_list) >= target_count:
            print(f"已达到目标数量 {target_count}，停止抓取。")
            return data_list

        for page_num in range(start_page, 1000):  # hard cap on pages scraped
            print(f"正在抓取第 {page_num} 页")
            driver.get(url + f"&page={page_num}")
            time.sleep(random.randint(1, 3))  # crude human-like delay

            # Collect every article card on the current page.
            article_elements = wait_for_elements(driver, By.XPATH, "//div[@class='left']//div[@class='gushici']")
            for article in article_elements:
                if len(data_list) >= target_count:
                    break
                try:
                    title_p = article.find_element(By.CLASS_NAME, "tit")
                    title_a = title_p.find_element(By.TAG_NAME, "a")
                    title = title_a.text.strip()

                    source_p = article.find_element(By.CLASS_NAME, "source")
                    sources = source_p.find_elements(By.TAG_NAME, "a")
                    dynasty = sources[0].text.strip()
                    author = sources[1].text.strip()

                    content_div = article.find_element(By.CLASS_NAME, "gushici-box-text")
                    content = content_div.text.strip()

                    tags_div = article.find_element(By.CLASS_NAME, "tag")
                    tags = tags_div.find_elements(By.TAG_NAME, "a")
                    tags_str = ",".join([tag.text.strip() for tag in tags])

                    if title:
                        data_list.append({
                            "title": title,
                            "dynasty": dynasty,
                            "author": author,
                            "content": content,
                            "tags": tags_str
                        })

                except Exception as e:
                    # One malformed card shouldn't abort the page.
                    print(f"解析单篇文章失败: {e}")

            # Persist after every page so a crash loses at most one page.
            progress = {
                "last_page": page_num + 1,
                "data": data_list
            }
            save_progress(progress_file, progress)

            if len(data_list) >= target_count:
                break

    except Exception as e:
        print(f"发生错误: {e}")
    finally:
        driver.quit()

    # BUG FIX: the original fell off the end and returned None after a
    # completed scrape; only the already-done early exit returned data.
    return data_list


# ---- Script entry: configure the run, start the browser, scrape ----
progress_file = "progress.json"  # resume-state file
url = "https://www.gushici.net/chaxun/all/"
target_count = 10000
name = "example_name"

load_proxy_pool()
driver = configure_driver_with_proxy(get_random_proxy())
get_list(driver, url, target_count, name, progress_file)
