import json
import os
import random
import time
import logging
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.edge.options import Options
from selenium.webdriver.edge.service import Service
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from sortedcontainers import SortedSet
from fake_useragent import UserAgent
from ip_list_author import get_proxy_pool
from tenacity import retry, stop_after_attempt, wait_fixed, retry_if_exception_type

# Configure root logging for the whole script.
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)

# Path to the Edge WebDriver executable (msedgedriver).
edge_driver_path = "C:/xiaoyh/edge_driver/msedgedriver.exe"

# Launch options for the Edge browser.
options = Options()
options.add_argument('--headless')  # headless mode: no visible browser window
options.add_argument('--disable-gpu')  # disable GPU acceleration
options.add_argument('--no-sandbox')  # avoid sandbox permission issues
ua = UserAgent()
options.add_argument(f"user-agent={ua.random}")  # randomize the user agent once per run
proxy_pool = get_proxy_pool()  # proxy addresses supplied by the ip_list_author module

def get_random_proxy():
    """Pick one proxy address at random from the module-level proxy pool."""
    chosen_index = random.randrange(len(proxy_pool))
    return proxy_pool[chosen_index]

def set_proxy(driver, proxy_ip):
    """Build and return a fresh headless Edge driver routed through *proxy_ip*.

    NOTE(review): the *driver* passed in is never quit here, and the new
    instance is only returned — the replaced browser process appears to
    leak unless the caller quits it; confirm against the call sites.
    """
    logger.info(f"设置代理: {proxy_ip}")
    fresh_options = webdriver.EdgeOptions()
    for argument in ('--headless',
                     '--disable-gpu',
                     '--no-sandbox',
                     f'--proxy-server={proxy_ip}'):
        fresh_options.add_argument(argument)
    return webdriver.Edge(service=Service(edge_driver_path), options=fresh_options)

def wait_for_elements(driver, by, value, timeout=10):
    """Wait up to *timeout* seconds for all elements matching (by, value).

    Returns the located elements, or an empty list when the wait fails.
    """
    locator = (by, value)
    waiter = WebDriverWait(driver, timeout)
    try:
        found = waiter.until(EC.presence_of_all_elements_located(locator))
    except Exception as e:
        logger.error(f"等待元素加载失败: {e}")
        return []
    return found

def wait_for_element(driver, by, value, timeout=10):
    """Wait up to *timeout* seconds for one element matching (by, value).

    Returns the element, or None when the wait fails.
    """
    locator = (by, value)
    try:
        element = WebDriverWait(driver, timeout).until(
            EC.presence_of_element_located(locator)
        )
    except Exception as e:
        logger.error(f"等待元素加载失败: {e}")
        return None
    return element

def get_next_page_link(driver):
    """Return the href of the "view more" button (class "amore"), or None."""
    try:
        more_button = driver.find_element(By.CLASS_NAME, "amore")
        href = more_button.get_attribute("href")
    except Exception as e:
        logger.warning(f"获取下一页链接失败: {e}")
        return None
    return href


# Directory where scraped author data is written.
output_dir = "output/selenium/zuozhe"

def save_data(data_list, name):
    """Write *data_list* to <output_dir>/<name>.json as pretty-printed UTF-8 JSON.

    Creates the output directory on demand. When the list is empty, logs a
    warning and writes nothing.
    """
    # Guard clause: nothing to persist.
    if not data_list:
        logger.warning(f"{name} 没有可抓取的文章数据，跳过写入。")
        return
    os.makedirs(output_dir, exist_ok=True)
    filename = os.path.join(output_dir, f"{name}.json")
    with open(filename, "w", encoding="utf-8") as file:
        json.dump(data_list, file, ensure_ascii=False, indent=4)
    # Bug fix: the success message previously printed a literal placeholder
    # instead of the actual file path.
    logger.info(f"已保存 {name} 的数据，共 {len(data_list)} 条数据，文件: {filename}")

# Retry decorator: on any exception, retry up to 3 times with a 3-second pause.
@retry(stop=stop_after_attempt(3), wait=wait_fixed(3), retry=retry_if_exception_type(Exception))
def get_list(driver, url, target_count, name, data_list):
    """Scrape author name/description entries from *url* into *data_list*.

    Collects author cards from the loaded page until *data_list* holds
    *target_count* entries, then persists them via save_data(). Any failure
    is logged and re-raised so the tenacity decorator retries the call.
    """
    try:
        logger.info(f"开始抓取: {url} - {name}")
        driver.get(url)
        # Continue numbering from whatever the caller already collected.
        index = len(data_list) + 1

        while len(data_list) < target_count:
            collected_before = len(data_list)
            # Locate every author card on the current page.
            author_elements = wait_for_elements(driver, By.XPATH, "//div[@class='right']//div[@class='shici-pic']",
                                                timeout=10)
            logger.info(f"当前页面抓取到 {len(author_elements)} 位作者信息")

            for author in author_elements:
                if len(data_list) >= target_count:
                    break  # reached the target count
                try:
                    author_box = author.find_element(By.CLASS_NAME, "shici-pic-box")
                    # First <p> is the name, second <p> is the description;
                    # query the paragraphs once instead of twice.
                    paragraphs = author_box.find_elements(By.TAG_NAME, "p")
                    author_name = paragraphs[0].text.strip()
                    author_desc = paragraphs[1].text.strip()

                    data_list.append({"index": index, "name": author_name, "desc": author_desc})
                    index += 1
                except Exception as author_error:
                    logger.error(f"解析单篇作者信息失败: {author_error}")

            logger.info(f"已抓取 {len(data_list)} 条数据")

            if len(data_list) >= target_count:
                logger.info("已达到目标数量，停止抓取。")
                break

            # Bug fix: the loop never navigates to another page, so a pass
            # that adds nothing would previously spin forever. Stop instead.
            if len(data_list) == collected_before:
                logger.warning("本轮未抓取到新数据，提前停止以避免死循环。")
                break

        # Persist whatever was collected for this author.
        save_data(data_list, name)
    except Exception as e:
        logger.error(f"发生错误: {e}")
        # NOTE(review): the replacement driver is bound only to this local
        # name — tenacity re-invokes get_list with the ORIGINAL driver
        # argument, so this swap never takes effect on retry and the new
        # browser instance appears to leak; confirm and fix at the caller.
        driver = set_proxy(driver, get_random_proxy())
        raise e  # re-raise so the retry decorator fires

# Load the list of authors to scrape.
file_path = "C:\\Users\\xiaoyh\\Desktop\\authors.json"
with open(file_path, "r", encoding="utf-8") as file:
    data = json.load(file)

# Launch the Microsoft Edge browser.
driver = webdriver.Edge(service=Service(edge_driver_path), options=options)
index = 1

# Bug fix: ensure the output directory exists before listing it — on a first
# run os.listdir would otherwise raise FileNotFoundError.
os.makedirs(output_dir, exist_ok=True)

# Iterate over every author entry.
for item in data:
    data_list = []  # collected entries for this author
    logger.info(f"第 {index} 作者: {item['name']}")
    # Skip authors that already have an output file (case-insensitive match).
    expected_file = f"{item['name'].lower()}.json"
    if any(f.lower() == expected_file for f in os.listdir(output_dir)):
        logger.info(f"已存在 {item['name']} 的数据，跳过。")
        index += 1
        continue
    time.sleep(random.randint(1, 2))  # pause 1-2 seconds to mimic a human
    try:
        get_list(driver, "https://www.gushici.net/chaxun/zuozhe/" + item['name'], 1, item['name'], data_list)
    except Exception:
        logger.error(f"抓取 {item['name']} 失败，重试三次后仍然失败。")
    index += 1

driver.quit()
logger.info("程序已退出")
