# crawler.py
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from urllib.parse import urljoin


def course_data(baseurl, max_pages=5):
    """Crawl course links from a paginated listing, driving a headless Chrome.

    Args:
        baseurl: URL of the first listing page.
        max_pages: Upper bound on the number of pages to crawl. Defaults to 5
            (the value previously hard-coded; the old comment claiming "10"
            was wrong).

    Returns:
        set[str]: Deduplicated absolute course URLs collected across pages.
    """
    print("开始爬取......")
    datalist = set()  # a set deduplicates links automatically

    # Launch a headless Chrome instance.
    options = Options()
    options.add_argument('--headless')
    options.add_argument('--disable-gpu')
    browser = webdriver.Chrome(options=options)

    try:
        browser.get(baseurl)
        time.sleep(2)  # wait for the initial page load

        # Determine how many pages the listing reports.
        total_pages = getTotalPages(browser)
        print(f"总页数: {total_pages}")

        # Never crawl more than max_pages pages.
        pages_to_crawl = min(total_pages, max_pages)

        for page_num in range(1, pages_to_crawl + 1):
            print(f"正在爬取第 {page_num} 页......")
            # Scrape the currently rendered page and merge (dedupe) results.
            datalist.update(getData(browser, baseurl))

            # Advance to the next page unless this was the last one.
            if page_num < pages_to_crawl:
                try:
                    next_button = WebDriverWait(browser, 10).until(
                        EC.element_to_be_clickable((By.LINK_TEXT, "下一页"))
                    )
                    next_button.click()
                    time.sleep(2)  # wait for the new page to render
                except Exception as e:
                    # Pagination failed (button missing/stale); keep what we have.
                    print("翻页失败:", e)
                    break
    finally:
        browser.quit()  # always release the browser, even on error

    return datalist


def getTotalPages(browser):
    """Return the largest page number shown in the pagination bar.

    Parses the browser's current HTML and collects every numeric
    ``<a class="th-bk-main-gh">`` label. Falls back to 1 when no numeric
    pagination links exist or parsing raises.
    """
    soup = BeautifulSoup(browser.page_source, "html.parser")
    try:
        numbers = []
        for anchor in soup.find_all('a', class_="th-bk-main-gh"):
            label = anchor.get_text()
            if label.isdigit():
                numbers.append(int(label))
        if numbers:
            return max(numbers)
        return 1
    except Exception as e:
        print("解析总页数时出错:", e)
        return 1


def getData(browser, baseurl):
    """Collect absolute course links from the currently rendered page.

    Scans every ``<div class="g-mn2c m-cnt">`` container for anchors,
    resolves relative hrefs against *baseurl*, and skips anchors carrying
    the ``f-fc9`` class as well as channel-page URLs.

    Returns:
        set[str]: Unique absolute course URLs found on this page.
    """
    found = set()
    soup = BeautifulSoup(browser.page_source, "html.parser")

    for container in soup.find_all('div', class_="g-mn2c m-cnt"):
        for anchor in container.find_all('a', href=True):
            website = urljoin(baseurl, anchor['href'])  # normalize relative links

            is_dimmed = 'f-fc9' in anchor.get('class', [])
            is_channel = website.startswith("https://www.icourse163.org/channel/")
            if is_dimmed or is_channel:
                continue

            found.add(website)
            print(f"找到课程链接: {website}")

    return found
