import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options

def crawl_data(baseurl, max_pages=5):
    """Crawl course comments from *baseurl*, paging through the comment list.

    Args:
        baseurl: URL of the course page to crawl.
        max_pages: Upper bound on the number of comment pages to visit
            (default 5); the site's actual page count is used if smaller.

    Returns:
        set[str]: Unique comment texts collected across all visited pages.
    """
    print("开始爬取......")
    datalist = set()  # a set keeps comments unique across pages

    # Launch a headless Chrome browser.
    options = Options()
    options.add_argument('--headless')
    options.add_argument('--disable-gpu')
    browser = webdriver.Chrome(options=options)

    try:
        browser.get(baseurl)
        time.sleep(2)  # wait for the initial page load

        # Switch to the comment tab so the comment list becomes visible.
        showCommentSection(browser)

        # Determine how many pages of comments exist.
        total_pages = getTotalPages(browser)
        print(f"总页数: {total_pages}")

        # Cap the number of pages actually crawled.
        total_pages = min(total_pages, max_pages)

        # Scrape each page in turn.
        for page_num in range(1, total_pages + 1):
            print(f"正在爬取第 {page_num} 页......")
            page_comments = getComments(browser)  # comments on the current page

            # set.update() de-duplicates automatically.
            datalist.update(page_comments)

            # Advance to the next page unless we are already on the last one.
            if page_num < total_pages:
                try:
                    next_button = WebDriverWait(browser, 10).until(
                        EC.element_to_be_clickable((By.CLASS_NAME, "ux-pager_btn__next"))
                    )
                    next_button.click()
                    time.sleep(2)  # wait for the next page to load
                except Exception as e:
                    print("翻页失败:", e)
                    break

    finally:
        browser.quit()  # always release the browser, even on failure

    return datalist


def showCommentSection(browser):
    """Reveal the comment area by activating the 'course evaluation' tab."""
    try:
        waiter = WebDriverWait(browser, 10)
        locator = (By.ID, "j-course-info-tab-comment")
        comment_tab = waiter.until(EC.element_to_be_clickable(locator))
        comment_tab.click()
        time.sleep(2)  # allow the comment content to render after the click
    except Exception as e:
        print("无法显示评论区，错误信息:", e)


def getTotalPages(browser):
    """Parse the pager links on the current page and return the highest
    page number, falling back to 1 when none can be found."""
    html = BeautifulSoup(browser.page_source, "html.parser")
    try:
        # Numeric pager links carry the site-specific class below.
        labels = (anchor.get_text() for anchor in html.find_all('a', class_="th-bk-main-gh"))
        numbers = [int(text) for text in labels if text.isdigit()]
        return max(numbers, default=1)
    except Exception as e:
        print("解析总页数时出错:", e)
        return 1


def getComments(browser):
    """Extract the comment texts from the currently loaded page.

    Args:
        browser: A selenium WebDriver whose current page contains the
            comment list.

    Returns:
        set[str]: Unique, whitespace-stripped comment strings found on
        the current page.
    """
    comment_data = set()  # set avoids duplicate comments within the page
    soup = BeautifulSoup(browser.page_source, "html.parser")

    # Each comment body lives in a div with this site-specific class.
    comment_list = soup.find_all('div', class_="ux-mooc-comment-course-comment_comment-list_item_body_content")

    for item in comment_list:
        span = item.find('span')
        # Bug fix: find() returns None when the item has no <span>, which
        # previously raised AttributeError — skip such malformed items.
        if span is None:
            continue
        comment = span.get_text(strip=True)  # strip surrounding whitespace

        if comment:  # ignore empty comments
            comment_data.add(comment)
            print(f"找到评论: {comment}")

    return comment_data
