import time
import csv
from selenium import webdriver
from selenium.webdriver.edge.service import Service
from selenium.webdriver.common.by import By



def _extract_card_row(driver, card):
    """Parse one Weibo card element into a CSV row.

    Args:
        driver: the active Selenium WebDriver (needed to click the
            "expand" button via JavaScript).
        card: a WebElement for one ``<article class="Feed_wrap_...">``.

    Returns:
        ``[publish_time, weibo_text, reposts, comments, likes]`` — all
        strings; counters default to ``"0"`` when missing.
    """
    publish_time = ""
    weibo_text = ""
    reposts = "0"
    comments = "0"
    likes = "0"

    # (a) Publish time: anchor whose class contains "time_".
    # Weibo's build appends random suffixes to class names, so only the
    # stable prefix is matched.
    try:
        time_elem = card.find_element(By.XPATH, ".//a[contains(@class, 'time_')]")
        publish_time = time_elem.get_attribute("title") or time_elem.text
    except Exception as e:
        print("【警告】发布时间解析失败：", e)

    # (b) Post body: click the "expand" button first, if present, so the
    # full text is loaded. Best-effort — any failure just means we keep
    # the truncated text. (Narrowed from a bare except so Ctrl-C still works.)
    try:
        expand_button = card.find_element(By.XPATH, ".//span[contains(text(), '展开')]")
        driver.execute_script("arguments[0].click();", expand_button)
        time.sleep(0.3)  # give the full content a moment to render
    except Exception:
        pass  # no expand button (or click failed) — post already fully visible

    try:
        text_elem = card.find_element(By.XPATH, ".//div[contains(@class, 'detail_wbtext_')]")
        weibo_text = text_elem.text.strip()
    except Exception as e:
        print("【警告】微博正文解析失败：", e)

    # (c) Reposts / comments / likes from the footer toolbar. The toolbar
    # items carry text like "转发 12"; stripping the label leaves the count,
    # and an empty remainder means zero.
    try:
        footer = card.find_element(By.TAG_NAME, "footer")
        tool_items = footer.find_elements(By.XPATH, ".//*[contains(@class, 'toolbar_item_')]")
        if len(tool_items) > 0:
            reposts = tool_items[0].text.replace("转发", "").strip() or "0"
        if len(tool_items) > 1:
            comments = tool_items[1].text.replace("评论", "").strip() or "0"
        if len(tool_items) > 2:
            try:
                like_elem = tool_items[2].find_element(By.XPATH, ".//*[contains(@class, 'woo-like-count')]")
                likes = like_elem.text.strip() or "0"
            except Exception:
                likes = "0"  # like counter absent — treat as zero
    except Exception as e:
        print("【警告】转评赞数据解析失败：", e)

    return [publish_time, weibo_text, reposts, comments, likes]


def scrape_weibo_with_slow_scroll():
    """Scrape Weibo posts from a target page by scrolling in small steps.

    Workflow:
    1. Open the Weibo login page with the Edge driver and wait for the
       user to log in manually;
    2. Navigate to the target Weibo page and load it gradually via small
       scroll increments;
    3. After each scroll, collect the currently visible Weibo cards
       (matched with partial class-name selectors, because the concrete
       class suffixes change between Weibo builds);
    4. Deduplicate cards via a fingerprint of each card's innerHTML and
       accumulate rows in memory;
    5. Finally write all collected rows to ``weibo_data.csv``.
    """
    # === 1. Configure the Edge driver ===
    edge_driver_path = r"C:\Program Files (x86)\Microsoft\Edge\Application\msedgedriver.exe"
    service = Service(executable_path=edge_driver_path)
    driver = webdriver.Edge(service=service)

    try:
        # === 2. Open the Weibo login page and wait for manual login ===
        login_url = "https://weibo.com/login.php"
        driver.get(login_url)
        time.sleep(10)  # let the page load; the user logs in by hand

        print("请在打开的 Edge 浏览器中完成微博登录，登录成功后回车继续...")
        input()  # press Enter once logged in

        # === 3. Navigate to the target page ===
        target_url = "https://weibo.com/u/1678429834?key_word=%E5%A1%8C%E9%99%B7&is_ori=1&is_forward=1&end_time=1744646400"
        driver.get(target_url)
        time.sleep(5)

        # === 4. Scroll incrementally and collect data ===
        scroll_increment = 200    # pixels per scroll step
        pause_between_scroll = 1  # seconds to wait after each step
        total_scroll = 0
        last_page_height = driver.execute_script("return document.body.scrollHeight")

        # NOTE(review): the fingerprint is the card's raw innerHTML, so a
        # card whose counters change between scrolls may be collected twice.
        seen_cards = set()
        weibo_data = []  # rows of [publish_time, text, reposts, comments, likes]

        # Scroll step by step; stop once we have passed the page bottom.
        while total_scroll < last_page_height:
            driver.execute_script("window.scrollTo(0, arguments[0]);", total_scroll)
            time.sleep(pause_between_scroll)
            total_scroll += scroll_increment

            # Lazy loading can grow the page; keep the target height current.
            new_page_height = driver.execute_script("return document.body.scrollHeight")
            if new_page_height > last_page_height:
                last_page_height = new_page_height

            # Partial class-name match avoids the volatile generated suffixes.
            cards = driver.find_elements(By.XPATH, "//article[contains(@class, 'Feed_wrap_')]")
            for card in cards:
                card_html = card.get_attribute("innerHTML")
                if card_html in seen_cards:
                    continue
                seen_cards.add(card_html)
                weibo_data.append(_extract_card_row(driver, card))

        print(f"累计捕获微博数：{len(weibo_data)}")

        # === 5. Write the collected data to a CSV file ===
        # utf-8-sig adds a BOM so Excel opens the Chinese text correctly.
        with open("weibo_data.csv", "w", encoding="utf-8-sig", newline="") as f:
            writer = csv.writer(f)
            writer.writerow(["发布时间", "微博正文", "转发数", "评论数", "点赞数"])
            writer.writerows(weibo_data)

        print("爬取完成，数据已写入 weibo_data.csv")

    finally:
        driver.quit()


# Run the scraper only when executed as a script, not on import.
if __name__ == "__main__":
    scrape_weibo_with_slow_scroll()
