import random
import os, csv, re, time, math

from datetime import datetime, timedelta
from selenium.webdriver.chrome.service import Service
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from contextlib import contextmanager
from selenium.webdriver.support.ui import WebDriverWait as W
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By




# ---------------- Configuration ----------------
OUT_PATH = r"C:\Users\Administrator\Desktop\xueqiu_news_20241001_20251001.csv"  # CSV output file
START = datetime(2024, 10, 1, 0, 0, 0)     # intended window start: 2024-10-01 00:00:00 (not referenced in visible code — TODO confirm)
END   = datetime(2025, 10, 1, 0, 0,0)      # intended window end (not referenced in visible code — TODO confirm)
MAX_SCROLL = 120          # max scroll/load iterations, guards against endless crawling
PAUSE = 0.8               # seconds to pause after each scroll (not referenced in visible code — TODO confirm)
WAIT_SEC = 10             # explicit-wait timeout in seconds
BASE_URL = "https://www.xueqiu.com"

# NOTE(review): real account credentials hard-coded in source — move to env vars or a secrets store.
USERNAME = "16684054261"
PASSWORD = "981124zyj."
# ----------------------------------------



# ----------工具方法区，修改逻辑不动这里--------

# Build and return a configured Chrome WebDriver.
def init_driver():
    """Create a Chrome driver pointed at a local chromedriver binary,
    with basic tweaks that make automation less detectable."""
    driver_path = r'C:\Users\Administrator\.wdm\drivers\chromedriver\win64\140.0.7339.207\chromedriver-win32\chromedriver.exe'

    # Hide the usual automation fingerprints from the page.
    opts = webdriver.ChromeOptions()
    opts.add_argument('--disable-blink-features=AutomationControlled')
    opts.add_experimental_option("excludeSwitches", ["enable-automation"])
    # opts.add_argument('--headless')  # headless mode (kept disabled)

    # Service wraps the explicit driver path (recommended API).
    return webdriver.Chrome(service=Service(executable_path=driver_path), options=opts)

# Log in to Xueqiu.
def login_xueqiu(username, password, driver):
    """Open the Xueqiu home page and fill in the login form.

    Ends with a 20-second sleep so a human can complete any
    CAPTCHA / SMS verification step before crawling starts.
    """
    time.sleep(random.uniform(1.5, 2.5))

    # Step 1: open the site.
    driver.get(BASE_URL)

    # Step 2: switch to the username/password login tab.
    try:
        driver.find_element(
            By.XPATH, "//a[contains(text(),'账号密码登录')]"
        ).click()
        time.sleep(random.uniform(1, 2))
    except Exception as e:
        print("未找到账号密码登录按钮:", e)

    # Step 3: type the credentials, then wait for manual verification.
    try:
        field = driver.find_element(By.NAME, "username")
        field.clear()
        field.send_keys(username)

        field = driver.find_element(By.NAME, "password")
        field.clear()
        field.send_keys(password)

        time.sleep(20)
    except Exception as e:
        print("登录输入失败:", e)






# Locate post cards on the current page.
def find_news_cards(driver):
    """Collect as many post-card WebElements (<div class="timeline_item">)
    as possible from the main document. No parsing is done here."""
    # Make sure we are searching the top-level document, not an iframe.
    try:
        driver.switch_to.default_content()
    except Exception:
        pass

    # Give the feed up to 8 seconds to render before querying.
    try:
        W(driver, 8).until(
            EC.presence_of_all_elements_located(
                (By.CSS_SELECTOR, "div.timeline_item")
            )
        )
    except TimeoutException:
        pass  # fall through to the lookups below

    # Primary lookup: grab the cards directly.
    found = driver.find_elements(By.CSS_SELECTOR, "div.timeline_item")
    if found:
        print(f"DEBUG: div.timeline_item 命中 {len(found)}")
        return found

    # Fallback: locate known feed containers, then search inside each.
    for container in driver.find_elements(
        By.CSS_SELECTOR,
        ".home-timeline, .dvhome-timeline, .timeline, .dvstatus-list"
    ):
        inner = container.find_elements(By.CSS_SELECTOR, "div.timeline_item")
        if inner:
            print(f"DEBUG: 在容器内命中 {len(inner)}")
            return inner

    print("DEBUG: 未找到帖子元素，请检查页面结构。")
    return []




# Open the "资讯" (news) section.
def open_news_tab(driver):
    """Click the "资讯" tab on the home page.

    Tries an <a>-only locator first; if the page structure differs
    (e.g. a <span> tab), falls back to a broader locator.
    """
    primary = "//a[normalize-space()='资讯']"
    fallback = "//*[self::a or self::span][normalize-space()='资讯']"
    try:
        WebDriverWait(driver, WAIT_SEC).until(
            EC.element_to_be_clickable((By.XPATH, primary))
        ).click()
    except Exception:
        WebDriverWait(driver, WAIT_SEC).until(
            EC.element_to_be_clickable((By.XPATH, fallback))
        ).click()
    print("已进入资讯模块。")




# Wait for the news list to load.
def wait_for_news_list(driver):
    """Block until the feed's list container is present, so element
    lookups don't run against a half-loaded page."""
    locator = (By.XPATH, "//div[contains(@class,'list') or contains(@class,'feed')]")
    WebDriverWait(driver, WAIT_SEC).until(
        EC.presence_of_all_elements_located(locator)
    )
    print("资讯列表加载完成。")



# Create the CSV file and yield a writer for it.
@contextmanager
def open_csv_writer():
    """Context manager yielding (csv_writer, file_handle) for OUT_PATH.

    Writes the header row immediately; on exit the file is flushed,
    fsynced and closed so no buffered rows are lost.
    """
    os.makedirs(os.path.dirname(OUT_PATH), exist_ok=True)
    handle = open(OUT_PATH, "w", newline="", encoding="utf-8-sig")
    out = csv.writer(handle)
    out.writerow(["post_id", "title", "content", "user_id", "time", "comment_count", "like_count", "url"])
    try:
        yield out, handle
    finally:
        handle.flush()
        os.fsync(handle.fileno())
        handle.close()



# Click the "load more" button if present.
def click_load_more(driver) -> bool:
    """Click the "加载更多" (load more) button via JS if one becomes
    clickable within 2 seconds.

    Returns:
        True if the button was found and clicked, False otherwise.
    """
    locator = (By.XPATH, "//*[self::button or self::a][contains(.,'加载更多')]")
    try:
        button = WebDriverWait(driver, 2).until(EC.element_to_be_clickable(locator))
        # JS click avoids interception by overlays.
        driver.execute_script("arguments[0].click();", button)
        time.sleep(1)
        return True
    except Exception:
        return False




# Main crawl loop over the news feed.
def crawl_news_loop(driver, writer, f):
    """
    Repeatedly harvest the currently loaded posts and persist them.

    No time filtering is applied:
    - collect the post cards already rendered on the page
    - extract each card's fields
    - write rows to the CSV one at a time, flushing immediately
    - click "load more" or scroll to trigger new content

    Stops after MAX_SCROLL iterations, or earlier once two consecutive
    rounds add no page height (no new content loaded).
    """
    seen = set()            # dedup by post_id only
    stable_rounds = 0       # consecutive rounds with no page-height growth
    last_height = driver.execute_script("return document.body.scrollHeight")

    for _ in range(MAX_SCROLL):
        cards = find_news_cards(driver)
        print("DEBUG: 当前抓到帖子数 =", len(cards))
        if len(cards) == 0:
            # Dump context to help diagnose selector / page-structure problems.
            try:
                print("DEBUG URL:", driver.current_url)
                # Save the page source locally so the real DOM can be inspected.
                html_path = os.path.join(os.path.dirname(OUT_PATH), "debug_page.html")
                with open(html_path, "w", encoding="utf-8") as pf:
                    pf.write(driver.page_source)
                print("DEBUG: 已保存页面到", html_path)
            except Exception as e:
                print("DEBUG: 保存页面失败", e)

        for c in cards:
            # Title and permalink.
            title, href = extract_title_and_link(c)
            # Body text.
            content = extract_content(c)
            # Author's user ID.
            user_id = extract_user_id(c)
            # Timestamp: recorded only, never filtered on; blank if unparsable.
            dt = extract_datetime(c)
            time_str = dt.strftime("%Y-%m-%d %H:%M:%S") if dt else ""

            # Post ID for dedup; fall back to the link or title+user when absent.
            post_id = extract_post_id(href)
            if not post_id:
                post_id = href or (title + "_" + user_id)

            # Skip posts that were already written.
            if post_id in seen:
                continue
            seen.add(post_id)

            # Append the row to the CSV.
            writer.writerow([
                post_id, title, content, user_id,
                time_str,  # recorded only, not used for filtering
                *extract_comment_like(c),
                href
            ])
            f.flush()  # persist immediately

        # 2) Load more content: prefer the button, else scroll to the bottom.
        if not click_load_more(driver):
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")

        # 3) Anti-scraping throttle, plus a long cooldown every 30 rounds.
        time.sleep(random.uniform(0.8, 1.8))
        if _ % 30 == 0 and _ > 0:
            print("达到 30 次操作，冷却 20 秒以防封禁...")
            time.sleep(20)

        # 4) Stop once the page height stops growing for two rounds.
        new_height = driver.execute_script("return document.body.scrollHeight")
        if new_height <= last_height:
            stable_rounds += 1
            if stable_rounds >= 2:
                print("页面无新内容，结束爬取。")
                break
        else:
            stable_rounds = 0
        last_height = new_height


# Extract the post title and its link.
def extract_title_and_link(card):
    """Return (title, href) for a post card.

    Tries news/article anchors first, then status anchors. Returns
    ("", "") when no matching anchor is found.
    """
    title, href = "", ""
    for xp in [
        ".//a[contains(@href,'/news') or contains(@href,'/article')]",
        ".//a[contains(@href,'/status')]"
    ]:
        try:
            # BUG FIX: was card._element(...), which is not a WebElement
            # method — every call raised AttributeError, was swallowed by
            # the except, and the function always returned ("", "").
            a = card.find_element(By.XPATH, xp)
            title = a.text.strip()
            href = a.get_attribute("href") or ""
            if title or href:
                break
        except Exception:
            pass
    return title, href



# Extract the post body text.
def extract_content(card):
    """Return the first non-empty <p> or <div> text in the card, or ""."""
    xp = ".//p[string-length(normalize-space())>0] | .//div[string-length(normalize-space())>0]"
    try:
        return card.find_element(By.XPATH, xp).text.strip()
    except Exception:
        return ""



# Extract the author's user ID.
def extract_user_id(card):
    """Pull the numeric ID out of a /u/<id> profile link; "" on failure."""
    try:
        link = card.find_element(By.XPATH, ".//a[contains(@href,'/u/')]")
        match = re.search(r"/u/(\d+)", link.get_attribute("href") or "")
        if match:
            return match.group(1)
        return ""
    except Exception:
        return ""



# Extract the post's publish time.
def extract_datetime(card):
    """Try several locators for a timestamp element and parse its value.

    Returns a datetime on success, otherwise None. Prefers the element's
    `datetime` attribute over its visible text.
    """
    locators = [
        ".//time",
        ".//*[contains(@class,'time') or contains(text(),'分钟前') or contains(text(),'昨天') or contains(text(),'-')]",
    ]
    for xp in locators:
        try:
            node = card.find_element(By.XPATH, xp)
            parsed = parse_time(node.get_attribute("datetime") or node.text)
        except Exception:
            continue
        if parsed:
            return parsed
    return None



# Extract comment and like counts.
def extract_comment_like(card):
    """Return (comment_count, like_count); each defaults to 0 when the
    matching element is missing."""
    counts = [0, 0]
    xpaths = [
        ".//*[contains(.,'评论') or contains(@class,'comment')]",   # comments
        ".//*[contains(.,'赞') or contains(@class,'like')]",        # likes
    ]
    for i, xp in enumerate(xpaths):
        try:
            counts[i] = get_number_safe(card.find_element(By.XPATH, xp))
        except Exception:
            pass
    return counts[0], counts[1]



# Extract a post ID from its URL.
def extract_post_id(href):
    """Return the first run of digits following a '/' in *href*.

    Empty input yields ""; a link with no such digits is returned
    unchanged so it can still serve as a dedup key.
    """
    if not href:
        return ""
    match = re.search(r"/(\d+)", href)
    if match:
        return match.group(1)
    return href



# 将不同格式的时间字符串（如“3小时前”“昨天 12:30”“2024-10-11”）统一转成 datetime 对象
def parse_time(text: str) -> datetime | None:
    if not text:
        return None
    s = text.strip()

    # 统一分隔点、全角冒号、不可见空白
    s = s.replace("：", ":")
    s = re.sub(r"[·•・∙⋅]", " ", s)        # 各种小点 -> 空格
    s = s.replace("\u00A0", " ")           # 不换行空格
    s = s.replace("\u2009", " ").replace("\u200A", " ").replace("\u200B", "")
    s = re.sub(r"来自[^\s]*", "", s)       # 去“来自新闻/来自…”
    s = re.sub(r"\s+", " ", s)             # 压缩空白

    now = datetime.now()
    # 下面保持你原来的匹配，建议把 re.match 改成 re.search 以允许前后缀：
    if re.search(r"^\s*刚刚", s):
        return now
    m = re.search(r"(\d+)\s*分钟前", s)
    if m:
        return now - timedelta(minutes=int(m.group(1)))
    m = re.search(r"(\d+)\s*小时前", s)
    if m:
        return now - timedelta(hours=int(m.group(1)))
    m = re.search(r"昨天\s*(\d{1,2}):(\d{2})", s)
    if m:
        h, mi = map(int, m.groups())
        d = now.date() - timedelta(days=1)
        return datetime(d.year, d.month, d.day, h, mi)
    if re.search(r"\b昨天\b", s):
        d = now.date() - timedelta(days=1)
        return datetime(d.year, d.month, d.day, 0, 0)
    m = re.search(r"(\d{4})-(\d{1,2})-(\d{1,2})(?:[ T](\d{1,2}):(\d{2}))?", s)
    if m:
        y, mo, d, h, mi = m.groups()
        return datetime(int(y), int(mo), int(d), int(h or 0), int(mi or 0))
    m = re.search(r"(\d{1,2})-(\d{1,2})\s+(\d{1,2}):(\d{2})", s)
    if m:
        mo, d, h, mi = map(int, m.groups())
        y = datetime.now().year
        dt = datetime(y, mo, d, h, mi)
        return dt if dt <= now else datetime(y - 1, mo, d, h, mi)
    try:
        return datetime.fromisoformat(s.replace("T", " ").split("+")[0])
    except Exception:
        return None



# Pull an integer out of an element's text or aria-label (comment/like counts).
def get_number_safe(el) -> int:
    """Return the first integer in el.text, falling back to its aria-label.

    Returns 0 when *el* is None or neither source contains digits.
    The aria-label is only fetched if the text yields nothing.
    """
    if el is None:
        return 0
    from_text = re.search(r"\d+", (el.text or "").strip())
    if from_text:
        return int(from_text.group())
    from_aria = re.search(r"\d+", el.get_attribute("aria-label") or "")
    if from_aria:
        return int(from_aria.group())
    return 0



# Entry point.
def main():
    """Drive the whole crawl:

    1. Launch Chrome and log in to Xueqiu.
    2. Open the "资讯" (news) tab and wait for the feed to render.
    3. Loop: extract each post's fields (title, body, user ID, time,
       comment/like counts, link), appending rows to the CSV as it goes,
       using the "load more" button or auto-scroll to fetch more.
    """
    driver = init_driver()

    # Authenticate, then navigate to the news feed.
    login_xueqiu(USERNAME, PASSWORD, driver)
    open_news_tab(driver)
    wait_for_news_list(driver)

    # Stream rows straight into the CSV as they are scraped.
    with open_csv_writer() as (writer, f):
        crawl_news_loop(driver, writer, f)
    print(f"数据已保存到: {OUT_PATH}")


if __name__ == "__main__":
    main()



