from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup


def fetch_dlt_results_with_selenium(url: str, count: int = 30, timeout: int = 15) -> list:
    """Scrape the latest Da Le Tou (DLT) draw results from an Angular-rendered page.

    Launches a headless Chrome instance, waits for the results table to be
    rendered into the DOM, then parses the rows with BeautifulSoup.

    Args:
        url: Page containing the results table (``table.kj_tablelist02``).
        count: Maximum number of draw rows to return (default 30).
        timeout: Seconds to wait for the first table row to appear.

    Returns:
        A list of dicts with keys "期数" (issue), "开奖日期" (draw date),
        "前区" (front-zone numbers) and "后区" (back-zone numbers).

    Raises:
        RuntimeError: If no data rows are found after rendering.
        selenium.common.exceptions.TimeoutException: If the table never loads
            within *timeout* seconds.
    """
    # Headless mode so the scraper runs in CI / server environments without a display.
    chrome_opts = Options()
    chrome_opts.add_argument("--headless")
    chrome_opts.add_argument("--disable-gpu")
    chrome_opts.add_argument("--no-sandbox")
    # If your environment needs a specific ChromeDriver binary, configure it here.
    driver = webdriver.Chrome(options=chrome_opts)

    try:
        driver.get(url)
        # Wait until Angular has rendered at least one data row into the table.
        WebDriverWait(driver, timeout).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "table.kj_tablelist02 tbody tr"))
        )

        # Parse the fully rendered HTML snapshot.
        soup = BeautifulSoup(driver.page_source, "html.parser")

        rows = soup.select("table.kj_tablelist02 tbody tr")[:count]
        if not rows:
            raise RuntimeError("渲染后未找到任何开奖数据，请检查表格选择器是否正确。")

        results = []
        for tr in rows:
            cols = [td.get_text(strip=True) for td in tr.find_all("td")]
            # Header/separator rows carry fewer than the four expected cells
            # (issue, date, front zone, back zone). Skip them instead of
            # crashing the whole scrape with an IndexError.
            if len(cols) < 4:
                continue
            results.append({
                "期数": cols[0],
                "开奖日期": cols[1],
                # Assumes front-zone numbers sit in the third cell and
                # back-zone in the fourth — verify against the live page
                # if the site's column layout changes.
                "前区": cols[2],
                "后区": cols[3],
            })

        return results

    finally:
        # Always shut the browser down, even after a timeout or parse failure.
        driver.quit()


def print_markdown_table(data: list):
    print("| 期数     | 开奖日期   | 前区号码         | 后区号码   |")
    print("| -------- | ---------- | ---------------- | ---------- |")
    for row in data:
        print(f"| {row['期数']} | {row['开奖日期']} | {row['前区']} | {row['后区']} |")


if __name__ == "__main__":
    URL = "https://www.zhcw.com/kjxx/dlt/"
    # Top-level boundary: report any scraping or rendering failure as a
    # message instead of a raw traceback.
    try:
        print_markdown_table(fetch_dlt_results_with_selenium(URL, count=30))
    except Exception as e:
        print(f"爬取或解析过程中出现错误：{e}")
