import hashlib
import os
import re
import subprocess
import time
from urllib.parse import urlparse, urlunparse, parse_qs, urlencode

from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

# Configure Chrome to run headless (no visible browser window).
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-gpu")
chrome_options.add_argument("--no-sandbox")

# Launch the Selenium-controlled Chrome browser (module-level side effect:
# a real browser process starts on import/run).
driver = webdriver.Chrome(options=chrome_options)

# Starting URL of the crawl: an ERDA share listing page.
start_url = "https://sid.erda.dk/cgi-sid/ls.py?share_id=GxrFNGtykS"

# URL prefixes the crawler is allowed to follow; every other href is skipped.
allowed_domains = [
    "https://sid.erda.dk/cgi-sid/ls.py?share_id=GxrFNGtykS&current",
]

# Output locations: collected file links go into the user's Downloads folder.
download_path = os.path.join(os.path.expanduser("~"), "Downloads")
file_links_path = os.path.join(download_path, "file_links.txt")

# MD5 digests of page sources already processed, used to skip pages whose
# content is identical even when reached through different URLs.
visited_hashes = set()

# URL normalization helper
def normalize_url(url):
    """Return *url* with any ``page`` query parameter removed.

    Used to treat paginated views of the same listing as one visited URL.
    All other query parameters are kept (order preserved).
    """
    parts = urlparse(url)
    params = parse_qs(parts.query)
    params.pop("page", None)
    rebuilt_query = urlencode(params, doseq=True)
    return urlunparse(parts._replace(query=rebuilt_query))

# Check whether a URL falls under one of the allowed prefixes
def is_allowed_domain(href):
    """Return True if *href* starts with one of the allowed URL prefixes."""
    for prefix in allowed_domains:
        if href.startswith(prefix):
            return True
    return False

# Compute a content hash of the currently loaded page
def get_page_hash():
    """Return the MD5 hex digest of the current page's HTML source."""
    source_bytes = driver.page_source.encode("utf-8")
    return hashlib.md5(source_bytes).hexdigest()

# NOTE(review): no page has been loaded at this point, so this refreshes a
# blank page — presumably leftover from earlier debugging; confirm and remove.
driver.refresh()
time.sleep(2)  # wait for the refresh to settle


# Recursive crawl function
def get_all_links(current_url, visited, depth=6):
    """Recursively collect downloadable file links starting at *current_url*.

    Follows sub-folder links (hrefs containing ``current_dir=`` and
    ``flags=f``) up to *depth* levels deep.  Skips URLs already in *visited*
    (compared after :func:`normalize_url`) and pages whose content hash is
    already in the module-level ``visited_hashes`` set.

    Returns a flat list of file URLs ending in .svg/.png/.bedgraph/.gz.
    """
    if depth == 0:
        print(f"递归深度达到限制，停止抓取: {current_url}")
        return []

    normalized_url = normalize_url(current_url)
    if normalized_url in visited:
        print(f"已访问过链接，跳过: {normalized_url}")
        return []

    print(f"正在访问: {current_url}，当前深度: {depth}")
    visited.add(normalized_url)

    driver.get(current_url)

    try:
        WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.TAG_NAME, "a")))
    except Exception as e:
        print(f"页面加载超时或失败: {e}")
        return []

    time.sleep(3)  # extra settle time for dynamically rendered content

    # Skip pages whose exact content we have already processed (the same
    # listing can be reachable through several different URLs).
    page_hash = get_page_hash()
    if page_hash in visited_hashes:
        print(f"发现重复页面，跳过: {current_url}")
        return []
    visited_hashes.add(page_hash)

    # BUG FIX: snapshot every href BEFORE iterating.  The recursive call
    # below navigates the shared driver away from this page, which makes the
    # original WebElement handles stale and would raise
    # StaleElementReferenceException on the next loop iteration.
    hrefs = [a.get_attribute("href") for a in driver.find_elements(By.TAG_NAME, "a")]

    all_links = []
    for href in hrefs:
        if not href or not is_allowed_domain(href):
            continue

        # Don't loop back to the start page.
        if href == start_url or normalize_url(href) == normalize_url(start_url):
            print(f"忽略返回起始页面的链接: {href}")
            continue

        if re.search(r"\.(svg|png|bedgraph|gz)$", href):
            print(f"发现文件链接: {href}")
            all_links.append(href)
        elif "current_dir=" in href and "flags=f" in href:
            print(f"发现子文件夹链接: {href}")
            all_links.extend(get_all_links(href, visited, depth - 1))

    return all_links

# Main program
def main():
    """Crawl the share, save every discovered file link, then download them.

    Writes one URL per line to ``file_links_path`` and hands the list to
    ``wget`` for the actual downloads.
    """
    print(f"下载路径: {download_path}")
    visited = set()

    all_file_links = get_all_links(start_url, visited, depth=6)

    # Persist the link list so downloads can be re-run without re-crawling.
    with open(file_links_path, "w") as f:
        for link in all_file_links:
            f.write(link + "\n")
    print(f"所有文件链接已保存到: {file_links_path}")

    local_download_dir = os.path.join(download_path, "downloaded_files")
    os.makedirs(local_download_dir, exist_ok=True)

    # Use an argument list with subprocess.run (shell=False) instead of
    # os.system with an interpolated shell string: this is immune to
    # spaces or shell metacharacters in the paths.
    wget_command = [
        "wget", "-i", file_links_path,
        "--cut-dirs=1", "--no-parent", "--recursive", "--no-clobber",
        "--accept=svg,png,bedgraph,gz",
        "-P", local_download_dir,
    ]
    print(f"执行 wget 命令: {' '.join(wget_command)}")
    # check=False mirrors the old os.system behavior: a wget failure is
    # reported by wget itself but does not abort the script.
    subprocess.run(wget_command, check=False)

    print("下载完成，所有文件和目录已复现到本地。")

if __name__ == "__main__":
    try:
        main()
    finally:
        driver.quit()
