"""获取NASA的LROC数据集中的Lunar Orbiter Laser Altimeter (LOLA)数据集的数据范围"""

import requests
from bs4 import BeautifulSoup


def get_links(url):
    """Fetch *url* (an HTML directory listing) and return its links.

    Args:
        url: URL of a directory-index page.

    Returns:
        A ``(subdirectories, files)`` tuple of href strings: hrefs ending
        with ``"/"`` are treated as subdirectories, all others as files.

    Raises:
        requests.HTTPError: if the server responds with an error status.
    """
    # Timeout prevents the scrape from hanging forever on a stalled server.
    response = requests.get(url, timeout=30)
    response.raise_for_status()

    soup = BeautifulSoup(response.text, "html.parser")

    subdirectories = []
    files = []
    for link in soup.find_all("a"):
        href = link.get("href")
        if href is None:
            # <a> tags without an href (e.g. named anchors) carry no link;
            # the original code would crash on href.endswith() here.
            continue
        (subdirectories if href.endswith("/") else files).append(href)

    return subdirectories, files


def get_content(url):
    """Fetch a product README at *url* and extract its DTM extent values.

    Takes the text between the ``"DTM extents:"`` and ``"KEY FEATURES"``
    markers and returns the value after the colon on each of the first
    four lines (the min/max latitude/longitude entries).

    Args:
        url: URL of a README.TXT file.

    Returns:
        A list of four extent value strings.

    Raises:
        requests.HTTPError: if the server responds with an error status.
        IndexError: if the README lacks the expected markers or a line
            has no ``"label: value"`` colon.
    """
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    lines = (
        response.text.split("DTM extents:")[1]
        .split("KEY FEATURES")[0]
        .strip()
        .split("\n")
    )
    # Each of the first four lines is formatted "label: value"; keep the value.
    return [line.split(":")[1].strip() for line in lines[:4]]


if __name__ == "__main__":
    base_url = "https://pds.lroc.asu.edu/data/LRO-L-LROC-5-RDR-V1.0/LROLRC_2001/DATA/SDP/NAC_DTM/"
    subdirectories, files = get_links(base_url)
    with open("range.csv", "w") as f:
        print("Name,Min. Lat.,Min. Lon.,Max. Lat.,Max. Lon.", file=f, flush=True)
        for sub in subdirectories:
            if sub.startswith("/"):
                continue
            _, files = get_links(base_url + sub)
            for file in files:
                if file.endswith("README.TXT"):
                    print(
                        sub.replace("/", "")
                        + ","
                        + ",".join(get_content(base_url + sub + file)),
                        file=f,
                        flush=True,
                    )
