
import requests
from openpyxl import Workbook
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse
import re
from tqdm import tqdm

def getFilename(url, visited=None):
    """Fetch *url* and return the absolute URL of every ``<a href>`` on the page.

    Parameters
    ----------
    url : str
        Page to scrape.
    visited : set[str] | None
        URLs already processed; if *url* is present an empty list is
        returned.  A fresh set is created when ``None`` so no mutable
        default is shared across calls.

    Returns
    -------
    list[str]
        Absolute link URLs, or an empty list on any failure (non-200
        status or a raised exception).
    """
    if visited is None:
        visited = set()

    # Skip URLs we have already processed.
    if url in visited:
        return []
    visited.add(url)

    try:
        # Explicit timeout so a dead server cannot hang the script forever.
        response = requests.get(url, timeout=30)
        if response.status_code != 200:
            print("无法访问")
            return []
        # Parse the HTML and resolve every anchor's href against the page URL.
        soup = BeautifulSoup(response.text, 'html.parser')
        links = [urljoin(url, a['href']) for a in soup.find_all('a', href=True)]
        print("所有父级文件夹已提取完成！")

    except Exception as e:
        print(f"爬取 {url} 时出错: {e}")
        return []

    return links



def getShadowUrl2xls(tif_file_list, output_path="shadow_id_URL_tif.xlsx"):
    """Scrape every directory URL in *tif_file_list* and save the
    (image id, tif URL) pairs into an Excel workbook.

    Parameters
    ----------
    tif_file_list : iterable[str]
        Directory-listing URLs, each passed to ``shadowUrlList``.
    output_path : str
        Destination for the workbook; the default preserves the
        previously hard-coded file name, so existing callers are
        unaffected.
    """
    wb = Workbook()
    ws = wb.active

    # Header row.
    ws.append(["影像编号", "URL"])

    for url in tqdm(tif_file_list, desc="处理URL进度：", unit=""):
        # Each row is an (image id, url) pair.
        # NOTE: renamed the loop variable — the original shadowed the
        # builtin ``tuple``.
        for row in tqdm(shadowUrlList(url), desc=f"解析 {url}", unit="条", leave=False):
            ws.append(row)

    wb.save(output_path)

    print("URL已爬取完成")



def shadowUrlList(url, visited=None):
    """Scrape a directory listing and build (image id, tif URL) pairs.

    Only anchors whose ``href`` begins with ``'M'`` are kept — presumably
    the ShadowCam observation directories (e.g. ``M012345678S/``); for
    each one the expected ``<id>_map_raw.tif`` download URL is built.

    Parameters
    ----------
    url : str
        Directory-listing page to scrape.
    visited : set[str] | None
        URLs already processed; if *url* is present an empty list is
        returned.  A fresh set is created when ``None`` so no mutable
        default is shared across calls.

    Returns
    -------
    list[tuple[str, str]]
        ``(shadow_id, tif_url)`` pairs, or an empty list on any failure.
    """
    if visited is None:
        visited = set()

    # Skip URLs we have already processed.
    if url in visited:
        return []
    visited.add(url)

    shadow_tuple_links = []
    try:
        # Explicit timeout so a dead server cannot hang the script forever.
        response = requests.get(url, timeout=30)
        if response.status_code != 200:
            print("链接访问失败："+url)
            return []
        soup = BeautifulSoup(response.text, "html.parser")

        for a in soup.find_all('a', href=True):
            href = a['href']
            # str.startswith replaces the redundant bool(re.match(r'^M', ...)).
            if href.startswith('M'):
                absolute_url = urljoin(url, href)
                # Directory hrefs look like "M.../", so the id is the first segment.
                shadow_id = href.split('/')[0]
                tif_name = shadow_id + "_map_raw.tif"
                # rstrip avoids a double slash when the resolved URL already
                # ends with '/' (the original produced ".../M.../​/file.tif").
                end_url = absolute_url.rstrip('/') + "/" + tif_name
                # (image id, url)
                shadow_tuple_links.append((shadow_id, end_url))

    except Exception as e:
        print(f"爬取 {url} 时出错: {e}")
        return []

    return shadow_tuple_links




if __name__ == "__main__":

    # Directory listing of all 2024 ShadowCam observations.
    base_url = "https://pds.shadowcam.im-ldi.com/observation/2024/"

    all_links = getFilename(base_url)

    # Drop the first six links — presumably navigation / sort-order anchors
    # emitted by the server's index page rather than observation folders.
    # TODO(review): confirm the offset against the live listing.
    observation_urls = all_links[6:]

    getShadowUrl2xls(observation_urls)

