import re
from dataclasses import dataclass
from datetime import datetime
from typing import List

import requests
from bs4 import BeautifulSoup


@dataclass
class FileInfo:
    """Metadata for one file entry scraped from a mirror's directory listing.

    Attributes:
        file_name: Name of the file as shown in the listing (link text).
        file_url: Absolute download URL (mirror base URL + link href).
        file_size: Human-readable size string from the listing (e.g. "700M").
        file_update_time: Last-modified timestamp parsed from the listing.
    """
    file_name: str
    file_url: str
    file_size: str
    file_update_time: datetime


def get_file_infos(mirror_url, file_extension_includes=None) -> List[FileInfo]:
    """Scrape an nginx/Apache autoindex page and return metadata for its files.

    Args:
        mirror_url: Base URL of the directory listing. File hrefs are appended
            to it directly, so it should normally end with '/'.
        file_extension_includes: Optional list of filename suffixes; when
            non-empty, only files ending with one of them are returned.

    Returns:
        A list of FileInfo entries; empty on HTTP error or when the page has
        no file links. Rows that fail to parse are skipped with a message.
    """
    if file_extension_includes is None:
        file_extension_includes = []
    file_info_list: List[FileInfo] = []
    # Browser-like headers: some mirrors reject obvious script user agents.
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/116.0.0.0 Safari/537.36"
    }
    # NOTE(review): verify=False disables TLS certificate validation — kept for
    # compatibility with self-signed mirrors, but confirm this is intentional.
    response = requests.get(mirror_url, headers=headers, verify=False)
    if response.status_code != 200:
        print(response.status_code)
        return file_info_list
    soup = BeautifulSoup(response.text, 'html.parser')
    # The first <a> is the '../' parent-directory link; skip it.
    links = soup.find_all('a')[1:]
    if not links:
        return file_info_list
    # The listing's plain text holds one "<name>  <date> <time>  <size>" row
    # per line; keep the raw rows so timestamp/size can be matched by name.
    lines = links[0].parent.text.strip().split('\r\n')
    time_format = "%d-%b-%Y %H:%M"  # nginx autoindex style, e.g. "01-Jan-2023 12:34"
    for link in links:
        file_name = link.text
        if file_extension_includes and not any(
                file_name.endswith(ext) for ext in file_extension_includes):
            continue
        file_url = mirror_url + link['href']
        # Locate the raw text row belonging to this file name.
        line = None
        for raw in lines:
            parts = raw.split()
            # Guard against empty rows: raw.split() on "" yields [] and
            # [0] would raise IndexError.
            if parts and parts[0] == file_name:
                line = raw
                break
        if line is None:
            # No matching row (unexpected markup) — skip instead of passing
            # None into re.split, which would raise TypeError.
            continue
        # Columns are separated by runs of 2+ spaces: name, timestamp, size.
        infos = re.split(r'\s{2,}', line)
        if len(infos) < 3:
            continue  # malformed row; infos[2] below would raise IndexError
        try:
            parsed_time = datetime.strptime(infos[1], time_format)
        except ValueError as e:
            print("解析失败，错误信息:", e)
            continue
        file_info_list.append(FileInfo(file_name, file_url, infos[2], parsed_time))
    return file_info_list
