import os
import argparse
import requests
from bs4 import BeautifulSoup
from typing import List
from urllib.parse import urljoin, urlparse


class WebPageDownloader:
    """Download files with selected extensions that are linked from a web page.

    Each file is stored under ``download_dir/<extension>/`` plus up to the
    last two directory components of the file's URL path, so files coming
    from different server directories do not overwrite each other.
    """

    # Seconds before a stalled request is abandoned (connect + read).
    REQUEST_TIMEOUT = 30

    # Used when the caller does not supply a custom User-Agent.
    DEFAULT_USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36 NetType/WIFI MicroMessenger/7.0.20.1781(0x6700143B) WindowsWechat(0x63090c0f) XWEB/11275 Flue'

    def __init__(self, url: str, extensions: str, download_dir: str, user_agent: str = None):
        """
        Initialize the downloader.

        :param url: URL of the web page to scrape.
        :param extensions: File extensions to download, separated by semicolons
            (e.g. ``"pdf;jpg"``). Whitespace around entries is ignored and
            empty entries are dropped.
        :param download_dir: Target directory for downloaded files.
        :param user_agent: Optional custom User-Agent string.
        """
        self.url = url
        # Trim whitespace and drop empty entries so inputs like "pdf; jpg;"
        # do not produce bogus extension folders.
        self.extensions = [ext.strip() for ext in extensions.split(';') if ext.strip()]
        # Normalize Windows-style separators so later path handling is uniform.
        self.download_dir = download_dir.replace('\\', '/')
        self.user_agent = user_agent or self.DEFAULT_USER_AGENT
        self.session = requests.Session()
        self.session.headers.update({'User-Agent': self.user_agent})

    def create_directories(self) -> None:
        """Create one download sub-directory per configured extension."""
        for ext in self.extensions:
            os.makedirs(os.path.join(self.download_dir, ext), exist_ok=True)

    def download_file(self, file_url: str, extension: str) -> None:
        """
        Download a single file, mirroring up to two URL path directories.

        :param file_url: Absolute URL of the file.
        :param extension: Extension bucket (top-level folder) for the file.
        """
        with self.session.get(file_url, stream=True, timeout=self.REQUEST_TIMEOUT) as response:
            if response.status_code != 200:
                # Best-effort: report and skip rather than abort the whole run.
                print(f"Skipping {file_url}: HTTP {response.status_code}")
                return

            path = urlparse(file_url).path
            file_name = os.path.basename(path)

            # Keep at most the last two *directory* components of the URL
            # path — never the file name itself. (Previously, paths with
            # fewer than two directories wrongly included the file name in
            # the directory path, creating a folder named after the file.)
            dir_parts = [part for part in path.split('/')[:-1] if part]
            sub_dirs = os.path.join(*dir_parts[-2:]) if dir_parts else ''

            full_download_dir = os.path.join(self.download_dir, extension, sub_dirs)
            os.makedirs(full_download_dir, exist_ok=True)

            file_path = os.path.join(full_download_dir, file_name)

            # Stream to disk in chunks so large files are not held in memory.
            with open(file_path, 'wb') as file:
                for chunk in response.iter_content(chunk_size=8192):
                    file.write(chunk)

            print(f"Downloaded {file_name} to {file_path}")

    def get_links(self, soup: "BeautifulSoup") -> List[str]:
        """
        Collect every hyperlink target from the parsed page.

        :param soup: BeautifulSoup object containing the page content.
        :return: List of ``href`` values from all anchor tags.
        """
        return [link.get('href') for link in soup.find_all('a', href=True)]

    def fetch_and_download_files(self) -> None:
        """Fetch the page, then download every link matching an extension."""
        response = self.session.get(self.url, timeout=self.REQUEST_TIMEOUT)
        if response.status_code != 200:
            print(f"Failed to fetch {self.url}: HTTP {response.status_code}")
            return

        soup = BeautifulSoup(response.text, 'html.parser')
        links = self.get_links(soup)
        print(f"Found {len(links)} links.")

        for link in links:
            # Match against the URL path only, so query strings such as
            # "?v=1" do not prevent extension matching.
            path = urlparse(link).path
            for ext in self.extensions:
                if path.endswith(ext):
                    # Rebuild the absolute URL (handles relative links).
                    file_url = urljoin(self.url, link)
                    print(f"Downloading {file_url}...")
                    self.download_file(file_url, ext)
                    break  # One download per link; stop at first match.

    @staticmethod
    def parse_arguments() -> argparse.Namespace:
        """
        Parse command-line arguments.

        :return: Parsed argument namespace.
        """
        parser = argparse.ArgumentParser(description="Download files from a webpage.")
        parser.add_argument('url', type=str, help='The URL of the webpage to download files from.')
        parser.add_argument('extensions', type=str, help='File extensions to download, separated by semicolons.')
        parser.add_argument('download_dir', type=str, help='Directory to download files to.')
        parser.add_argument('--user-agent', type=str, help='Custom user-agent string.')
        return parser.parse_args()


def main():
    """Entry point: parse CLI arguments and run the downloader."""
    options = WebPageDownloader.parse_arguments()
    app = WebPageDownloader(
        options.url,
        options.extensions,
        options.download_dir,
        options.user_agent,
    )
    app.create_directories()
    app.fetch_and_download_files()


if __name__ == "__main__":
    main()
