import requests
from urllib.parse import unquote
import os
import argparse
import re
# Open a download page with requests and download every downloadable file on it,
# recursing into sub-directories (multi-level iteration).
class MySession(object):
    """Recursively crawl an HTTP directory listing (e.g. ``python -m http.server``)
    and download every linked file, mirroring the remote folder structure
    under a local destination directory.
    """
    def __init__(self,url,path):
        # url:  root URL to start crawling from (expected to end in '/').
        # path: local destination directory prefix (expected to end in '/').
        self.url = url
        self.path = path
        self.session = requests.Session()
        self.headers = {}
        # NOTE(review): the four attributes below are never read or written
        # anywhere in this file; kept only for backward compatibility.
        self.cookies = {}
        self.sessid = {}
        self.tokens = {}
        self.magic_tokens = ''
    def get_urls(self,url = None):
        """Fetch *url* (defaults to ``self.url``).

        HTML responses are treated as directory listings: every ``<a href=``
        line is joined to *url* and recursed into.  Any other content type is
        streamed to disk under ``self.path``, recreating the URL's path
        components as local directories.
        """
        if url is None:
            url = self.url
        # Browser-like headers so servers that sniff the UA behave normally.
        self.headers = {
            'Accept-Encoding': 'gzip, deflate, br, zstd',
            'sec-ch-ua-mobile': '0',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-User': '?1',
            'sec-ch-ua': '"Not)A;Brand";v="8", "Chromium";v="138", "Google Chrome";v="138"',
            'Sec-Fetch-Site': 'none',
            'sec-ch-ua-platform': '"Windows"',
            }

        # verify=False accepts self-signed certificates.  stream=True so a
        # large file is not buffered fully in memory before write_file()
        # streams it to disk (accessing .text below still works and simply
        # consumes the stream for the HTML case).
        first_get = self.session.get(url, verify = False, headers = self.headers, stream = True)
        # Decide: directory listing (HTML) → recurse; anything else → save.
        # .get() avoids a KeyError when the server omits Content-Type.
        if "text/html" in first_get.headers.get("Content-Type", ""):
            list1 = first_get.text
            # If the URL points at an explicit page (e.g. .../index.html),
            # strip the file name so relative hrefs resolve against the
            # directory.  (The old `url.replace(unquoted_name, '')` silently
            # no-opped on percent-encoded URLs because the unquoted segment
            # never matched the encoded text.)
            if "html" in url:
                print(url)
                url = url[:url.rfind('/') + 1]
            # Extract every link line and recurse into it.
            for i in list1.split('\n'):
                if "<a href=" in i :
                    tmpurl = url + i.split('"')[1]
                    print(tmpurl)
                    self.get_urls(tmpurl) # recursive descent
        else:
            # Recreate the remote directory tree locally; elements [3:-1]
            # skip 'scheme:', '' and the host, and drop the file name.
            pathname = [unquote(k) for k in url.split('/')]
            pathname_tmp = self.path
            for i in pathname[3:-1]:
                pathname_tmp += i + '/'
            create_directories(pathname_tmp)
            # Save under the decoded remote file name.
            filename = pathname_tmp + unquote( url.split('/')[-1] )
            print(filename)
            write_file(first_get, filename)
    def session_close(self):
        """Close the underlying requests.Session and its pooled connections."""
        self.session.close()
def write_file(request, filename):
    """Persist the body of *request* (a requests.Response) to *filename*.

    The response is consumed lazily in 8 KiB chunks so arbitrarily large
    downloads never have to fit in memory.
    """
    chunks = request.iter_content(chunk_size=8192)
    with open(filename, 'wb') as sink:
        sink.writelines(chunks)


def create_directories(directories):
    """Ensure the directory path *directories* exists.

    Missing parent directories are created as needed; an already-existing
    path is accepted silently (``exist_ok=True``).
    """
    os.makedirs(directories, exist_ok=True)
    print("Directory '%s' created." % directories)

def check_url(url):
    """Normalize *url* for the recursive downloader.

    - Prepends ``http://`` when no scheme is present.
    - Appends a trailing ``/`` when the URL is a bare host or ends in a
      directory-like segment, so recursion can append child names to it.
    - Leaves URLs whose last segment looks like a file name (``name.ext``,
      e.g. ``index.html``) untouched.

    Returns the normalized URL string.
    """
    # Matches a "name.ext" file name (no path separators / forbidden chars).
    pattern = r'^(?P<filename>[^\\/:*?"<>|\r\n]+)\.(?P<extension>[^\\/:*?"<>|\r\n]+)$'
    # Add a scheme when missing.  (The old check `'http' not in url_list[0]`
    # mis-handled host names that contain "http", e.g. httpbin.org.)
    if not url.startswith(('http://', 'https://')):
        url = 'http://' + url
    url_list = url.split('/')
    print(url_list)

    # Index of the last path component.  (The old
    # `url_list.index(url_list[-1])` returned the FIRST occurrence and broke
    # when the last segment repeated an earlier one, e.g. http://a.com/a.com.)
    pos = len(url_list) - 1
    print(pos)
    if pos == 2:                    # bare "scheme://host" — treat as a directory
        url += '/'
    elif url_list[-1] != '':        # last segment is neither host nor empty:
        # append '/' unless it looks like a downloadable file (name.ext).
        match = re.match(pattern, url_list[-1])
        if not match:
            print('not /')
            url += '/'
    return url
def main():
    """Parse command-line arguments and run the recursive download."""
    # Defaults used when the corresponding flag is not supplied.
    url = "https://www.python.org/static/img/python-logo.png"
    path = './'

    # Build the command-line interface.
    parser = argparse.ArgumentParser(
        description='便捷的python http.server下载工具，可以下载web页面所有文件以及多级子目录文件，并保留文件夹结构',
        epilog='使用示例: python py-wget.py -u http://127.0.0.1:8080 -d /tmp/downloads'
    )
    parser.add_argument('-u', '--url', help='指定python http.server的url地址')
    parser.add_argument('-d', '--dest', help='指定下载目录，默认使用同一文件夹')

    args = parser.parse_args()

    # With no arguments at all, just show the usage text and stop.
    if not any(vars(args).values()):
        parser.print_help()
        return

    if args.dest is not None:
        print(f"文件存储地址: {args.dest}")
        path = args.dest
    else:
        path = "./"

    if args.url is not None:
        print(f"目标url地址: {args.url}")
        url = check_url(args.url)

    # Pre-bind so the finally clause cannot raise NameError if MySession's
    # constructor itself fails before `get` is assigned.
    get = None
    try:
        get = MySession(url,path)
        get.get_urls()
    except requests.exceptions.ConnectionError as e:
        print("无法连接到目标服务器，请确认：")
        print("- 目标服务是否已启动")
        print("- 端口是否被防火墙阻挡")
        print("- 是否有其他程序占用了端口")
        print(f"错误详情：{e}")
    finally:
        if get is not None:
            get.session_close()

# Run the CLI entry point only when executed as a script (not on import).
if __name__ == "__main__":
    main()

