'''
reference: https://blog.csdn.net/qq_43482310/article/details/105773036
'''
import requests
import os
from bs4 import BeautifulSoup
from urllib.parse import quote
import string


def getHTMLText(url):
    """Fetch the page at *url* and return its decoded text.

    Returns an empty string on any request failure (timeout, connection
    error, or HTTP 4xx/5xx status).
    """
    headers = {'user-agent': 'Mozilla/5.0'}  # some servers reject the default python-requests UA
    try:
        r = requests.get(url, timeout=30, headers=headers)
        r.raise_for_status()  # turn HTTP error statuses into exceptions
        r.encoding = r.apparent_encoding  # guess encoding from the body, not just headers
        return r.text
    except requests.RequestException:  # narrow catch: was a bare `except:`, which
        return ''                      # also swallowed KeyboardInterrupt/SystemExit


def downLoad(uri, filename):
    """Download *filename* from the base URL *uri* into ./tmp.

    Skips files that already exist locally. The URL is percent-encoded so
    filenames containing non-ASCII characters still work. Failures are
    reported on stdout rather than raised, so a batch run can continue.
    """
    root = './tmp'  # directory that receives the downloaded files
    os.makedirs(root, exist_ok=True)
    path = os.path.join(root, filename)  # e.g. "./tmp/DIM.pdf"
    if os.path.exists(path):
        # Check BEFORE fetching — the original issued the GET request first
        # and threw the response away, downloading the file needlessly.
        print('file already exists')
        return
    # Build the URL with '/' explicitly: os.path.join would insert '\\' on
    # Windows, producing an invalid URL.
    url = quote(uri.rstrip('/') + '/' + filename, safe=string.printable)
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()  # don't save an HTML error page as the file
        with open(path, 'wb') as f:  # binary mode: content may be PDF, images, ...
            f.write(r.content)
        print("saving file")
    except Exception as e:  # best-effort: report and let the caller move on
        print(e)
        print(f"failed to download file from {url}")


def main(uri="http://172.27.172.181:8000"):
    """Crawl the page at *uri* and download every file it links to.

    Assumes each downloadable file appears as an <a> tag whose text is the
    filename — TODO confirm against the server's actual listing format.
    The default URL preserves the original hard-coded target; pass *uri*
    to crawl a different server.
    """
    plain_text = getHTMLText(uri)  # '' on failure; BeautifulSoup handles that fine
    soup = BeautifulSoup(plain_text, features="html.parser")
    for tag in soup.find_all('a'):  # files are exposed as hyperlinks
        name = tag.string
        if not name:
            # <a> tags with nested markup have tag.string == None, which
            # would crash downLoad's path handling — skip them.
            continue
        print(f"downloading {name}")
        downLoad(uri, name)


if __name__ == "__main__":  # guard so importing this module doesn't start crawling
    main()