import os
import shutil
from urllib.parse import urljoin
from urllib.request import urlopen

from bs4 import BeautifulSoup


def mkdir(path):
    """Create directory *path* (including missing parents) if it does not exist.

    :param path: directory path to create
    """
    existed = os.path.exists(path)
    # exist_ok=True lets makedirs itself tolerate a concurrently-created
    # directory, avoiding the check-then-create (TOCTOU) race of the
    # original exists()/makedirs() sequence.
    os.makedirs(path, exist_ok=True)
    if not existed:
        print("---  new folder establish  ---")


def downlaod2local(url, file_path):
    """
    Download the resource at *url* and save it to *file_path*.

    :param url: URL of the file to fetch
    :param file_path: local path the downloaded bytes are written to
    """
    # Context managers guarantee the connection and the output file are
    # closed even if the transfer fails partway through (the original
    # leaked both on any exception).
    with urlopen(url, timeout=10) as u, open(file_path, 'wb') as f:
        # Stream in chunks instead of u.read() so large files do not have
        # to fit in memory at once.
        shutil.copyfileobj(u, f)
    print(file_path + '下载成功')


def crawl2local(url, kind, save_path='.\\UpData\\', number=None):
    """
    Crawl one page and download every linked file whose href contains *kind*.

    :param url: page URL whose <a> tags are scanned
    :param kind: substring a href must contain to be downloaded (e.g. a suffix)
    :param save_path: directory the downloaded files are stored in
    :param number: maximum number of files to download; None means all
    """
    soup = BeautifulSoup(urlopen(url), 'html.parser')
    mkdir(save_path)
    count = 0
    for anchor in soup.findAll('a'):
        if number is not None and count == number:
            break
        href = anchor.get("href")
        # Anchors without an href attribute yield None; skip them instead
        # of raising TypeError on the `in` test.
        if href and kind in href:
            count += 1
            print(href)
            # BUG FIX: the original concatenated the module-level global
            # `link` instead of the `url` parameter, so the function only
            # worked when run as this script with that exact global set.
            # urljoin also resolves absolute/relative hrefs correctly.
            downlaod2local(urljoin(url, href), save_path + href)


if __name__ == '__main__':
    # Repodata index page of the openEuler 20.03 LTS source repository.
    link = 'http://121.36.97.194/openEuler-20.03-LTS/source/repodata/'
    # Download every linked file whose href contains 'xml' into .\UpData\.
    crawl2local(link, 'xml')
