# test01_requests.py  获取新浪首页所有图片
# 1.将新浪首页源代码下载到本地
#   1.1 文件不存在则下载  os.path.exists(....)
# 2.使用正则匹配出所有图片的url地址(list)  11:21
#   fname: 源文件地址  re_str: 目标匹配数据的正则字符串
#   def get_url(fname, re_str):
#       2.1 按行读(readline/readlines)
#       2.2 通过search匹配图片的url地址
#       2.3 如果匹配成功，将url添加到列表中
#       2.4 将存储图片的列表返回
# 3.遍历list，下载每一张图片到本地(/opt/mypics)
import requests, os, re, time, threading
def download(url, fname, timeout=30):
    """Fetch *url* with an HTTP GET and write the raw response body to *fname*.

    Used both for the HTML page and for the binary image files, hence the
    "wb" mode and ``resp.content`` (bytes, no text decoding).

    :param url: address to fetch
    :param fname: local path to write the response body to
    :param timeout: seconds before the request aborts; without a timeout
        ``requests.get`` can block forever on a stalled server
    """
    resp = requests.get(url, timeout=timeout)
    with open(fname, mode="wb") as fw:
        fw.write(resp.content)
def get_url(fname, re_str):
    """Return a list of every image URL in the file *fname* that matches *re_str*.

    Bug fixed: the original used ``re.search`` once per line, which captures at
    most ONE URL per line — any line holding several ``<img>`` tags silently
    lost images. ``finditer`` collects them all, in order of appearance.

    :param fname: path of the saved HTML source file
    :param re_str: regex string describing an image URL
    :return: list of matched URL strings (may be empty)
    """
    re_obj = re.compile(re_str)  # compile once, reuse for every line
    pic_list = []  # collected image URLs
    # encoding/errors chosen for robustness: the page was saved as raw bytes,
    # so stray non-UTF-8 sequences must not crash the scan
    with open(fname, mode="r", encoding="utf-8", errors="replace") as fr:
        for line in fr:  # stream line by line; avoids readlines() loading all at once
            for match in re_obj.finditer(line):
                pic_list.append(match.group())
    return pic_list
if __name__ == '__main__':
    url = "https://www.sina.com.cn/"
    fname = "/opt/mysina.html"       # cached copy of the page source
    pic_dir = "/opt/mypics"          # local directory for the downloaded images
    if not os.path.exists(fname):
        download(url, fname)         # download the page source only if not cached
    if not os.path.exists(pic_dir):
        os.mkdir(pic_dir)            # create the target directory on first run
    # raw string: \w and \. are regex escapes, not string escapes — the
    # non-raw form triggers invalid-escape warnings on modern Python
    re_str = r"(http|https)://[\w\./-]+\.(jpg|png|jpeg|gif)"
    plist = get_url(fname, re_str)
    start = time.time()
    threads = []
    for url in plist:
        path = pic_dir + "/" + os.path.basename(url)
        # one thread per image: the GIL is released during blocking I/O,
        # so the downloads overlap
        t = threading.Thread(target=download, args=(url, path))
        t.start()
        threads.append(t)
    # Bug fix: without join() the elapsed time only measured thread LAUNCH,
    # not the downloads themselves (hence the misleading 0.29s figure below).
    for t in threads:
        t.join()
    end = time.time()
    print("spend:", end - start)
    # spend: 4.750348091125488  (sequential)
    # spend: 0.290719985961914  (threaded, but measured without join)
