import os
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import Process, Queue
from urllib import parse

import requests
from lxml import etree

'''
分析：
    进程1. 从主页面中解析出详情页的url，从详情页中提取到图片的下载地址
    进程2. 把拿到的下载地址，进行下载
    队列：需用 multiprocessing 里的 Queue 可以进行进程之间的通信
'''

def get_img_src(q):
    """Producer: scrape detail-page links from the list page, extract each
    image's download URL and put it on *q*.

    A sentinel string is put on the queue when scraping finishes so the
    consumer process knows it can stop waiting.

    :param q: multiprocessing.Queue shared with the downloader process
    """
    base_url = 'https://www.umei.cc'
    url = 'https://www.umei.cc/touxiangtupian/nvshengtouxiang/index_3.htm'
    # timeout prevents the producer from hanging forever on a stalled server;
    # if the producer died before sending the sentinel, the consumer would
    # block on q.get() indefinitely.
    resp = requests.get(url, timeout=10)
    resp.encoding = 'utf-8'
    tree = etree.HTML(resp.text)  # resp.text is the page source
    href_list = tree.xpath("//div[@class='item_t']/div/a/@href")
    for href in href_list:
        # urljoin handles relative/absolute hrefs; no manual '/' juggling
        href = parse.urljoin(base_url, href)
        child_resp = requests.get(href, timeout=10)
        child_resp.encoding = 'utf-8'
        child_tree = etree.HTML(child_resp.text)
        src_list = child_tree.xpath("//div[@class='tsmaincont-main-cont']//img/@src")
        if not src_list:
            # Detail page without the expected <img>: skip instead of raising
            # IndexError, which would kill the producer before the sentinel
            # is sent and deadlock the consumer.
            continue
        src = src_list[0]
        q.put(src)  # hand the URL to the downloader process
        print(f'{src},被塞进队列！')
    q.put('已完成')  # sentinel: tells the consumer no more URLs are coming

def download(url):
    """Download a single image and save it under D:\\ui\\reptile_next\\data_img.

    The HTTP request is made *before* the file is opened so a failed or
    timed-out request does not leave an empty file behind, and non-2xx
    responses raise instead of being written to disk as a bogus image.

    :param url: direct URL of the image to fetch
    :raises requests.HTTPError: if the server returns an error status
    """
    print('开始下载！', url)
    # Use the last path segment of the URL as the local file name.
    name = url.split('/')[-1]
    resp = requests.get(url, timeout=10)
    resp.raise_for_status()  # do not save 404/500 bodies as images
    with open(os.path.join(r'D:\ui\reptile_next\data_img', name), 'wb') as f:
        f.write(resp.content)
    print('下载完毕！', url)

def download_img(q):
    """Consumer: pull image URLs off *q* and download them concurrently.

    Blocks on the queue until the producer's sentinel string arrives,
    then lets the thread pool drain its pending downloads and returns.

    :param q: multiprocessing.Queue fed by the scraper process
    """
    with ThreadPoolExecutor(10) as pool:
        # iter(callable, sentinel) keeps calling q.get() — blocking when the
        # queue is empty — and stops as soon as the sentinel value appears.
        for link in iter(q.get, '已完成'):
            pool.submit(download, link)

if __name__ == '__main__':
    # multiprocessing.Queue provides the cross-process channel between
    # the scraper (producer) and the downloader (consumer).
    q = Queue()
    p1 = Process(target=get_img_src, args=(q,))   # producer: collects image URLs
    p2 = Process(target=download_img, args=(q,))  # consumer: downloads them
    p1.start()
    p2.start()
    # Explicitly reap both children so the parent exits only after all
    # scraping and downloading has finished.
    p1.join()
    p2.join()

