import aiohttp
import asyncio
from bs4 import BeautifulSoup
from lxml import etree
import requests
import time
import random
from concurrent.futures import ThreadPoolExecutor

# Umei gallery crawler
# 2021/11/14 17:48

# 1. Run the index pages on a thread pool, assigning different tasks to several threads
# 2. One thread per sub-page
# 3. Crawl the gallery tabs with coroutines

# Site root; relative gallery links parsed from listing pages are joined onto this.
index = "https://www.umei.cc"

# Browser User-Agent header sent by getResponce() so requests look like a real browser.
agent ={
    "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36"
}

# HTTPS proxy used by getResponce(); NOTE(review): free proxy address, likely stale — verify before running.
proxies = {
    "https":"113.238.142.208:3128"
}
# e.g. "https://www.umei.cc/meinvtupian/meinvxiezhen/"
# Download helper
async def aioDownload(url, save_dir="H:\\belle\\"):
    """Download one image to *save_dir*, then pause 1.5-2.0 s to throttle requests.

    url      -- direct image URL; the text after the last '/' becomes the file name.
    save_dir -- destination directory (defaults to the original hard-coded path).
    """
    file_name = url.rsplit("/", 1)[1]  # everything after the last '/'
    file_path = save_dir + file_name
    # Original code rebound the session variable `s` to the sleep duration after
    # the context exited; distinct names keep that readable.
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            data = await resp.content.read()  # body read is asynchronous
    with open(file_path, mode="wb") as f:
        f.write(data)
    # Random delay so downloads are not fired back-to-back.
    delay = random.randint(1500, 2000) / 1000
    await asyncio.sleep(delay)

async def asyncObject(urls):
    """Crawl every listing page in *urls* concurrently on the current event loop."""
    # asyncio.wait() no longer accepts bare coroutines (removed in Python 3.11)
    # and raises ValueError on an empty set; gather handles both correctly.
    await asyncio.gather(*(getContent(url) for url in urls))

async def getContent(url):
    """Fetch one listing page, parse its gallery links, and crawl each gallery.

    Galleries are crawled sequentially within this coroutine; concurrency comes
    from asyncObject() running several getContent() calls at once.
    """
    async with aiohttp.ClientSession() as s:
        async with s.get(url) as resp:
            html = await resp.text()
    linkList = paserHtml(html)
    # BUG FIX: the original `range(len(linkList) - 1)` skipped the last gallery.
    for link in linkList:
        await sonTabs(link)

def get_Picture(link):
    """Fetch one gallery page and return the image URL inside its ImageBody div.

    Sends the shared User-Agent header for consistency with getResponce()
    (the default requests UA is commonly blocked by this site).
    Raises AttributeError if the expected div/img structure is missing.
    """
    response = requests.get(link, headers=agent)
    html = response.content.decode("utf-8")
    soup = BeautifulSoup(html, "html.parser")
    ImageBody = soup.find("div", class_="ImageBody")
    return ImageBody.find("img").get('src')

def get_Picture_number(url):
    """Return the gallery's page count (as a string) read from its last pagination link."""
    page_html = requests.get(url).content.decode("utf-8")
    dom = etree.HTML(page_html)
    hrefs = dom.xpath('/html/body/div[2]/div[12]/a/@href')
    # The final pagination href looks like '<id>_<pages>.htm'; extract <pages>.
    last_href = hrefs[-1]
    return last_href.rsplit('_', 1)[1].rsplit('.', 1)[0]

def joint(p1, p2):
    """Build the URL of gallery page p2+1 from the first-page URL p1.

    p1 -- first-page URL ending in '.htm' (e.g. '.../1234.htm')
    p2 -- zero-based page offset; page p2+1 is addressed as '<stem>_<p2+1>.htm'
    Returns the joined URL, or None (after printing an error) when the
    resulting page number would not be positive.
    """
    page = p2 + 1
    if page <= 0:
        # Preserve the original best-effort behaviour: report and return None.
        print("错误")
        return None
    stem = p1.rsplit(".", 1)[0]  # drop the '.htm' suffix
    return f"{stem}_{page}.htm"

#子选项卡 gallery task ---------------------------------
async def sonTabs(url):
    """Crawl one gallery: collect every image URL across its pages, then download each."""
    pnumber = int(get_Picture_number(url))
    picture_Link = []

    # Page 1 is the bare url; page i+1 (i >= 1) is addressed via joint(url, i).
    # BUG FIX: the original `range(pnumber - 1)` never fetched the final page.
    for i in range(pnumber):
        if i == 0:
            picture_Link.append(get_Picture(url))
        else:
            picture_Link.append(get_Picture(joint(url, i)))

    count = 0
    for link in picture_Link:
        await aioDownload(link)
        # BUG FIX: time.sleep(5) blocked the whole event loop inside an
        # async def; asyncio.sleep lets other coroutines run during the wait.
        await asyncio.sleep(5)
        count += 1
        print("第"+repr(count)+"张图片已经下载完成")


def paserHtml(html):
    """Parse a listing page and return absolute URLs for every gallery in its TypeList div."""
    soup = BeautifulSoup(html, "html.parser")
    type_list = soup.find("div", class_="TypeList")
    # Each <li> wraps an <a> whose href is site-relative; prefix the site root.
    return [index + item.a.get('href') for item in type_list.findAll("li")]

def backPageLink(html):
    """Return the category's total page count (as a string) from its pagination links."""
    dom = etree.HTML(html)
    hrefs = dom.xpath('/html/body/div[2]/div[11]/a/@href')
    # Last pagination href is '<name>_<pages>.htm'; pull out <pages>.
    last_href = hrefs[-1]
    return last_href.rsplit('_', 1)[1].rsplit('.', 1)[0]

def coroutines(urls):
    """Run the listing-page crawl for *urls* to completion on a fresh event loop.

    BUG FIX: asyncio.get_event_loop() raises RuntimeError inside
    ThreadPoolExecutor worker threads (no loop is set there) and is deprecated
    for this use; asyncio.run creates and tears down a loop per call, which
    works both in the main thread and in pool workers.
    """
    asyncio.run(asyncObject(urls))

def threadAction(number, url):
    """Expand the category root into its listing-page URLs.

    Page 1 is the bare *url*; page i (i >= 2) lives at url + 'index_<i>.htm'
    (the site publishes no 'index_1.htm', hence the gap at i == 1).
    """
    pages = []
    for page in range(number):
        if page == 0:
            pages.append(url)
        elif page > 1:
            pages.append(url + "index_" + str(page) + ".htm")
    return pages

def getResponce(url):
    """GET *url* through the configured proxy with the spoofed User-Agent; return the body as UTF-8 text.

    BUG FIX: the parameter was named ``html`` but the body read the
    module-global ``url``, silently ignoring the argument (it only worked by
    accident when run as a script). The parameter is now actually used.
    """
    responce = requests.get(url, proxies=proxies, headers=agent)
    return responce.content.decode("utf-8")

#主页 main crawl driver
def indexMain(url):
    """Crawl the whole category: discover the page count, build every listing-page
    URL, and fan the pages out across a thread pool (each worker runs its own
    event loop via coroutines())."""
    html = getResponce(url)
    page_number = int(backPageLink(html)) + 1
    link_List = threadAction(page_number, url)

    chunk = 53  # listing pages handled per submitted task
    with ThreadPoolExecutor(max_workers=10) as t:
        # BUG FIX: the original `t.submit(coroutines(urls))` called coroutines()
        # synchronously in the main thread and submitted its None return value;
        # submit the callable and its argument instead. The original slicing
        # (range(i*53, 52+i*53)) also dropped one URL from every chunk — slicing
        # over the whole list covers every page exactly once.
        for start in range(0, len(link_List), chunk):
            t.submit(coroutines, link_List[start:start + chunk])
            time.sleep(2)

# Entry point: crawl the photo-gallery category below.
if __name__ == '__main__':
    # Alternative category: https://www.umei.cc/touxiangtupian/qinglvtouxiang/
    url = "https://www.umei.cc/meinvtupian/meinvxiezhen/"
    indexMain(url)



