import requests
from lxml import etree
import aiohttp
import asyncio
import time
import math
import json
import os

# Site root; page-relative hrefs scraped from listings are appended to this.
baseUrl = "https://wallhalla.com"
def html_etree(url, param=None):
    """Download *url* synchronously and return its parsed lxml HTML tree.

    Args:
        url: page URL to fetch.
        param: optional dict of query-string parameters.

    Returns:
        Root element of the parsed document (lxml).
    """
    # Pass the query parameters by keyword: the original relied on `param`
    # landing on requests.get's second positional argument (`params`).
    webreq = requests.get(url, params=param)
    return etree.HTML(webreq.text)

async def getThumbnailDetailUrls(tagUrl, page, sema):
    """Fetch one listing page and collect absolute thumbnail-detail URLs.

    Args:
        tagUrl: listing URL, paginated through the ``page`` query parameter.
        page: 1-based page number to fetch.
        sema: asyncio.Semaphore bounding concurrent requests.

    Returns:
        list[str] of absolute detail-page URLs; empty list if the fetch
        or the parse fails (best-effort, errors are logged and swallowed).
    """
    # `async with sema` releases the permit even if an exception escapes;
    # the original manual acquire()/release() pair could leak a slot.
    async with sema:
        print("正在获取第", page, "页数据...")
        detailUrls = []
        async with aiohttp.ClientSession() as session:
            try:
                async with session.get(tagUrl, params={'page': page},
                                       timeout=5, verify_ssl=False) as response:
                    text = await response.text()
                    html = etree.HTML(text)
                    # Each thumbnail anchor links to its wallpaper detail page.
                    hrefs = html.xpath("//div[@class='thumb-wrap']/a/@href")
                    detailUrls = [baseUrl + href for href in hrefs]
            except Exception as error:
                # A failed page simply contributes no URLs.
                print("获取第", page, "页数据出错, error:", error, "...")
    return detailUrls

async def defGetImageOrignalUrls(url, sema, index):
    """Fetch one wallpaper detail page and return its original image URL.

    Args:
        url: absolute detail-page URL.
        sema: asyncio.Semaphore bounding concurrent requests.
        index: 1-based ordinal used only for progress logging.

    Returns:
        str: "https:"-prefixed image URL, or "" on any fetch/parse error
        (including a page with no ``data-wallurl`` attribute).
    """
    # `async with sema` guarantees the permit is released on every path;
    # the manual acquire()/release() of the original could leak one.
    async with sema:
        print("正在获取第", index, "图片....")
        imageUrl = ""
        async with aiohttp.ClientSession() as session:
            try:
                async with session.get(url, timeout=5, verify_ssl=False) as response:
                    text = await response.text()
                    imageHtml = etree.HTML(text)
                    # data-wallurl holds a protocol-relative URL ("//host/...").
                    wallurl = imageHtml.xpath(
                        "//div[@class='wall-source-wrap']/div/@data-wallurl")[0]
                    imageUrl = "https:" + wallurl
            except Exception as error:
                print("获取第", index, "图片出错, error:", error, "...")
    return imageUrl

def tagDetail(tagurl, needCount=0):
    """Scrape original-image URLs for one tag listing.

    Args:
        tagurl: listing URL for the tag (e.g. baseUrl + "/tags/food").
        needCount: cap on the number of images to fetch; 0 means "all".

    Returns:
        list[str] of image URLs (failed fetches are filtered out).
    """
    html = html_etree(tagurl)
    limit = 32  # thumbnails per listing page on wallhalla
    totalCount = html.xpath("//span[@class='subt-highlight']/text()")[0].__str__().replace(',', '')
    totalCount = float(totalCount)

    if needCount != 0:
        pageCount = math.ceil(float(needCount) / limit)
    else:
        pageCount = math.ceil(totalCount / limit)

    # Never request more pages than the site actually has.
    if needCount > totalCount:
        pageCount = math.ceil(totalCount / limit)

    if pageCount == 0:
        return []

    print(tagurl, "总共", pageCount, "页")

    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)

    sema = asyncio.Semaphore(20)  # cap concurrent requests at 20

    tasks = [getThumbnailDetailUrls(tagurl, page, sema) for page in range(1, pageCount + 1)]
    items = loop.run_until_complete(asyncio.gather(*tasks))
    print("flat数组中...")
    items = [y for x in items for y in x]
    print("flat数组完成...")

    # BUG FIX: the original iterated range(1, len(items)+1) over items[index],
    # which skipped items[0] and raised IndexError at items[len(items)].
    # It could also overrun when needCount exceeded the items actually
    # scraped (e.g. some pages failed). Use 0-based indices, capped safely.
    imageCount = len(items)
    if needCount != 0 and needCount < totalCount:
        imageCount = min(imageCount, needCount)

    tasks = [defGetImageOrignalUrls(items[index], sema, index + 1)
             for index in range(imageCount)]
    imageUrls = loop.run_until_complete(asyncio.gather(*tasks))
    print("成功获取", len(imageUrls), "张图片...")
    # SSL connections need ~250ms for the underlying transport to close.
    loop.run_until_complete(asyncio.sleep(0.25))
    loop.close()
    # Drop the "" placeholders produced by failed image fetches.
    return [u for u in imageUrls if u]



def main():
    """Scrape original-image URLs for each configured tag and dump them to JSON."""
    imagesUrl = {}

    tags = ["food"]
    count = 250  # max images to fetch per tag

    for tag in tags:
        imagesUrl[tag] = tagDetail(baseUrl + "/tags/" + tag, count)

    # File name is "_tag1_tag2...", matching the original naming scheme.
    filename = "".join("_" + tag for tag in tags)

    outputDir = os.path.join(os.getcwd(), 'output')
    # FIX: open() failed with FileNotFoundError when ./output did not exist.
    os.makedirs(outputDir, exist_ok=True)
    filepath = os.path.join(outputDir, filename + '.json')
    # Context manager guarantees the file is closed even if the write raises.
    with open(filepath, 'w+') as writer:
        writer.write(json.dumps(imagesUrl))
    print("本地路径为: ", filepath)
    

    
    
# Run the scraper only when executed as a script, not on import.
if __name__ == '__main__':
    main()
    
