import os
import uuid

from AMySpider import MySpider
from lxml import html

from ThreadPool import ThreadPool


def getDetailPageLinks(num):
    """Return all detail-page URLs found on one listing page.

    Args:
        num: Listing page number as a string ('1' maps to the unsuffixed URL).

    Returns:
        list[str]: Absolute URLs of every anchor whose href contains
        '/html/article/'.
    """
    # Page 1 has no '_N' suffix in its path; later pages do.
    if num == '1':
        url = 'http://www.369ce.com/html/part/17.html'
    else:
        url = 'http://www.369ce.com/html/part/17_' + num + '.html'
    mySp = MySpider()
    (flag, content) = mySp.getContent(url)
    # NOTE(review): `flag` is not checked here; a failed fetch will raise
    # inside html.fromstring — presumably acceptable for this script.
    selector = html.fromstring(content)
    urls = []
    for href in selector.xpath("//a/@href"):
        # Use the `in` operator rather than calling __contains__ directly.
        if '/html/article/' in str(href):
            urls.append('http://www.369ce.com' + str(href))
    return urls


def getDetailPageTitle(content):
    """Extract the gallery title from a detail page.

    Args:
        content: Raw HTML of the detail page.

    Returns:
        str: The title text, or a random UUID string when the title
        node is missing (so the download directory is still unique).
    """
    selector = html.fromstring(content)
    try:
        # xpath() always returns a list, so [0] picks the first match;
        # an empty result raises IndexError, handled below.
        image_title = selector.xpath("//*[@id='ks_xp']/div/div[1]/text()")[0]
    except IndexError:
        # Bug fix: return a str here as well — the original returned a
        # uuid.UUID object, making the return type inconsistent.
        image_title = str(uuid.uuid4())
    return image_title

def getImageLinks(url, content):
    """Return the image src URLs listed on a detail page.

    Args:
        url: The detail page URL (currently unused; kept for interface
            compatibility with existing callers).
        content: Raw HTML of the detail page.

    Returns:
        list[str]: Every img/@src under the gallery container.
    """
    selector = html.fromstring(content)
    # Removed a dead `image_detail_websites = []` assignment that was
    # immediately overwritten by the xpath() result.
    xpath_expr = "//*[@id='ks_xp']/div/div[2]/div/div/img/@src"
    return selector.xpath(xpath_expr)


def download_image(dir, image_title, image_detail_websites):
    """Save a gallery's images under <dir>/<image_title>.

    Args:
        dir: Root output directory. (Name shadows the builtin `dir`;
            kept unchanged for caller compatibility.)
        image_title: Gallery title, used as the subdirectory name.
        image_detail_websites: Sequence of image URLs to download.

    Files are named 1.jpg, 2.jpg, ... An existing file larger than 1 KiB
    is treated as already downloaded and skipped.
    """
    path = '%s/%s' % (dir, image_title)
    # Double quotes are illegal in Windows paths; replace them.
    path = path.replace("\"", "-")
    if not os.path.exists(path):
        os.makedirs(path)
    amount = len(image_detail_websites)
    for num, link in enumerate(image_detail_websites, start=1):
        filename = '%s/%s.jpg' % (path, num)
        # A file under 1 KiB is assumed to be a broken earlier attempt
        # and is re-downloaded.
        if os.path.exists(filename) and os.path.getsize(filename) > 1024:
            print("%s  第%s/%s张已存在" % (image_title, num, amount))
        else:
            print('正在下载图片：%s  第%s/%s张，' % (image_title, num, amount))
            mySp = MySpider()
            (flag, content) = mySp.getContent(link)
            # Bug fix: fetch BEFORE opening the file. The original opened
            # the file first, so a failed request left a zero-byte file
            # on disk. Also drops the redundant f.close() inside `with`.
            if flag:
                with open(filename, 'wb') as f:
                    f.write(content)
            else:
                print("请求错误")

def getProcessDetailPage(pageLink, p):
    """Fetch one detail page, download its gallery, then free a pool slot.

    Args:
        pageLink: URL of the gallery detail page.
        p: The ThreadPool; p.add_thread() is called when this worker
           finishes so another thread may start.
    """
    spider = MySpider()
    flag, page_html = spider.getContent(pageLink)
    if flag:
        title = getDetailPageTitle(page_html)
        image_links = getImageLinks(pageLink, page_html)
        download_image("D:/pics/us", title, image_links)
    # Release the slot regardless of fetch success.
    p.add_thread()

def main():
    """Crawl listing pages 1200..4938, spawning one pool thread per detail link."""
    pool = ThreadPool(8)
    for page_number in range(1200, 4939):
        detail_links = getDetailPageLinks(str(page_number))
        for link in detail_links:
            worker = pool.get_thread()
            job = worker(target=getProcessDetailPage, args=(link, pool))
            job.start()

# Script entry point: start the crawl only when executed directly.
if __name__ == '__main__':
    main()
