import os

import requests
from fake_useragent import UserAgent
from lxml import html

from AMySpider import MySpider


def getHeader():
    """Build HTTP request headers with a randomly chosen User-Agent."""
    return {'User-Agent': UserAgent().random}

def getDetailPageLinks(num):
    """Return the detail-page links of every photo set on list page *num*.

    Each list page currently holds 15 photo sets, so the result is
    normally a list of 15 URLs.

    :param num: list-page number as a string (concatenated into the URL).
    :return: list of detail-page URLs; empty list if the request failed.
    """
    url = 'http://www.mmjpg.com/home/' + num
    spider = MySpider()
    (flag, content) = spider.getContent(url)
    if not flag:
        # Request failed: report it and return no links instead of
        # feeding bogus content to html.fromstring().
        print("请求错误")
        return []
    selector = html.fromstring(content)
    # xpath() already returns a list of strings; no append loop needed.
    return list(selector.xpath("//ul/li/a/@href"))


def getDetailPageTitle(content):
    """Extract the photo-set title from the detail page's HTML.

    :param content: HTML of a photo set's detail page.
    :return: the title text taken from the first ``<h2>`` element.
    """
    # xpath() always returns a list, hence the [0] to pick the first match.
    title_nodes = html.fromstring(content).xpath("//h2/text()")
    return title_nodes[0]


def getImageLinks(url, content):
    """Collect the direct download URL of every image in a photo set.

    The per-picture pages are ``url/1`` .. ``url/n``, where n is read
    from the pager on the detail page.

    :param url: the photo set's detail-page URL.
    :param content: HTML of the detail page (its first picture page).
    :return: list of direct image download URLs, one per picture.
    """
    selector = html.fromstring(content)
    image_detail_websites = []
    # The second-to-last pager link holds the total number of pictures.
    image_amount = selector.xpath("//div[@class='page']/a[last()-1]/text()")[0]
    for i in range(int(image_amount)):
        image_detail_link = '{}/{}'.format(url, i + 1)
        # Send a random User-Agent (via the module's getHeader helper) and
        # set a timeout so a stalled server cannot hang the crawl forever.
        # A distinct local name avoids shadowing the `content` parameter.
        page = requests.get(image_detail_link, headers=getHeader(), timeout=10)
        sel = html.fromstring(page.content)
        # This is the final download address of a single picture.
        image_download_link = sel.xpath("//div[@class='content']/a/img/@src")[0]
        image_detail_websites.append(image_download_link)
    return image_detail_websites


def download_image(dir, image_title, image_detail_websites):
    """Save every image of a photo set under ``dir/image_title``.

    Pictures are numbered 1.jpg, 2.jpg, ... in download order. A file
    that already exists and is larger than 1 KiB is treated as a
    completed earlier download and skipped.

    :param dir: root directory the images are saved under.
    :param image_title: photo-set title, used as the sub-directory name.
    :param image_detail_websites: sequence of direct image download URLs.
    """
    path = '%s/%s' % (dir, image_title)
    # exist_ok avoids the race between an exists() check and makedirs().
    os.makedirs(path, exist_ok=True)
    amount = len(image_detail_websites)
    for num, link in enumerate(image_detail_websites, start=1):
        filename = '%s/%s/%s.jpg' % (dir, image_title, num)
        if os.path.exists(filename) and os.path.getsize(filename) > 1024:
            print("%s  第%s/%s张已存在" % (image_title, num, amount))
            continue
        print('正在下载图片：%s  第%s/%s张，' % (image_title, num, amount))
        spider = MySpider()
        (flag, content) = spider.getContent(link)
        if flag:
            # Open the file only after the download succeeded, so a
            # failed request no longer leaves an empty .jpg behind
            # (previously 'wb' created/truncated the file up front).
            with open(filename, 'wb') as f:
                f.write(content)
        else:
            print("请求错误")


def main(save_dir="D:/pic", first_page=1, last_page=19):
    """Crawl list pages first_page..last_page and download every photo set.

    The previously hard-coded save directory and page range are now
    parameters with the original values as defaults, so ``main()`` still
    behaves as before.

    :param save_dir: root directory the images are saved under.
    :param first_page: first list-page number to crawl (inclusive).
    :param last_page: last list-page number to crawl (inclusive).
    """
    for page_number in range(first_page, last_page + 1):
        for pageLink in getDetailPageLinks(str(page_number)):
            spider = MySpider()
            (flag, content) = spider.getContent(pageLink)
            if not flag:
                # Skip a detail page whose request failed instead of
                # parsing bogus content downstream.
                print("请求错误")
                continue
            download_image(save_dir, getDetailPageTitle(content),
                           getImageLinks(pageLink, content))


# Run the crawler only when executed as a script, not on import.
if __name__ == '__main__':
    main()
