import requests
from pyquery import PyQuery as pq
import os
from hashlib import md5
import time


def get_page(session, url):
    """Fetch a page with the shared session and return its body.

    :session: requests.Session used for the request (keeps cookies/conn pool)
    :url: absolute URL to fetch
    :returns: response text (HTML or JSON string) on HTTP 200, otherwise None

    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) \
            AppleWebKit/537.36(KHTML, like Gecko) \
            Chrome/70.0.3538.110 Safari/537.36'
    }
    try:
        # BUG FIX: the original called session.pos(...), which does not exist
        # on requests.Session and raised AttributeError. All callers fetch
        # read-only pages, so a plain GET is the correct request.
        resp = session.get(url, headers=headers)
        if resp.status_code == 200:
            return resp.text
    except requests.ConnectionError as e:
        # Best-effort: log and fall through to an implicit None return.
        print('Error:', e.args)


def parse_page(data):
    """Parse the payload returned by get_page(session, url).

    Not implemented yet -- placeholder kept so callers can already be wired
    up against the final interface.

    :data: page HTML source or JSON string
    :returns: intended to yield parsed result items

    """
    # TODO <18-12-28, Codergege> # parse the data
    pass


def save_image(session, item, sleep_seconds):
    """Download one image and store it under Downloads/<title>/.

    The gallery title (spaces replaced with underscores) becomes the
    directory name and the MD5 hex digest of the image bytes becomes the
    file name, so the same image is never written twice.

    :session: requests.Session to download with
    :item: dict with 'title' and 'image_url' keys
    :sleep_seconds: delay (seconds) before the request, to throttle the crawler
    :returns: None

    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) \
            AppleWebKit/537.36(KHTML, like Gecko) \
            Chrome/70.0.3538.110 Safari/537.36',
        'accept':
        'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image\
        /apng,*/*;q=0.8',
        'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'cache-control': 'max-age=0',
        'referer': 'https://www.mzitu.com/',
        'upgrade-insecure-requests': '1'
    }
    image_dir = item.get('title').replace(' ', '_')
    # BUG FIX: os.mkdir failed with FileNotFoundError when the 'Downloads'
    # parent did not exist, and the exists()+mkdir() pair was racy.
    # makedirs(exist_ok=True) creates all missing parents atomically enough
    # for this use and is a no-op when the directory is already there.
    os.makedirs('Downloads/' + image_dir, exist_ok=True)
    try:
        time.sleep(sleep_seconds)
        resp_img = session.get(item.get('image_url'), headers=headers)
        print(item.get('image_url'))
        print(resp_img.status_code)
        if resp_img.status_code == 200:
            # Content hash as the file name deduplicates identical images.
            file_path = 'Downloads/{0}/{1}.{2}'\
                .format(
                    image_dir, md5(resp_img.content).hexdigest(), 'jpg'
                )
            if not os.path.exists(file_path):
                with open(file_path, 'wb') as f:
                    f.write(resp_img.content)
                print(file_path, '保存成功!')
            else:
                print('Already downloaded', file_path)
    except requests.ConnectionError as e:
        # Network failure is logged, not raised: the crawl should continue.
        print('Failed to save image:', e.args)


def parse_page_totla(data):
    """Read the total number of listing pages from the pagination bar.

    The second-to-last ``page-numbers`` link carries the highest page
    index (the last one is the "next" arrow).

    :data: HTML source of a listing page
    :returns: total page count as int

    """
    doc = pq(data)
    page_links = list(doc('div.nav-links>a.page-numbers').items())
    highest = page_links[-2]
    return int(highest.text())


class ImageInfo(object):

    """Plain holder for the metadata of one scraped image."""

    def __init__(self, title=None, title_url=None, image_name=None,
                 image_url=None):
        """Store the image metadata on private attributes.

        :title: title of the gallery the image belongs to
        :title_url: URL of the gallery page
        :image_name: file name of the image
        :image_url: direct URL of the image resource

        """
        self._title, self._title_url = title, title_url
        self._image_name, self._image_url = image_name, image_url


class TitleInfo(object):

    """Plain holder for one gallery entry on a listing page."""

    def __init__(self, title=None, title_url=None):
        """Store the gallery metadata on private attributes.

        :title: gallery title text
        :title_url: URL of the gallery's first page

        """
        self._title, self._title_url = title, title_url


def parse_page_title(data):
    """Yield a TitleInfo for every gallery listed on an index page.

    :data: HTML source of a listing page; falsy data yields nothing
    :returns: generator of TitleInfo with _title and _title_url filled in

    """
    if not data:
        return
    doc = pq(data)
    for entry in doc('ul#pins > li').items():
        info = TitleInfo()
        info._title_url = entry.children('a').attr('href')
        info._title = entry.children('span:first > a').text()
        yield info


def parse_page_image_end(data):
    """Extract the direct image URL from a single image detail page.

    :data: HTML source of an image detail page
    :returns: ImageInfo with _image_url set, or None when data is falsy

    """
    if not data:
        return None
    doc = pq(data)
    info = ImageInfo()
    info._image_url = doc('div.main-image > p > a > img').attr('src')
    return info


def parse_page_image(data, session):
    """Yield an ImageInfo for every per-image page of one gallery.

    Reads the total image count from the pagination bar, builds each
    per-image page URL, fetches it and delegates the extraction to
    parse_page_image_end.

    :data: HTML source of the gallery's first image page; falsy yields nothing
    :session: requests session used for the per-page fetches
    :returns: generator of ImageInfo

    """
    if not data:
        return
    doc = pq(data)
    # The second-to-last pagination link holds the total number of images.
    nav_links = list(doc('div.pagenavi > a').items())
    total = int(nav_links[-2].children('span').text())
    # Drop the trailing '/N' page suffix to obtain the gallery base URL.
    base_url = doc('div.main-image > p > a').attr('href')[:-2]
    for page_no in range(1, total + 1):
        page_url = base_url + '/' + str(page_no)
        info = parse_page_image_end(get_page(session, page_url))
        if info:
            yield info


def main():
    """Crawler entry point: listing pages -> galleries -> images -> disk.

    :returns: None

    """
    session = requests.session()
    index_url = 'https://www.mzitu.com/'
    index_html = get_page(session, index_url)
    # Total listing-page count; currently informational only, since the
    # loop below deliberately crawls just the first listing page.
    pages = parse_page_totla(index_html)
    for page_idx in range(1):
        listing_url = 'https://www.mzitu.com/page/{0}/'.format(page_idx + 1)
        for gallery in parse_page_title(get_page(session, listing_url)):
            # Walk every image page of this gallery.
            image_gen = parse_page_image(
                get_page(session, gallery._title_url), session
            )
            for image in image_gen:
                if image:
                    save_image(
                        session,
                        {'title': gallery._title,
                         'image_url': image._image_url},
                        2,
                    )


# Entry-point guard: start the crawler only when run as a script, not on import.
if __name__ == "__main__":
    main()
