import requests
from pyquery import PyQuery as pq
#  from PIL import Image
#  import time
#  import pymysql
import os
from hashlib import md5


def get_page(session, url):
    """Fetch a page over HTTP POST and return its body.

    :session: requests session used to issue the request
    :url: target URL

    :returns: response text on HTTP 200, otherwise None
    """

    #  TODO <18-12-28, Codergege> # use a getpage snippet to fetch the page (json)
    request_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) \
            AppleWebKit/537.36(KHTML, like Gecko) \
            Chrome/70.0.3538.110 Safari/537.36'
    }
    try:
        response = session.post(url, data=None, headers=request_headers)
    except requests.ConnectionError as conn_err:
        print('Error:', conn_err.args)
        return None

    # Only a successful response yields usable content.
    return response.text if response.status_code == 200 else None


def parse_page_first(data):
    """Parse the first-level listing page and yield album title entries.

    :data: HTML source of the listing page, or None on a failed fetch
    :returns: generator of dicts with 'title' and 'title_url'

    """
    if not data:
        return
    document = pq(data)
    # items() yields one PyQuery object per <li>, which is the most
    # convenient form for iteration.
    for entry in document('ul.plist.cf > li').items():
        link = entry('a:first')
        yield {
            'title': link.text(),
            'title_url': link.attr('href'),
        }


def parse_page_second(data):
    """Extract every full-size image entry from a second-level page.

    :data: HTML source of the gallery page, or None on a failed fetch
    :returns: generator of dicts with 'image_name' and 'image_url'

    """
    if not data:
        return
    document = pq(data)
    for anchor in document('div.pic-list > ul.cf > li > a').items():
        img = anchor('img')
        yield {
            'image_name': img.attr('data-intro'),
            'image_url': img.attr('src'),
        }


def parse_page_total(data):
    """Read the total number of pagination pages from a listing page.

    :data: HTML source of the listing page, or None/'' when the fetch failed
    :returns: total page count as an int; 0 when data is missing

    """
    # get_page() returns None on failure; guard like the sibling parsers
    # instead of crashing inside pq().
    if not data:
        return 0
    doc = pq(data)
    # The second-to-last pager link holds the last page number.
    return int(doc('div.pages>a')[-2].text)


def save_image(session, item):
    """Download one image and save it under Downloads/<title>/.

    The MD5 digest of the image content is used as the file name so the
    same image is never stored twice.

    :session: requests session used for the download
    :item: image info dict containing 'title' and 'image_url'
    :returns: None

    """
    # makedirs creates the 'Downloads' parent as well when missing;
    # the original os.mkdir raised FileNotFoundError on a fresh checkout
    # because 'Downloads' did not exist yet. exist_ok also removes the
    # check-then-create race.
    os.makedirs('Downloads/' + item.get('title'), exist_ok=True)
    try:
        resp_img = session.get(item.get('image_url'))
        if resp_img.status_code == 200:
            file_path = 'Downloads/{0}/{1}.{2}'\
                .format(
                    item.get('title'), md5(resp_img.content).hexdigest(), 'jpg'
                )
            if not os.path.exists(file_path):
                with open(file_path, 'wb') as f:
                    f.write(resp_img.content)
                print(file_path, '保存成功!')
            else:
                print('Already downloaded', file_path)
    except requests.ConnectionError as e:
        print('Failed to save image:', e.args)


def main():
    """Crawler entry point: walk listing pages, then galleries, saving images.

    :returns: None

    """
    session = requests.session()

    first_page_url = 'http://www.bx1k.com/funnyimg/find-cate-1-p-1.html'
    # Determine the total page count first.
    total_pages = parse_page_total(get_page(session, first_page_url))
    print(str(total_pages))
    url_template = 'http://www.bx1k.com/funnyimg/find-cate-1-p-{0}.html'
    # Only crawl the first two pages.
    for page_no in range(1, 3):
        listing_html = get_page(session, url_template.format(page_no))
        for title_info in parse_page_first(listing_html):
            # Parse the second-level (gallery) page.
            gallery_html = get_page(session, title_info.get('title_url'))
            for image_info in parse_page_second(gallery_html):
                image_info['title'] = title_info.get('title')
                # Persist the image to disk.
                save_image(session, image_info)


# Run the crawler only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
