import re
import os
import time
import copy
import requests
from bs4 import BeautifulSoup
from urllib.parse import urlencode
from multiprocessing import Pool

# Shared request headers: a desktop Chrome user-agent so the site serves
# the normal (non-mobile, non-blocked) pages to every request below.
header = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}


def get_index(page):
    """Fetch the JSONP index listing for one page of weekly wallpaper sets.

    Returns the raw response text, or None when the request fails.
    """
    base = 'http://db2.gamersky.com/LabelJsonpAjax.aspx?'
    # 'callback' is the JSONP callback name the server echoes back; its exact
    # value does not matter to us since we regex-parse the payload anyway.
    params = {
        'callback': 'jQuery18308633961083269517_1531234733750',
        'jsondata': {
            'type': 'updatenodelabel',
            'isCache': 'true',
            'cacheTime': 60,
            'nodeId': '20117',
            'isNodeId': 'true',
            'page': page
        },
        '_': int(time.time() * 1000)
    }
    # urlencode() stringifies the nested 'jsondata' dict; the API accepts
    # that repr-style form, so the query is built exactly as before.
    full_url = base + urlencode(params)
    try:
        resp = requests.get(full_url, headers=header, timeout=10)
        return resp.text
    except Exception:
        print('获取第%d页每周壁纸url出错' % page)
        return None

def max_page():
    """Detect the total number of index pages.

    Returns the page count as an int.

    Raises:
        RuntimeError: when the index page cannot be fetched or the
            ``totalPages`` field cannot be found in the response.
    """
    text = get_index(1)
    # get_index() returns None on a failed request; the original code then
    # crashed inside re.findall with an opaque TypeError.
    if text is None:
        raise RuntimeError('无法获取每周壁纸索引页')
    pattern = re.compile(r'"totalPages":(.+?),')
    matches = pattern.findall(text)
    if not matches:
        raise RuntimeError('无法解析总页数')
    totalPages = int(matches[0])
    print("总共有%s页！" % totalPages)
    return totalPages

def get_url_title(r_text):
    """Yield [url, title] pairs parsed from the JSONP index response.

    The response embeds backslash-escaped HTML; one regex alternation
    captures either an href (group 1) or a title (group 2). An href is
    remembered until the matching title arrives, at which point the pair
    is recorded.
    """
    pattern = re.compile(r'g\\"><a href=\\"(.+?)\\" | title=\\"(.+?)\\">', re.S)
    matches = pattern.findall(r_text)
    pair, pairs = [1, 2], []
    try:
        for url, title in matches:
            if url:
                pair[0] = url
            else:
                pair[1] = title
                pairs.append(list(pair))
        yield from pairs
    except Exception:
        print('提取索引页壁纸网址及标题错误！')
        return None

def get_html(html_url):
    """Fetch a wallpaper page and return it parsed as a BeautifulSoup tree.

    The server may declare a charset that differs from the actual UTF-8
    payload, so the raw bytes are decoded as UTF-8 directly. The original
    ``r.text.encode(r.encoding).decode('utf-8')`` round trip produced the
    same bytes but crashed with a TypeError whenever ``r.encoding`` was None.
    """
    r = requests.get(html_url, headers=header, timeout=15)
    html = BeautifulSoup(r.content.decode('utf-8'), 'lxml')
    return html

def get_othurl(html):
    """Return the hrefs of the remaining pages of a wallpaper set.

    The pagination links live next to the bold current-page marker
    (``li > b``); two levels up is the element holding all ``<a>`` tags.
    Returns None when the set has a single page (no ``li > b`` present).
    """
    try:
        current = html.select('li > b')[0]
        anchors = current.parent.parent.select('a')
        return [a.get('href') for a in anchors]
    except Exception:
        print('没有其他页面！')
        return None


def get_urlkind(html):
    """Extract the image URLs and the wallpaper category from a detail page.

    Args:
        html: BeautifulSoup tree of a wallpaper detail page.

    Returns:
        (pic_url, pic_kind) — a list of image URLs (the part of each
        ``p > a`` href after ``shtml?``) and the category text after the
        colon in the ``li > b`` label, or '无' when no label is found.
    """
    # Raw strings: 'shtml\?' in a plain string is an invalid escape
    # (SyntaxWarning on modern Python).
    pattern_picurl = re.compile(r'shtml\?(.*)')
    # Matches either the half-width or the full-width colon.
    pattern_kind = re.compile(r'[:：](.+?)$')
    # The original declared `global pic_url` for no reason, leaking state
    # into the module namespace; a plain local suffices.
    pic_url = []
    pic_kind = '无'
    try:
        pic_tag = html.select('p > a')
        pic_url = [pattern_picurl.findall(i['href'])[0] for i in pic_tag]
        pk_tag = html.select('li > b')[0]
        pic_kind = pattern_kind.findall(pk_tag.get_text())[0]
    except IndexError:
        # No category label (or malformed href) — keep the default kind.
        pic_kind = '无'
    return pic_url, pic_kind


def save_pic(pic_url, title, pic_kind):
    """Download every image in pic_url into ``wallpaper/<title>/``.

    Files are named ``<pic_kind>-<n>.jpg`` with n counting from 1.
    Errors are reported but not raised, so one bad image does not abort
    the whole set.
    """
    # exist_ok avoids the check-then-create race when several Pool workers
    # handle pages of the same wallpaper set concurrently.
    os.makedirs('wallpaper/' + title, exist_ok=True)
    filepath = 'wallpaper/%s/' % (title)
    try:
        print('-'*50); print("开始下载--标题：%s" % title); print('-'*50)
        for num, img_url in enumerate(pic_url, start=1):
            r = requests.get(img_url, headers=header, timeout=15)
            filename = filepath + '%s-%d.jpg' % (pic_kind, num)
            print('开始下载(壁纸类型：%s)===>第%d张' % (pic_kind, num))
            # `with` already closes the file; the original's extra
            # f.close() inside the block was redundant.
            with open(filename, 'wb') as f:
                f.write(r.content)
    except Exception as error:
        print('下载图片出错！')
        print(error)


def down_pic(url, title):
    """Fetch one wallpaper page and download all images it links to."""
    page = get_html(url)
    urls, kind = get_urlkind(page)
    if urls:
        save_pic(urls, title, kind)

def main(i):
    """Process index page *i*: download every wallpaper set it lists.

    For each set, downloads the first page and then every additional
    page reported by get_othurl().
    """
    index_html = get_index(i)
    # get_index() returns None on request failure; iterating
    # get_url_title(None) would raise TypeError at the first item.
    if index_html is None:
        return
    for url, title in get_url_title(index_html):
        html = get_html(url)
        down_pic(url, title)
        other_pages = get_othurl(html)
        if other_pages:
            # The original reused `i` here, shadowing the page-number
            # parameter — harmless but confusing; use a distinct name.
            for page_url in other_pages:
                down_pic(page_url, title)


if __name__ == '__main__':
    # Ask how many index pages to crawl, then fan the work out over
    # four worker processes (one index page per task).
    mpage = max_page()
    num = int(input('输入要爬取的页面数（最大为%s）：' % mpage))
    pages = list(range(1, num + 1))
    # Context manager guarantees the pool is torn down even if map()
    # raises; the original leaked workers on an exception.
    with Pool(processes=4) as pool:
        pool.map(main, pages)
