import requests
from urllib.parse import urlencode
import json
from requests.exceptions import RequestException
from bs4 import BeautifulSoup
import re
import urllib.request
import os
from multiprocessing import Pool

# Range of result pages to crawl; each group maps to an offset of group * 20
# (the API's page size, see 'count' in get_page_index).
GROUP_START = 1
GROUP_END = 20

# Search keyword sent to the Toutiao image-search API; also used as the
# local download folder name ("sexy photo gallery").
KEYWORD = '性感写真'


def _download_image(url, folder):
    """Download the image at *url* into *folder* and return the local path.

    The saved filename is the last path component of the URL with a
    '.jpg' suffix appended.
    """
    # makedirs with exist_ok avoids the isdir/mkdir race and handles
    # nested folder paths, unlike the bare os.mkdir it replaces.
    os.makedirs(folder, exist_ok=True)

    print('downloading %s' % url)
    # Compute the target path once (the original built it twice via a
    # lambda whose parameter was ignored).
    filename = os.path.join(folder, os.path.split(url)[1]) + '.jpg'
    urllib.request.urlretrieve(url, filename)
    return filename

def get_page_index(offset, keyword):
    """Fetch one page of the Toutiao search-index JSON.

    Args:
        offset: result offset (multiple of 20, the page size).
        keyword: search term.

    Returns:
        The response body text on HTTP 200, otherwise None (including
        on any network error).
    """
    data = {
    'offset':offset,
    'format':'json',
    'keyword':keyword,
    'autoload':'true',
    'count':'20',
    'cur_tab':'3'
    }
    url = 'https://www.toutiao.com/search_content/?' + urlencode(data)
    try:
        # The request must be inside the try: previously it sat outside,
        # so the RequestException handler below could never fire.
        response = requests.get(url)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        print('请求索引页面出错喽')
        return None

def parse_page_index(html):
    """Yield article URLs from a search-index JSON response.

    Args:
        html: JSON text as returned by get_page_index, or None.

    Yields:
        The 'article_url' value of each item under the top-level 'data'
        key (may yield None for items missing that key).

    Yields nothing for None/empty input or malformed JSON — callers
    (main) pass get_page_index's result in unchecked, so this must not
    raise on a failed fetch.
    """
    if not html:
        return
    try:
        data = json.loads(html)
    except json.JSONDecodeError:
        return
    if data and 'data' in data:
        for item in data['data']:
            yield item.get('article_url')

def get_page_detail(url):
    """Fetch an article detail page.

    Args:
        url: article URL taken from the search index.

    Returns:
        The page HTML on HTTP 200, otherwise None (including on any
        network error, which is logged with the failing URL).
    """
    try:
        response = requests.get(url)
    except RequestException:
        print('请求详情页出错喽',url)
        return None
    # Non-200 responses are treated as "no page" rather than an error.
    if response.status_code == 200:
        return response.text
    return None

def parse_page_detail(html, url):
    """Extract the title and gallery image URLs from a detail page.

    Args:
        html: the detail page HTML.
        url: the page URL (echoed back in the result for bookkeeping).

    Returns:
        A dict with 'title', 'url' and 'images' (list of image URLs)
        when the page embeds a ``gallery: {...},`` JSON blob with a
        'sub_images' list; otherwise None.
    """
    soup = BeautifulSoup(html, 'lxml')
    # Guard: pages without a <title> tag previously raised IndexError.
    title_tags = soup.select('title')
    title = title_tags[0].get_text() if title_tags else ''
    print(title)

    # The gallery data is embedded in an inline script as
    # ``gallery: <json>,`` — pull out the JSON payload with a regex.
    result = re.search(r'gallery: (.*),', html)
    if not result:
        return None
    try:
        data = json.loads(result.group(1))
    except json.JSONDecodeError:
        # The greedy capture can over-match on unexpected markup;
        # treat an unparsable blob as "no gallery".
        return None
    if data and 'sub_images' in data:
        images = [item.get('url') for item in data['sub_images']]
        return {
            'title': title,
            'url': url,
            'images': images
        }
    return None

def main(offset):
    """Crawl one index page: fetch it, then download every gallery image
    of every listed article into the KEYWORD folder.

    Args:
        offset: result offset passed through to get_page_index.
    """
    index_html = get_page_index(offset, KEYWORD)
    # get_page_index returns None on failure; bail out instead of
    # feeding None into the JSON parser.
    if not index_html:
        return
    for url in parse_page_index(index_html):
        # Items may lack an 'article_url'; skip them.
        if not url:
            continue
        detail_html = get_page_detail(url)
        if detail_html:
            result = parse_page_detail(detail_html, url)
            if result and 'images' in result:
                for image_url in result['images']:
                    _download_image(image_url, KEYWORD)

if __name__ == '__main__':
    # One offset per page group, 20 results per page.
    offsets = [x * 20 for x in range(GROUP_START, GROUP_END + 1)]
    # Context manager terminates the pool on exit; the original
    # never closed or joined it, leaking worker processes.
    with Pool() as pool:
        pool.map(main, offsets)
