import requests
from urllib.parse import urlencode
import json
import re
import os
from multiprocessing import Pool

#请求
# Request one page of search results
def get_request_data(offset, keyword):
    """Fetch one page of Toutiao search results as raw JSON text.

    Args:
        offset: Paging offset passed to the search API (multiple of 20).
        keyword: Search keyword (e.g. a Chinese phrase).

    Returns:
        The response body as a string on HTTP 200, otherwise None.
    """
    data = {
        "offset": offset,
        "format": "json",
        "keyword": keyword,
        "autoload": "true",
        "count": 20,
        "cur_tab": 1
    }
    url = "https://www.toutiao.com/search_content/?" + urlencode(data)
    try:
        # Timeout so a stalled connection cannot hang the scraper forever.
        response = requests.get(url, timeout=10)
    except requests.RequestException:
        print("连接失败...")
        return None
    if response.status_code == 200:
        return response.text
    # Non-200: report failure and return None explicitly (was implicit before).
    print("连接失败...")
    return None


#解析返回数据
def parse_page(page):
    jp = json.loads(page)
    print(isinstance(jp, str))
    data_list = jp.get("data")
    return data_list


#访问详情页
def get_detail_page(url):
    try:
        response = requests.get(url)
        if response.status_code == 200:
            print("链接成功")
            page = response.text
            pattern = re.compile("var gallery = (.*?);")
            search_res = re.search(pattern, page)
            if search_res:
                res_j = json.loads(search_res.group(1))
                im = res_j.get("sub_images")
                ti = res_j.get("sub_titles")
                images = [item.get("url") for item in im]
                return {
                    "images": images,
                    "title": ti
                }
            else:
                print("链接失败")
    except IOError:
        print(IOError)

def downlaod(url, title, num, base_dir="d:/python_image/"):
    """Download one image and save it as <base_dir>/<title>/<num>.jpg.

    NOTE: the misspelled name ("downlaod") is kept for caller compatibility.

    Args:
        url: Direct URL of the image.
        title: Sub-directory name (gallery sub-title).
        num: 1-based index used as the file name.
        base_dir: Root directory for saved images (new, defaults to the
            previously hard-coded path, so existing callers are unaffected).
    """
    response = requests.get(url, timeout=10)
    pic = response.content
    lib = os.path.join(base_dir, title)
    # exist_ok avoids the check-then-create race of the old exists() test.
    os.makedirs(lib, exist_ok=True)

    file_path = os.path.join(lib, str(num) + ".jpg")
    if not os.path.exists(file_path):
        # Context manager guarantees the handle closes even if write() fails.
        with open(file_path, 'wb') as file:
            file.write(pic)

def start(offset):
    """Scrape one search-results page at the given offset and download
    every gallery image found in its articles.

    Args:
        offset: Paging offset forwarded to the search API.
    """
    keyword = "街拍"
    data_list = parse_page(get_request_data(offset, keyword))
    # parse_page may return None on fetch/parse failure; bail out cleanly.
    if not data_list:
        return
    for entry in data_list:
        art_url = entry.get("article_url")
        if art_url is None:
            continue
        # Accept https as well as http; the old check silently dropped
        # every https article link.
        if not art_url.startswith(("http://", "https://")):
            continue
        detail = get_detail_page(art_url)
        if detail is None:
            continue
        img_list = detail.get("images")
        titles = detail.get("title") or []
        # zip stops at the shorter list, so a missing sub-title can no
        # longer raise IndexError; enumerate replaces the manual counter.
        for num, (img_url, img_title) in enumerate(zip(img_list, titles), start=1):
            downlaod(img_url, img_title, num)

if __name__ == "__main__":
    # Entry point: scrape a single results page at offset 20.
    # NOTE(review): Pool is imported at the top of the file but unused here —
    # presumably a parallel Pool.map over several offsets was intended.
    start(20)