# coding=utf8
# Toutiao (toutiao.com) PC site scraper: searches keywords, saves the raw
# search-result JSON, and downloads the result images; keywords are processed
# concurrently via a multiprocessing pool.
# Requires Python 3.6+ (f-strings).
# The cookie below currently has to be replaced by hand when it expires.

import hashlib
import json
import os
import re
from multiprocessing import Pool

import requests

cookie = 'tt_webid=6826590832570058254; s_v_web_id=verify_ka6f9pcm_Rx9Ej36b_6ur6_4xkD_84sJ_c3oAOhKyrTPl; WEATHER_CITY=%E5%8C%97%E4%BA%AC; tt_webid=6826590832570058254; ttcid=42ea1de3db064f47a085b1d3253c5b1266; csrftoken=bb14e6561556122fda8b0e1750d04e0f; SLARDAR_WEB_ID=3a7428d2-a183-480e-89c3-cd545115bacf; __tasessionId=kwnvxo3s31589447028845; tt_scid=xI76fSUj.7PXrmdIC4cKG.qingSJ61VOx9faT1RExUhC0aUN7L512N6Hp0LN9y4x66f3'


def get_img(j):  # download the images referenced by one result page
    """Download every image referenced by one page of search results.

    Args:
        j: dict decoded from the search API response. Reads the keys
           'data' (list of result entries) and 'keyword' (the search
           term, injected by the caller).

    Side effects:
        Creates ./头条/<keyword>/<sanitized title>/ per entry and writes
        each image as <md5 of content>.png, so re-downloading the same
        bytes simply overwrites the existing file.
    """
    illegal_chars = re.compile(r'[/\\:*?"<>|]')  # characters not allowed in Windows paths
    kw = j.get('keyword')
    for d in j.get('data') or []:  # 'data' may be absent/None — treat as empty page
        try:
            title = d.get('title')
            if not title:
                continue
            title = illegal_chars.sub('', title)  # folder is named after the entry title
            image_li = d.get('image_list')
            if not image_li:
                continue
            p = f'./头条/{kw}/{title}'
            print(f'-----下载关键词-{kw}-{title}的图片')
            os.makedirs(p, exist_ok=True)
            for image in image_li:
                u = image.get('url')
                if not u:
                    continue  # entry without a usable URL
                # rewrite the thumbnail URL to the full-size variant
                u = u.replace('list/190x124', 'large').replace('list', 'large')
                # timeout so one dead CDN host cannot hang the worker forever
                img = requests.get(u, timeout=30)
                if img.status_code != 200:
                    continue  # don't save an error page as a .png
                cont = img.content
                n = hashlib.md5(cont).hexdigest()  # content hash as filename dedups images
                with open(f'{p}/{n}.png', 'wb') as f:
                    f.write(cont)
        except Exception as e:
            # best-effort scraping: report and move on to the next entry
            print(e)


def get_json(kw):
    """Page through the Toutiao search API for keyword *kw*.

    For each page: save the raw JSON to 头条/json/<kw>_<page>.json and,
    when the page carries results, hand it to get_img() for download.
    Stops when the API reports no more pages ('has_more' falsy) or a
    response cannot be decoded as JSON.

    Args:
        kw: search keyword (also stored in the page dict for get_img).
    """
    # Headers the PC site sends; the session cookie is required or the
    # API returns an empty/blocked response.
    hs = {
        'accept': 'application/json, text/javascript',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'zh-CN,zh;q=0.9',
        'content-type': 'application/x-www-form-urlencoded',
        'cookie': cookie,
        'referer': 'https://www.toutiao.com/search',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3756.400 QQBrowser/10.5.4043.400',
        'x-requested-with': 'XMLHttpRequest',
    }
    url = 'https://www.toutiao.com/api/search/content/'

    i = 0
    while 1:
        params = {
            'aid': '24',
            'app_name': 'web_search',
            'offset': i * 20,  # API pages in steps of 20
            'format': 'json',
            'keyword': kw,
            'autoload': 'true',
            'count': 20,
            'en_qc': 1,
            'cur_tab': 1,
            'from': 'search_tab',
            'pd': 'synthesis',
            # 'timestamp':1589435388018,
        }
        print(f'查询关键词-{kw}，第-{i+1}-页')
        r = requests.get(url, params=params, headers=hs, timeout=30)
        try:
            j = r.json()
        except ValueError as e:
            # Response was not JSON (blocked / captcha / HTML error page).
            # Must stop here: continuing would reference an undefined `j`.
            print(e)
            break
        with open(f'头条/json/{kw}_{i}.json', 'w', encoding='utf8')as f:
            f.write(json.dumps(j, indent=4, ensure_ascii=False))
        j['keyword'] = kw  # get_img reads the keyword from the page dict
        if j.get('data'):
            get_img(j)
        if not j.get('has_more'):
            break
        i += 1


def main():
    """Fan the keyword list out over a process pool of scrapers.

    Ensures the shared JSON output directory exists before any worker
    starts, then runs get_json once per keyword in parallel.
    """
    keywords = ['风景', '街拍', '科技', '数码', '动漫', '漫画']
    json_dir = './头条/json'
    os.makedirs(json_dir, exist_ok=True)  # workers assume this path exists
    workers = Pool()
    workers.map(get_json, keywords)
    workers.close()  # no more tasks will be submitted
    workers.join()   # wait for every keyword to finish


if __name__ == "__main__":
    main()
