import json
import re
from urllib.parse import urlencode
import os
from hashlib import md5
import pymongo
from requests.exceptions import RequestException
import requests
import time
from config import *
from multiprocessing import Pool

# Set up the MongoDB client/database handle (MONGO_URL / MONGO_DB come from config.py).
client = pymongo.MongoClient(MONGO_URL)
db = client[MONGO_DB]

# Millisecond timestamp captured once at import time.
# NOTE(review): this value goes stale for long-running processes; it is baked
# into the "path" header below and reused for every request.
timestamp = int(round(time.time() * 1000))

# Request headers copied from a browser session.  The "cookie" and "path"
# values are account/session specific and will expire — refresh them from the
# browser dev tools if requests start being rejected.
headers = {
    "authority": "www.toutiao.com",
    "method": "GET",
    "path": "/api/search/content/?aid=24&app_name=web_search&offset=0&format=json&keyword "
            "=%E8%A1%97%E6%8B%8D&autoload=true&count=20&cur_tab=1&en_qc=1&from=search_tab&pd=synthesis"
            "&timestamp=" + str(timestamp),
    "scheme": "https",
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9, image/webp, image/apng, */*;q=0.8, "
              "application/signed-exchange;v=b3",
    "accept-encoding": "gzip, deflate, br",
    "accept-language": "zh-CN, zh;q=0.9",
    "cache-control": "max-age=0",
    "cookie": "WEATHER_CITY=%E5%8C%97%E4%BA%AC;csrftoken=8886162d2e2f874d0ab13af862422681;tt_webid"
              "=6787229573823579662;sso_uid_tt=59e68449dc606f9ad63bf5678a5203ba;sso_uid_tt_ss"
              "=59e68449dc606f9ad63bf5678a5203ba;toutiao_sso_user=3c3042946b9a941937a3b189a4271333"
              ";toutiao_sso_user_ss=3c3042946b9a941937a3b189a4271333;sid_guard"
              "=dd9d2a1ed973eaac7559f41a97204015%7C1580275095%7C5184000%7CSun%2C+29-Mar-2020+05%3A18%3A15"
              "+GMT;uid_tt=40709669ec94a344ab3c09630f2f7549;uid_tt_ss=40709669ec94a344ab3c09630f2f7549"
              ";sid_tt=dd9d2a1ed973eaac7559f41a97204015;sessionid=dd9d2a1ed973eaac7559f41a97204015"
              ";sessionid_ss=dd9d2a1ed973eaac7559f41a97204015;s_v_web_id"
              "=k69cw02r_bYFqlQre_sTeU_4yZM_9QEf_SuPKJywbLPrm;__tasessionId=dqbujb8q81580911802200",
    "sec-fetch-mode": "navigate",
    "sec-fetch-site": "none",
    "sec-fetch-user": "?1",
    "upgrade-insecure-requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) "
                  "Chrome/69.0.3497.100 Safari/537.36",

}


# Sample request URL for reference:
# https://www.toutiao.com/api/search/content/?aid=24&app_name=web_search&offset=0&format=json
#   &keyword=街拍&autoload=true&count=20&en_qc=1&cur_tab=1&from=search_tab&pd=synthesis&timestamp=1580910902211
# 获取索引的页面
# Fetch one page of the search index
def get_page_index(offset, keyword):
    """Fetch one page of Toutiao search results as raw JSON text.

    Args:
        offset: pagination offset (multiples of 20).
        keyword: the search term.

    Returns:
        The response body (JSON text) on HTTP 200, otherwise None.
    """
    data = {
        'aid': 24,
        'app_name': 'web_search',
        'offset': offset,
        'format': 'json',
        'keyword': keyword,
        'autoload': 'true',
        'count': '20',
        'cur_tab': 3,
        'en_qc': 1,
        'from': 'search_tab',
        'pd': 'synthesis',
        # Fresh per-request timestamp instead of the stale module-level value
        # captured once at import time.
        'timestamp': int(round(time.time() * 1000)),
    }

    url = 'https://www.toutiao.com/api/search/content/?' + urlencode(data)
    print(url)
    try:
        # timeout keeps a dead connection from hanging the pool worker forever
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        print("请求索引失败")
        return None


# 解析索引的页面内容
# Parse the search-index page content
def parse_page_index(html):
    """Parse the search-index JSON and yield one record per usable article.

    Args:
        html: raw JSON text from get_page_index, or None on fetch failure.

    Yields:
        dicts with keys 'atticle_url' (sic — misspelled key kept byte-for-byte
        because existing callers read it), 'title' and 'imglist'.
    """
    if not html:
        # get_page_index returns None on failure; don't crash on it
        return
    try:
        data = json.loads(html)
    except ValueError:
        # malformed or truncated response body
        return
    if data and 'data' in data.keys():
        for item in data.get('data'):
            if item.get('article_url') and item.get('title') and item.get('image_list'):
                yield {
                    'atticle_url': item.get('article_url'),
                    'title': item.get('title'),
                    'imglist': item.get('image_list')
                }


# 获取详情页面数据
# Fetch the detail page
def get_page_detail(url):
    """Download an article detail page.

    Args:
        url: absolute article URL taken from the search index.

    Returns:
        The page HTML on HTTP 200, otherwise None.
    """
    try:
        # timeout keeps a dead connection from hanging the pool worker forever
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        print("请求详情页出错")
        return None


# 解析详情页面的图片url
# Extract image URLs from the detail page and download each one
def parse_page_detail(html):
    """Extract escaped image URLs from the detail page HTML and download them.

    Args:
        html: detail-page HTML, or None on fetch failure.

    Returns:
        The list of normalized image URLs (empty if html is falsy).
    """
    if not html:
        # get_page_detail returns None on failure; don't crash on it
        return []
    pattern = re.compile(r'http:(.*?)&quot;', re.I | re.M)
    # The page embeds URLs with JS escapes: '\u002F' for '/' plus stray backslashes.
    urls = ['http:' + match.replace('\\u002F', '/').replace('\\', '')
            for match in pattern.findall(html)]
    for url in urls:
        # (the original per-item truthiness check was always true after the
        # 'http:' prefix, so every match is downloaded)
        download_img(url)
    return urls


# 保存到数据库
# Persist one result record to MongoDB
def save_to_db(result):
    """Insert one result document into the configured MongoDB collection.

    Args:
        result: the document to insert.

    Returns:
        True when the write was acknowledged by the server, False otherwise.
    """
    try:
        # insert_one() returns an always-truthy InsertOneResult, so the old
        # `if insert_one(...)` check could never fail; test .acknowledged instead.
        if db[MONGO_TABLE].insert_one(result).acknowledged:
            print('save_to_mongodb', result)
            return True
    except Exception as exc:
        # boundary handler: report and fall through to the failure path
        print('save_to_mongodb failed', exc)
    return False


# 下载图片
# Download a single image
def download_img(url):
    """Download one image and hand its bytes to save_img.

    Args:
        url: absolute image URL.

    Returns:
        None in all cases; failures are reported via print.
    """
    try:
        # timeout keeps a dead connection from hanging the pool worker forever
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            save_img(response.content)
        return None
    except RequestException:
        print("请求下载出错", url)
        return None


# 保存下载的图片
# Save downloaded image bytes to disk
def save_img(content):
    """Write image bytes to ./pic/<md5>.jpg, skipping duplicates.

    The MD5 of the content is the file name, so identical images are
    de-duplicated for free.

    Args:
        content: raw image bytes.
    """
    pic_dir = os.path.join(os.getcwd(), 'pic')
    # The original built the path with a Windows-only '\\pic' concatenation
    # and crashed when the folder did not exist; join portably and create it.
    os.makedirs(pic_dir, exist_ok=True)
    file_path = os.path.join(pic_dir, md5(content).hexdigest() + '.jpg')
    if not os.path.exists(file_path):
        with open(file_path, 'wb') as f:
            f.write(content)
        print('保存', file_path)


def main(offset):
    """Process one search-result page: fetch the index at *offset*, then fetch
    each article and download its images.

    Args:
        offset: pagination offset passed to get_page_index.
    """
    html = get_page_index(offset, '蒋聘婷')
    if not html:
        # index fetch failed; nothing to parse
        return
    for item in parse_page_index(html):
        article_url = item.get('atticle_url')  # key spelling matches parse_page_index
        if not article_url:
            continue
        detail_html = get_page_detail(article_url)
        if not detail_html:
            # detail fetch failed; skip instead of crashing the regex parser
            continue
        result = {
            'title': item['title'],
            'atticle_url': article_url,
            'img_url': parse_page_detail(detail_html),
        }
        if result['img_url']:
            # save_to_db(result)  # persistence intentionally disabled for now
            print(result)

if __name__ == "__main__":
    # One offset per result page (20 items each) over the configured range.
    groups = [x * 20 for x in range(GROUP_START, GROUP_END + 1)]
    pool = Pool()
    try:
        pool.map(main, groups)
    finally:
        # The original never closed the pool; make worker shutdown explicit.
        pool.close()
        pool.join()
