# -*- coding: utf-8 -*-
# @Time    : 2019/5/13 15:42
# @Author  : zjj
# @Email   : 1933860854@qq.com
# @File    : dayDayNewsApp.py
# @Software: PyCharm
import scrapy
import json
from iQiYiSpider.items import dayDayNewsAppItem
import time
import random
class dayDayNewsApp(scrapy.Spider):
    """Spider for the Tencent KuaiBao ("天天快报") Android news-feed API.

    POSTs a form payload captured from the mobile client to the feed
    endpoint, picks out the news entries whose ids are listed under
    ``changeInfo.commentList`` (presumably the ad slots — confirm against
    the API), and yields each as a ``dayDayNewsAppItem``.  After every
    response it immediately re-requests the same endpoint, so the spider
    polls the feed indefinitely.
    """

    name = "daydaynews_app"
    allowed_domains = ['r.cnews.qq.com']
    start_urls = ['https://r.cnews.qq.com/getSubNewsChlidInterest?devid=863254010762044']

    # Request-signing values captured from a real client session; the API
    # appears to reject requests without them — TODO confirm freshness.
    unixtimesign = "1557740115838"
    qn_sig = "3e7b79040fb084de5d8f3892e7530a94"
    qn_rid = "8d9af138-9d6f-403f-bb74-96427b138f0b"
    snqn = "HdHFZQUHj2Xd5LohEN8XlEWuEIk8iSa4D5Q8dLcwbIZQHRl86r1EuVqFcbpblWT2uwYjPIQjdetZ6Px4N5iwCldthV5t4UTOptqBRigm8PrcFe8sjsY0pP3akMypMPZGMXo9EzsW2vgIgnihxAZDyg=="
    page = str(1)

    # Headers captured from the Android client; User-Agent is the
    # URL-encoded app name "天天快报5050(android)".
    headers = {
        'Host': 'r.cnews.qq.com',
        'Accept-Encoding': 'gzip,deflate',
        'Referer': 'http://cnews.qq.com/cnews/android/',
        'User-Agent': '%E5%A4%A9%E5%A4%A9%E5%BF%AB%E6%8A%A55050(android)',
        'Cookie': 'lskey=; luin=; skey=; uin=; logintype=0;',
        'snqn': snqn,
        'svqn': '1_4',
        'qn-sig': qn_sig,
        'qn-rid': qn_rid,
        'Content-Type': 'application/x-www-form-urlencoded',
        'Connection': 'Keep-Alive'
    }

    # Decoded form body of the captured client request (device ids, city
    # list, pagination, signing fields).  Values are strings because
    # FormRequest url-encodes them as-is.
    body_dict = {
        'cityList': '广州|重庆|成都|杭州|苏州|深圳|郑州|武汉|石家庄|西安|南京|上海|北京',
        'isUpdatingLocation': '1',
        'REQBuildTime': '1557734178661',
        'ssid': 'sagita4ccc6a9ef4863990',
        'omgid': 'fc444b4282653b410d5ba3aaa34bc0f18f000010212b09',
        'forward': '1',
        'REQExecTime': '1557734178661',
        'qqnetwork': 'wifi',
        'last_time': '1557734151',
        'commonsid': 'cd169f20248b4df08572eb7c1e0bd3fb',
        'kingCardType': '0',
        'picSizeMode': '0',
        'commonGray': '1_3|2_1|18_1|12_1|22_2|49_1|14_1|17_1|30_1',
        'currentTab': 'kuaibao',
        'manualRefresh': '1',
        'proxy_addr': '192.168.3.33:8888',
        'is_wap': '0',
        'lastCheckCardType': '0',
        'locMore': '0',
        'omgbizid': '9753baf359adb54368f8862842bdd2f1f9260080212b09',
        'page': page,
        'imsi': '460077620410615',
        'lastRefreshTime': '1557734151',
        'commonIsFirstLaunch': '1',
        'bssid': '4C:CC:6A:9E:F4:86',
        'locType': '',
        'activefrom': 'icon',
        'qimei': '863254010762044',
        'refresh_from': 'refresh_footer',
        'cachedCount': '18',
        'direction': '1',
        'Cookie': '',
        'lskey': '',
        'luin': '',
        'skey': '',
        'uin': '',
        'logintype': '0',
        'sessionid': '',
        'chRefreshTimes': '1',
        'chlid': 'kb_location_beijing',
        'imsi_history': '460077620410615',
        'hw_fp': 'Xiaomi/sagit/sagit:5.1.1/NMF26X/500190227:user/release-keys',
        'mid': 'd4a3ad9d280a930c45ca9a5136865ac5c6828679',
        'devid': '863254010762044',
        'mac': '4C:CC:6A:9E:F4:86',
        'store': '9001087',
        'screen_height': '1280',
        'apptype': 'android',
        'origin_imei': '863254010762044',
        'rover': '1',
        'hw': 'Xiaomi _MI6',
        'appversion': '5.0.50',
        'appver': '22_areading_5.0.50',
        'uid': '4ccc6a9ef4863990',
        'screen_width': '720',
        'sceneid': '',
        'android_id': '4ccc6a9ef4863990',
        'unixtimesign': unixtimesign,
        'qn-sig': qn_sig,
        'qn-rid': qn_rid,
    }

    def _feed_request(self, url):
        """Build the single POST request shape this spider uses everywhere.

        ``dont_filter=True`` is required because the same URL is
        re-requested on every cycle and would otherwise be deduplicated.
        """
        return scrapy.FormRequest(
            url, method='POST', headers=self.headers,
            formdata=self.body_dict, callback=self.parse, dont_filter=True)

    def start_requests(self):
        """Kick off the polling loop with one request per start URL."""
        for url in self.start_urls:
            yield self._feed_request(url)

    def parse(self, response):
        """Extract ad items from a feed response, then re-poll the endpoint.

        Yields ``dayDayNewsAppItem`` instances and the follow-up
        ``FormRequest``.  On any error the exception is logged and the
        endpoint is re-requested so the loop never stalls.
        """
        try:
            text_json = json.loads(response.text)

            # ret == 1 looks like a server-side throttle/rejection signal
            # — TODO confirm against the API.  Back off, then retry.
            # NOTE(review): time.sleep blocks Scrapy's reactor and every
            # in-flight request for 30 s — consider a delayed request.
            if text_json.get('ret') == 1:
                print('进入休眠状态，停止爬数据！！！！！')
                time.sleep(30)
                for url in self.start_urls:
                    yield self._feed_request(url)

            # Ad ids are published under changeInfo.commentList.
            ad_id_list = []
            change_info = text_json.get('changeInfo')
            if change_info and 'commentList' in change_info:
                ad_id_list = [c['id'] for c in change_info['commentList']]

            if ad_id_list and 'newslist' in text_json:
                for news in text_json['newslist']:
                    if news['id'] in ad_id_list:
                        item = dayDayNewsAppItem()
                        # Initialise every declared field so the pipeline
                        # never sees a missing key.
                        for key in item.fields:
                            item[key] = None
                        item['ad_id'] = news.pop('id')
                        for key in news:
                            if key in item.fields:
                                item[key] = news[key]
                        # Normalise empty strings to None for storage.
                        for key in item.fields:
                            if item[key] == '':
                                item[key] = None
                        yield item

            # Keep the polling loop going.
            for url in self.start_urls:
                yield self._feed_request(url)

        except Exception:
            # Log the full traceback instead of swallowing it with print(),
            # then retry so the loop survives malformed responses.
            self.logger.exception('parse failed; re-requesting feed')
            for url in self.start_urls:
                yield self._feed_request(url)


if __name__ == '__main__':
    # Run this spider directly through the Scrapy command-line entry point.
    from scrapy import cmdline
    cmdline.execute(['scrapy', 'crawl', 'daydaynews_app'])