# -*- coding: utf-8 -*-
# @Time    : 2019/12/9 14:03
# @Author  : Damn7Kx
# @Software: PyCharm
import datetime
import json
import time
from urllib.parse import urlencode
from hashlib import md5
import scrapy
from NewsSpider.items import NewsItem
from NewsSpider.tools.utils import Utils
from NewsSpider.tools.filter_time import Times
from NewsSpider.tools.redis_db import Redis_DB
from w3lib.html import remove_tags
import uuid


class KanDianKuaiBaoNews(scrapy.Spider):
    """Spider for the KanDianKuaiBao news app (fixed POST APIs, urlencoded bodies).

    Flow:
      start_requests -> one signed list request per (endpoint, channel) pair
      parse_text     -> paginates one channel feed (max 11 pages) and queues
                        one content request per fresh, not-yet-seen article
      parse          -> turns a content response into NewsItem(s)
    """

    name = 'Kdkb'

    t = Times()          # publish-time parsing / recency filter helper
    redis = Redis_DB()   # dedup store (the "wenzhangquchong" hash)

    # API endpoints (cgi names); each uses its own channel-id list below.
    category = ['getSubNewsChlidInterest', 'getVerticalChannel']

    # Channel ids served by getSubNewsChlidInterest.
    # FIX: duplicate 'kb_location_guangzhou' removed (was listed twice),
    # which caused the same channel to be crawled twice per run.
    types = ['daily_timeline', 'kb_news_xianggang', 'kb_news_sjd', 'kb_news_house',
             'kb_news_hotshare', 'kb_news_history', 'kb_news_world', 'kb_news_finance',
             'kb_news_car', 'kb_news_sex', 'kb_news_pet', 'kb_news_astro', 'kb_news_baby',
             'kb_news_travel', 'kb_house_shanghai', 'kb_news_fishing', 'kb_news_football',
             'kb_news_epl', 'kb_news_farmer', 'kb_news_edu', 'kb_news_gaojidi', 'kb_news_zhengnengliang',
             'kb_location_beijing', 'kb_location_shanghai', 'kb_location_chongqing', 'kb_location_xian',
             'kb_location_tianjin', 'kb_location_chengdu', 'kb_location_guangzhou', 'kb_location_fuzhou',
             'province33', 'kb_location_xiamen', 'kb_location_shenzhen',
             'kb_location_nanjing', 'kb_location_shijiazhuang', 'kb_location_wuhan', 'kb_location_zhengzhou',
             'kb_location_hangzhou', 'province34', 'province32', 'kb_location_kunming', 'kb_location_suzhou',
             'city48', 'city55', 'city50', 'city62', 'city63', 'city57', 'city58', 'city53', 'city51', 'city56',
             'city61', 'city147', 'city146', 'city148', 'city142', 'city144', 'city143', 'city65',
             'city145', 'city339', 'city341', 'city349', 'city352', 'city343', 'city340', 'city347',
             'city351', 'city350', 'city346', 'city348', 'city342', 'city344', 'city345', 'city216',
             'city207', 'city210', 'city203', 'city205', 'city209', 'city217', 'city213', 'city204',
             'city215', 'city201', 'city206', 'city202', 'city211', 'city218', 'city212', 'city214',
             'city208', 'city200', 'city219', 'city223', 'city228', 'city232', 'city224',
             'city226', 'city221', 'city230', 'city229', 'city231', 'city220', 'city225',
             'city222', 'city227', 'city297', 'city300']

    # Channel ids served by getVerticalChannel.
    # FIX: duplicate 'kb_news_filmtv' removed (was listed twice).
    types2 = ['kb_news_hotnews', 'kb_news_nba', 'kb_news_mil', 'kb_news_sports', 'kb_news_filmtv',
              'kb_news_cate', 'kb_news_laugh', 'kb_news_tech', 'kb_news_star', 'kb_news_game',
              'kb_news_chaobao', 'kb_news_erciyuan', 'kb_news_bagua']

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.ProxyIPMiddleware': 544,
        },
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }
    headers = {
        'accept-encoding': 'gzip,deflate',
        'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36",
        'efqn': '1',
        'Content-Type': 'application/x-www-form-urlencoded',
    }
    devid = '869342479554350'

    def get_qn_sig(self, cgi, qn_rid, devid) -> str:
        """Compute the per-request API signature.

        :param cgi: endpoint name, e.g. 'getSubNewsInterest'
        :param qn_rid: random request id, str(uuid.uuid4())
        :param devid: device id, e.g. 'a44507ee64dcbc36'
        :return: hex md5 of the canonical "k=v&..." string plus shared secret,
                 sent to the server as the 'qn-sig' field
        """
        appver = '25_areading_6.1.41'
        secret = 'qn123456'
        raw = f"appver={appver}&cgi={cgi}&devid={devid}&qn-rid={qn_rid}&secret={secret}"
        return md5(raw.encode()).hexdigest()

    def _build_list_payload(self, cgi, chlid):
        """Build the signed form payload for the first page of one channel feed."""
        qn_rid = str(uuid.uuid4())
        return {'last_id': '', 'lastRefreshTime': '',
                'preload': '1', 'refreshType': 'normal', 'last_time': '', 'bottom_id': '',
                'refresh_from': 'refresh_footer', 'sessionid': 'undefined',
                'top_time': '', 'top_id': '',
                'chlid': chlid, 'bottom_time': '',
                'page': '0', 'qqnetwork': 'wifi',
                'unixtimesign': str(int(time.time() * 1000)),
                'qn-sig': self.get_qn_sig(cgi, qn_rid, self.devid),
                'qn-rid': qn_rid,
                'devid': self.devid,
                'appversion': '6.1.41', 'appver': '25_areading_6.1.41'}

    def start_requests(self):
        """Yield one signed list request per (endpoint, channel) pair.

        The original duplicated the request-building code across two branches;
        both branches were identical except for the channel list and cgi name.
        """
        for cgi in self.category:
            chlids = self.types if cgi == 'getSubNewsChlidInterest' else self.types2
            for chlid in chlids:
                payload = self._build_list_payload(cgi, chlid)
                yield scrapy.Request(f"https://r.cnews.qq.com/{cgi}",
                                     method='POST', headers=self.headers,
                                     body=urlencode(payload),
                                     dont_filter=True, callback=self.parse_text,
                                     meta={"payload": payload, "number": 0, "cgi": cgi})

    def parse_text(self, response):
        """Handle one channel-list page: queue the next page (depth-limited),
        then one content request per usable article entry."""
        print('正在访问列表页:', response.url)
        print(response)
        if not response.text:
            return
        number = response.meta['number']
        payload = response.meta['payload']
        cgi = response.meta['cgi']
        if number > 10:
            # Depth limit: at most 11 pages per channel.
            return
        datas = json.loads(response.text)
        if datas['ret'] != 0:
            print("当前频道:", payload['chlid'], response.text)
            return
        data_ = datas['newslist']

        # Roll the cursor fields forward for the next page request.
        qn_rid = str(uuid.uuid4())
        payload['unixtimesign'] = str(int(time.time() * 1000))
        payload['lastRefreshTime'] = datas['timestamp']
        payload['last_time'] = datas['timestamp']
        payload['bottom_id'] = data_[-1]['id']
        # The first entry may be an ad missing these fields; fall back to the second.
        try:
            payload['top_time'] = data_[0]['timestamp']
            payload['last_id'] = data_[0]['id']
            payload['top_id'] = data_[0]['id']
        except (KeyError, IndexError):
            payload['top_time'] = data_[1]['timestamp']
            payload['last_id'] = data_[1]['id']
            payload['top_id'] = data_[1]['id']
        # FIX: original sent the last item's *id* as bottom_time; use its
        # timestamp (symmetric with top_time), keeping the id as a fallback.
        payload['bottom_time'] = data_[-1].get('timestamp', data_[-1]['id'])
        payload['qn-rid'] = qn_rid
        # FIX: original wrote the refreshed signature to a misspelled key
        # ('qn-ig'), so pagination reused a stale qn-sig that no longer
        # matched the new qn-rid.
        payload['qn-sig'] = self.get_qn_sig(cgi, qn_rid, self.devid)
        payload['page'] = str(number + 1)
        yield scrapy.Request(response.url, method='POST', headers=self.headers,
                             body=urlencode(payload), dont_filter=True,
                             callback=self.parse_text,
                             meta={"payload": payload, "number": number + 1, "cgi": cgi})

        for d in data_:
            request = self._article_request(d)
            if request is not None:
                yield request

    def _article_request(self, d):
        """Build the content request for one list entry.

        Returns None when the entry has no url (ads/cards), is already in the
        dedup store, has an unparseable publish time, or is not recent.
        """
        articles_id = d['id']
        title = d['title']
        try:
            url = d['url']
        except KeyError:
            return None
        article_hash = Utils.url_hash(url)
        if self.redis.check_exist_2("wenzhangquchong", article_hash, '') == 0:
            print('该id:%s已存在' % article_hash)
            return None
        try:
            # Times.datetimes is opaque here, so keep a broad (but not bare)
            # catch: any parse failure just skips the entry, as before.
            pubdate = str(self.t.datetimes(d['time']))
        except Exception:
            return None
        if not self.t.time_is_Recent(pubdate):
            return None
        meta = {'id': article_hash,
                'url': url,
                'pubdate': pubdate,
                'title': title,
                'dataSource': d.get('source', '')}
        qn_rid = str(uuid.uuid4())
        content_payload = {'id': articles_id,
                           'qn-sig': self.get_qn_sig("getSubNewsContent", qn_rid, self.devid),
                           'qn-rid': qn_rid, 'devid': self.devid,
                           'appversion': '6.1.41', 'appver': '25_areading_6.1.41'}
        return scrapy.Request("https://r.cnews.qq.com/getSubNewsContent?",
                              method='POST', headers=self.headers,
                              body=urlencode(content_payload),
                              dont_filter=True, callback=self.parse, meta=meta)

    def parse(self, response):
        """Turn one article-content response into a NewsItem; when the API
        also returns a short_url not yet in the dedup store, emit a second
        item for that url."""
        datas = json.loads(response.text)
        if not datas:
            return
        item = NewsItem()
        item['id'] = response.meta['id']
        item['url'] = response.meta['url']
        item['title'] = response.meta['title']
        item['pubdate'] = response.meta['pubdate']
        html = datas['content']['text']
        item['content'] = remove_tags(html)
        try:
            item['author'] = datas['card']['chlname']
        except (KeyError, TypeError):
            item['author'] = ''
        item['formats'] = "app"
        item['dataSource'] = response.meta['dataSource']
        item['serchEnType'] = "看点快报"
        item['html'] = html
        item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['collectProcess'] = 'crawl_news'
        item['serverIp'] = "113.128.12.74"
        yield item

        url2 = datas.get('short_url')
        if not url2:
            # FIX: original crashed with KeyError when short_url was absent.
            return
        id2 = Utils.url_hash(url2)
        if self.redis.check_exist_2("wenzhangquchong", id2, '') == 0:
            print('该id:%s已存在' % id2)
            return
        # FIX: copy instead of mutating the already-yielded item — a deferred
        # pipeline could otherwise see the first item with the second's fields.
        item2 = item.copy()
        item2['url'] = url2
        item2['id'] = id2
        item2['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        yield item2
