# -*- coding: utf-8 -*-
# @Time    : 2020/1/3 14:40
# @Author  : Damn7Kx
# @Software: PyCharm
import datetime
import json
import time
from w3lib.html import remove_tags
import scrapy
from NewsSpider.items import NewsItem
from NewsSpider.tools.utils import Utils
from NewsSpider.tools.filter_time import Times
from NewsSpider.tools.redis_db import Redis_DB
from urllib.parse import urlencode
from hashlib import md5


class XiaoShiNews(scrapy.Spider):
    """Hour News (小时新闻) spider.

    Article lists come from a fixed app API and are paged by reusing the
    last item's ``publishTime`` as the next ``lastMinPublishTime``; each
    article body is then fetched from a detail endpoint and stripped of
    HTML tags. Every request is signed with an MD5 digest (see
    ``_signed_url``).
    """

    name = 'Xiaoshi'
    t = Times()
    redis = Redis_DB()
    # Channel ids 1..20 — presumably the app's news sections; TODO confirm.
    types = list(range(1, 21))
    # Shared secret appended to the sign string before hashing.
    SIGN_KEY = '24b9877553e548ada36eb92bda830e76'

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.ProxyIPMiddleware': 544,
        },
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }
    headers = {
        'Cookie': 'sessionId=086d858e-b915-488c-b311-b637d66331e3',
        'deviceInfo': '{"appVersion":"TwentyFourHours6.1.1","screenSize":"1440*2392","channel":"yingyongbao","os":"Android","regionCode":"","did":"ffffffff-cd54-7a6d-ffff-ffffa1f8053c","deviceType":"Genymotion Google Nexus 6P - 7.1.0 - API 25 - 1440x2560"}',
        'User-Agent': 'TwentyFourHours/6.1.1 (Genymotion; Android 7.1.1)',
        'Content-Type': 'application/json; charset=utf-8',
        'Host': 'app.thehour.cn',
        'Connection': 'Keep-Alive',
        'Accept-Encoding': 'gzip',
    }
    # Must match the sessionId baked into the Cookie header above.
    session = '086d858e-b915-488c-b311-b637d66331e3'

    def _signed_url(self, path, params):
        """Return the full signed URL for *path* with JSON body *params*.

        The app concatenates ``url``/``sessionId``/``timestamp``/``channel``
        /``deviceInfo``/``body`` with the shared secret, MD5-hashes the
        result, and passes the digest as the ``sign`` query parameter.
        The same freshly generated timestamp goes into both the sign
        string and the query string — they must agree or the API rejects
        the request.
        """
        timestamp = int(time.time() * 1000)
        raw = (
            f'url={path}sessionId={self.session}timestamp={timestamp}'
            f'channel=APPdeviceInfo={self.headers.get("deviceInfo")}'
            f'body={params}{self.SIGN_KEY}'
        )
        sign = md5(raw.encode()).hexdigest()
        return f'https://app.thehour.cn{path}?timestamp={timestamp}&channel=APP&sign={sign}'

    def start_requests(self):
        """Yield one signed first-page list request per channel."""
        for t in self.types:
            params = json.dumps({
                "channelId": t,
                "lastMinPublishTime": 0
            })
            url = self._signed_url('/h24/app/v6.1/channel/news/refresh', params)
            yield scrapy.Request(url, method="POST", callback=self.parse_text, headers=self.headers, dont_filter=True,
                                 body=params, meta={"type": t, "number": 1})

    def parse_text(self, response):
        """Parse a channel list page.

        Schedules the next page (at most 5 pages per channel) and one
        detail request for every article that is recent and not already
        seen in the Redis dedup set.
        """
        print("正在访问列表页:", response.url)
        t = response.meta['type']
        number = response.meta['number']
        datas = json.loads(response.text)
        data_ = datas['data']['articleList']
        if not data_:
            # Empty page: nothing to paginate from or to parse.
            return
        # Pagination: the API pages backwards by the oldest publishTime
        # on the current page. Stop after page 5.
        if number <= 5:
            params = json.dumps({
                "channelId": t,
                "lastMinPublishTime": data_[-1]['publishTime']
            })
            next_url = self._signed_url('/h24/app/v6.1/channel/news/refresh', params)
            yield scrapy.Request(next_url, method="POST", callback=self.parse_text, headers=self.headers,
                                 dont_filter=True, body=params, meta={"type": t, "number": number + 1})

        for d in data_:
            pubdate = Utils.process_timestamp(d['publishTime'])
            pubdate = str(self.t.datetimes(pubdate))
            if not self.t.time_is_Recent(pubdate):
                print("该篇文章不在范围时间内:", pubdate)
                continue
            title = d.get('title')
            if title is None:
                continue
            # An article without a share URL cannot be deduplicated or stored.
            url = d.get('shareUrl')
            if url is None:
                continue
            author = d.get('fgdAuthorName', '')
            article_id = Utils.url_hash(url)
            # check_exist_2 returning 0 means the hash was already present.
            if self.redis.check_exist_2("wenzhangquchong", article_id, '') == 0:
                print('该id:%s已存在' % article_id)
                continue
            dicts = {
                'id': article_id,
                'author': author,
                'url': url,
                'title': title,
                'pubdate': pubdate,
            }
            params = json.dumps({"articleId": d['id']})
            get_content = self._signed_url('/h24/app/v6.1/article/load', params)
            yield scrapy.Request(get_content, method="POST", callback=self.parse, headers=self.headers,
                                 dont_filter=True, body=params, meta=dicts)

    def parse(self, response):
        """Build a NewsItem from an article detail response.

        List-page metadata arrives via ``response.meta``; only the HTML
        body comes from this response.
        """
        if not response.text:
            return
        datas = json.loads(response.text)
        data_ = datas['data']
        item = NewsItem()
        item['id'] = response.meta['id']
        item['url'] = response.meta['url']
        item['title'] = response.meta['title']
        item['pubdate'] = response.meta['pubdate']
        # Degrade to an empty body when the payload lacks HTMLContent
        # (or 'data' itself is null) instead of dropping the article.
        html = (data_ or {}).get('HTMLContent') or ''
        item['content'] = remove_tags(html)
        item['author'] = response.meta['author']
        item['formats'] = "app"
        item['dataSource'] = '小时新闻'
        item['serchEnType'] = "小时新闻"
        item['html'] = html
        item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['collectProcess'] = 'crawl_news'
        item['serverIp'] = "113.128.12.74"
        yield item
