# -*- coding: utf-8 -*-
# @Time    : 2020/1/14 12:39
# @Author  : Damn7Kx
# @Software: PyCharm
import datetime
import json
from w3lib.html import remove_tags
import scrapy
from NewsSpider.items import NewsItem
from NewsSpider.tools.utils import Utils
from NewsSpider.tools.filter_time import Times
from NewsSpider.tools.redis_db import Redis_DB


class YangShiNews(scrapy.Spider):
    """CCTV News (央视新闻) app spider.

    Walks the mobile-app list API (GET, paginated via the ``p`` query
    parameter) for each channel id in :attr:`types`, deduplicates against
    Redis, filters by publish recency, then fetches each article's detail
    API and yields a populated ``NewsItem``.
    """

    name = 'Yangshi'
    t = Times()          # publish-time parsing / recency-window helper
    redis = Redis_DB()   # Redis-backed URL-hash dedup store

    # Channel (navigation) ids accepted by the list API.
    types = ['Nav-9Nwml0dIB6wAxgd9EfZA160510', 'Nav-7VoQDnopurN8EE0iqihA161101',
             'Nav-9FHRlePE9rWWSViVn5tW170525', 'Nav-x1EttmgGbITPUk4msBDj160812',
             'Nav-iqwRTtNj4tQCEkyUkBzW160812', 'Nav-c9aZErstPWnzhTy9ZHTB160812',
             'Nav-GxfrDirK3AR2nnyMC9Ub160812', 'Nav-Y7GOiDYMu0PLMSWBFJRs160812',
             'Nav-90H6Ufov92Vcy2DvRPSS160812', 'Nav-BIJQ6pPGvkbp6V9D74Gu160812',
             'Nav-WRLNzU41eEG7G10Xflg0180117']

    # API endpoint templates (hoisted so they are defined in one place).
    LIST_API = ('http://api.cportal.cctv.com/api/rest/navListInfo/'
                'getHandDataListInfoNew?id={t}&toutuNum=1&version=1&p={p}&n=20')
    DETAIL_API = 'http://api.cportal.cctv.com/api/rest/articleInfo?id={id}'

    # Follow-up pages are scheduled while the current page number is <= this,
    # so pages 1..MAX_PAGE+1 are actually requested (matches the original
    # ``number > 7`` cutoff).
    MAX_PAGE = 7

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.ProxyIPMiddleware': 544,
            'NewsSpider.middlewares.RandomUserAgentMiddleware': 543,
        },
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }

    def start_requests(self):
        """Seed one page-1 list request per channel id."""
        for channel in self.types:
            url = self.LIST_API.format(t=channel, p=1)
            yield scrapy.Request(url, callback=self.parse_text,
                                 dont_filter=True,
                                 meta={"type": channel, "p": 1})

    def parse_text(self, response):
        """Parse one page of a channel's article list.

        Schedules the next page (up to :attr:`MAX_PAGE`) and, for every
        not-yet-seen article inside the accepted time window, requests its
        detail API with the list-derived fields carried in ``meta``.
        """
        self.logger.info('正在访问列表页: %s', response.url)
        channel = response.meta['type']
        page = response.meta['p']

        try:
            datas = json.loads(response.text)
        except ValueError:
            # Malformed/empty payload: nothing to paginate from, nothing to emit.
            datas = {}
        entries = datas.get('itemList') or []

        # Schedule the next list page while within the pagination window.
        if page <= self.MAX_PAGE:
            next_page = page + 1
            url = self.LIST_API.format(t=channel, p=next_page)
            yield scrapy.Request(url, callback=self.parse_text,
                                 dont_filter=True,
                                 meta={"type": channel, "p": next_page})

        for entry in entries:
            title = entry.get('itemTitle')
            detail_url = entry.get('detailUrl')
            if not detail_url:
                # No detail URL means we cannot build a dedup key; skip.
                continue
            url_id = entry_id = Utils.url_hash(detail_url)
            content_id = entry.get('itemID')

            # check_exist_2 returns 0 when the hash was already recorded.
            if self.redis.check_exist_2("wenzhangquchong", url_id, '') == 0:
                self.logger.debug('该id:%s已存在', url_id)
                continue

            try:
                pubdate = str(self.t.datetimes(entry['operate_time']))
            except Exception:
                # Times() is an opaque project helper; treat any parse
                # failure as "no usable publish time" and skip the entry.
                continue
            if not self.t.time_is_Recent(pubdate):
                self.logger.debug('文章不在范围时间内: %s', pubdate)
                continue

            meta = {
                'id': entry_id,
                'url': detail_url,
                'pubdate': pubdate,
                'title': title,
            }
            yield scrapy.Request(self.DETAIL_API.format(id=content_id),
                                 dont_filter=True, callback=self.parse,
                                 meta=meta)

    def parse(self, response):
        """Build a ``NewsItem`` from an article detail API response.

        Yields the item once keyed by the app detail URL's hash and, when the
        payload carries a separate canonical web ``url`` that has not been
        seen in Redis, yields a *copy* re-keyed to that URL.
        """
        if not response.text:
            return
        datas = json.loads(response.text)

        item = NewsItem()
        item['id'] = response.meta['id']
        item['url'] = response.meta['url']
        item['title'] = response.meta['title']
        item['pubdate'] = response.meta['pubdate']

        # ``content`` may be absent or null; normalise to '' before stripping tags.
        html = datas.get('content') or ''
        item['content'] = remove_tags(html)
        item['html'] = html
        item['author'] = datas.get('source') or ''
        item['formats'] = "app"
        item['dataSource'] = "央视新闻APP"
        item['serchEnType'] = "央视新闻APP"
        item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['collectProcess'] = 'crawl_news'
        item['serverIp'] = "113.128.12.74"
        yield item

        # Optionally re-emit under the canonical web URL. NOTE: the original
        # code mutated ``item`` after yielding it, which clobbered the
        # already-emitted object's id/url in async pipelines — a copy avoids
        # that shared-mutation bug.
        web_url = datas.get('url')
        if not web_url:
            return
        web_id = Utils.url_hash(web_url)
        if self.redis.check_exist_2("wenzhangquchong", web_id, '') == 0:
            self.logger.debug('该id:%s已存在', web_id)
            return
        second = item.copy()
        second['url'] = web_url
        second['id'] = web_id
        yield second

