# -*- coding: utf-8 -*-
# @Time    : 2020/1/8 10:41
# @Author  : Damn7Kx
# @Software: PyCharm
import datetime
import json
import time

import scrapy
from NewsSpider.items import NewsItem
from NewsSpider.tools.utils import Utils
from NewsSpider.tools.filter_time import Times
from NewsSpider.tools.parse_html import extract_html
from NewsSpider.tools.redis_db import Redis_DB
from NewsSpider.tools.WeiXinParse import parse_weixin
from w3lib.html import remove_tags


class YueTouTiaoNews(scrapy.Spider):
    """悦头条 (Yuetoutiao) spider: pulls the latest articles from a fixed API.

    Workflow:
      * ``start_requests`` issues one list request (page 1) per category in
        ``types``;
      * ``parse_text`` walks list pages 1..6 per category, filters articles
        by publish time, de-duplicates by URL hash via Redis, and schedules
        one detail request per fresh article;
      * ``parse`` converts the detail JSON into a ``NewsItem``.
    """

    name = 'Yuetoutiao'

    # Category slugs accepted by the list API's ``domain`` query parameter.
    types = ['new_health', 'news_courage', 'news_regimen', 'news_food', 'news_tech',
             'news_life', 'news_car', 'news_fashion', 'news_baby', 'news_travel', 'news_zodiac',
             'news_collection', 'news_house', 'news_home']

    t = Times()          # publish-time normalization / recency filtering helpers
    redis = Redis_DB()   # Redis-backed de-duplication store

    headers = {
        "User-Agent": "Mozilla/5.0 (Linux; Android 7.1.2; G011A Build/N2G48H; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/52.0.2743.100 Safari/537.36",
        "Accept-Encoding": "gzip",
        "Connection": "keep-alive"
    }
    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.ProxyIPMiddleware': 544,
        },
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }

    # Deepest list page to request per category (pages 1..MAX_PAGE inclusive).
    MAX_PAGE = 6

    def start_requests(self):
        """Kick off one first-page list request per category."""
        for t in self.types:
            base_url = f'http://inmnews.ytoutiao.net/yfax-news-api/api/htt/getMainList?curPage=1&domain={t}'
            yield scrapy.Request(base_url, headers=self.headers, callback=self.parse_text, dont_filter=True,
                                 meta={"type": t, "number": 1})

    def parse_text(self, response):
        """Parse one list page: paginate, filter by time, dedupe, fetch details.

        ``response.meta`` carries ``type`` (category slug) and ``number``
        (the 1-based page number of *this* response).
        """
        print("正在访问列表页:", response.url)
        t = response.meta['type']
        number = response.meta['number']
        datas = json.loads(response.text)
        data_ = datas['data']['entityList']

        # Schedule the next list page (up to MAX_PAGE).  NOTE: the original
        # code always requested curPage=1 here, so pagination never advanced;
        # the page counter must be reflected in the URL.
        if number < self.MAX_PAGE:
            next_page = number + 1
            base_url = (f'http://inmnews.ytoutiao.net/yfax-news-api/api/htt/getMainList'
                        f'?curPage={next_page}&domain={t}')
            yield scrapy.Request(base_url, headers=self.headers, callback=self.parse_text, dont_filter=True,
                                 meta={"type": t, "number": next_page})

        for d in data_:
            # Normalize the publish timestamp and drop articles outside the
            # configured recency window.
            pubdate = Utils.process_timestamp(d['publishTime'])
            pubdate = str(self.t.datetimes(pubdate))
            if not self.t.time_is_Recent(pubdate):
                print("该篇文章不在范围时间内:", pubdate)
                continue

            # Title and URL are mandatory; skip entries missing either.
            try:
                title = d['title']
                url = d['url']
            except KeyError:
                continue
            author = d.get('category', '')

            # De-duplicate by URL hash; check_exist_2 returns 0 when the id
            # is already recorded.
            article_id = Utils.url_hash(url)
            if self.redis.check_exist_2("wenzhangquchong", article_id, '') == 0:
                print('该id:%s已存在' % article_id)
                continue

            meta = {
                'id': article_id,
                'author': author,
                'url': url,
                'title': title,
                'pubdate': pubdate,
            }
            # Detail endpoint lives on a different host than the list API.
            get_content = f'http://wnews.ytoutiao.net/yfax-news-api/api/htt/getDetailById?id={d["id"]}'
            yield scrapy.Request(get_content, callback=self.parse, headers=self.headers,
                                 dont_filter=True, meta=meta)

    def parse(self, response):
        """Build a ``NewsItem`` from the article-detail JSON response."""
        if not response.text:
            return
        datas = json.loads(response.text)
        data_ = datas['data']

        # The detail payload may lack a body; fall back to an empty string
        # (also guards against data_ being null).
        html = (data_ or {}).get('content') or ''

        item = NewsItem()
        item['id'] = response.meta['id']
        item['url'] = response.meta['url']
        item['title'] = response.meta['title']
        item['pubdate'] = response.meta['pubdate']
        item['content'] = remove_tags(html)   # plain-text body
        item['author'] = response.meta['author']
        item['formats'] = "app"
        item['dataSource'] = '悦头条'
        item['serchEnType'] = "悦头条"
        item['html'] = html                   # raw HTML body
        item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['collectProcess'] = 'crawl_news'
        item['serverIp'] = "113.128.12.74"
        yield item
