# -*- coding: utf-8 -*-
# @Time    : 2019/12/23 14:42
# @Author  : Damn7Kx
# @Software: PyCharm
import datetime
import json
import time
from w3lib.html import remove_tags
import scrapy
from NewsSpider.items import NewsItem
from NewsSpider.tools.utils import Utils
from NewsSpider.tools.filter_time import Times
from NewsSpider.tools.redis_db import Redis_DB
from urllib.parse import urlencode


class TaoNews(scrapy.Spider):
    """
    Tao News spider.

    Hits a fixed list API per channel type, pages by the `pubTime` of the
    newest article in each response (up to 3 follow-up pages per channel),
    then fetches each article's detail page and extracts the body text.
    """

    name = 'Tao'
    t = Times()
    redis = Redis_DB()
    # Channel type ids: '1'..'21' plus '23' ('22' is deliberately skipped).
    types = [str(i) for i in range(1, 22)] + ['23']

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.ProxyIPMiddleware': 544,
            'NewsSpider.middlewares.RandomUserAgentMiddleware': 543,
        },
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }

    def start_requests(self):
        """Seed one POST request per channel type; the page counter starts at 1."""
        for t in self.types:
            url = f'https://reco.coohua.com/api/news/?userId=111365884&times=0&typeId={t}&pubTime=0&isNew=true'
            yield scrapy.Request(url, method="POST", callback=self.parse_text,
                                 dont_filter=True, meta={"type": t, "number": 1})

    def parse_text(self, response):
        """
        Parse one page of the list API.

        Schedules the next list page (capped at page 3 per channel) and one
        detail-page request for every article that is recent enough and not
        already present in the Redis dedup set.
        """
        print("正在访问列表页:", response.url)
        channel = response.meta['type']
        page = response.meta['number']
        articles = json.loads(response.text)['result']

        # Page forward using the first article's pubTime as the cursor.
        # BUGFIX: the original next-page request carried no meta, so this
        # callback crashed with KeyError('type') on every page after the
        # first; it also never incremented the counter, defeating the cap.
        # Also guard against an empty result list (articles[0] would raise).
        if page <= 3 and articles:
            next_url = f'https://reco.coohua.com/api/news/?userId=111365884&times=0&typeId={channel}&pubTime={articles[0]["pubTime"]}&isNew=true'
            yield scrapy.Request(next_url, method="POST", dont_filter=True,
                                 callback=self.parse_text,
                                 meta={"type": channel, "number": page + 1})

        for article in articles:
            pubdate = Utils.process_timestamp(article['pubTime'])
            pubdate = str(self.t.datetimes(pubdate))
            if not self.t.time_is_Recent(pubdate):
                print("该篇文章不在范围时间内:", pubdate)
                continue
            try:
                title = article['title']
            except KeyError:  # some API entries (e.g. ads) have no title
                continue
            # The detail URL is a plain f-string and cannot fail to build;
            # the original's try/except and None-check around it were dead code.
            url = f'https://xiuxi.kaixindianzi688.com/content/article/detail/page?id={article["id"]}'
            doc_id = Utils.url_hash(url)
            # check_exist_2 returns 0 when the hash is already recorded.
            if self.redis.check_exist_2("wenzhangquchong", doc_id, '') == 0:
                print('该id:%s已存在' % doc_id)
                continue
            yield scrapy.Request(url, callback=self.parse, dont_filter=True,
                                 meta={'id': doc_id,
                                       'title': title,
                                       'dataSource': article['uperName'],
                                       'pubdate': pubdate})

    def parse(self, response):
        """Extract the article body and author from a detail page and emit a NewsItem."""
        if response.text is None:
            return
        item = NewsItem()
        item['id'] = response.meta['id']
        item['url'] = response.url
        item['title'] = response.meta['title']
        item['pubdate'] = response.meta['pubdate']
        # BUGFIX: extract_first() returns None instead of raising when the
        # node is missing, so the original bare try/except never fired and
        # remove_tags(None) crashed. Default to '' explicitly.
        html = response.css("#article_content").extract_first() or ''
        item['content'] = remove_tags(html)
        item['html'] = html
        item['author'] = response.css(".authorBox p[class=author]::text").extract_first() or ''
        item['formats'] = "app"
        item['dataSource'] = response.meta['dataSource']
        item['serchEnType'] = "淘新闻"
        item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['collectProcess'] = 'crawl_news'
        item['serverIp'] = "113.128.12.74"
        yield item
