# -*- coding: utf-8 -*-
# @Time    : 2019/12/17 14:04
# @Author  : Damn7Kx
# @Software: PyCharm
import datetime
import json
import time

import scrapy
from NewsSpider.items import NewsItem
from NewsSpider.tools.utils import Utils
from NewsSpider.tools.filter_time import Times
from NewsSpider.tools.parse_html import extract_html
from NewsSpider.tools.redis_db import Redis_DB
from NewsSpider.tools.WeiXinParse import parse_weixin
from w3lib.html import remove_tags

class YiDianNews(scrapy.Spider):
    """Yidian Zixun (一点资讯) news spider.

    Polls a fixed content API once per configured batch of document ids,
    yields one NewsItem per article that is (a) not already recorded in the
    Redis de-dup set and (b) recent enough per ``Times.time_is_Recent``, then
    re-polls each batch up to three more times for newer articles.
    """

    name = 'Yidian'

    # Batches of document ids. Each string is a comma-separated group that is
    # passed verbatim as the `docid` query parameter of a single API request.
    docids = ['0O7QhP9V,0O7aCSpl,0O7Qc7lx,0O7QR03j,0O7Pdcov,0O7QXhWL,0O7TbVf4',
              '0O7Qpyhq,0O7PWqFs,0O6Ei8Rd,0O7YWbGK,0O7QhP9V,0O7Pf41A,0O7b6Ku1',
              '0O7bthnc,0O7JVKUX,0O7aNt4x,0O7emNRu,0O6mm17K,0O796ZsX,0O5ohHKj', '0O7bKn8R,0O7OsdJU',
              '0O7IeOd1,0O6G6HGI,0O789Saq,0O6xhzj9,0O6XoRfc,0O668cVC,0O7SZBOl,0O78xXlG',
              '0O7Vs96g,0O7VtYo9,0O7VwG3g,0O7W1IJn,0O7PQKfE,0O6q8wvd,0O6qEMhy',
              '0O6zkpnE,0O7JJH8A,0O7LQSU8,0O7Po96u,0O7Fn6V6,0O7RTRUQ',
              '0O7d8Jwq,0O7fVTB8,0O7dBWOp,0O7dKOGM,0O7fViT9,0O7dtTOI,0O7ejEc0,0O7ZynvJ,0O7aCSpl,0O7aH22J',
              '0O7XhRv6,0O7JVHqM,0O6icmgG,0O7WPmDY,0O7H37d8,0O7Wf8Xx,0O6j6NYJ,0O6tc8e6,0O7NdQBN',
              '0O7H0ta2,0O7TP6QS',
              '0O7Sq9B9,0O7YWbGK,0O7VGJzn,0O7bvrJE,0O7heTmv,0O7Mr0Cl,0O7gRqQY,0O7eD0Pd,0O7dAoNE,0O7Qpyhq',
              '0O7SXF05,0O7RGjkd,0O7QjQxo,0O7QeDue,0O7QQUFF,0O7NJ2yS,0O7LOaWm,0O7Lca60,0O7K0shG,0O7IhHLJ',
              '0O73VEAE,0O6gnTSo,0O7VRXr8,0O7MNMOu,0O7M8VIx,0O79jNXR',
              '0O6mkKsI,0O6qgVTZ,0O7djv7u,0O5vmDcP,0O6hp19S,0O6mLKhN,0O77Nn0R,0O6ynZl8,0O6sECsg', '0O6sWXfd',
              '0O7caiDb,0O41eK4A,0O7OvjVD,0O76IyeG,0O6cF0LG,0O6duie5,0O6bzhaL,0O76VGvd,0O6buhlO,0O75YaHR',
              '0O6rMwl3,0O5siEe1,0O6iJRhi,0O6ktV9u',
              '0O7Su2rE,0O7KncTA,0O7Sf2Ff,0O7UgQ4N,0O7TxImx,0O7SHHCB,0O7NFI9f,0O6rySKH,0O7HwlEZ',
              '0O7L8hwo,0O7SoERF,0O7UqtFw,0O6xJW74,0O7cNYjm,0O7V6Bxg,0O7YCdbG',
              '0O7Sq9B9,0O7J4xy4,0O6Ei8Rd,0O7WQcM5,0O7eeDuK,0O7QY4LA,0O7d3ABM,0O7Z6cbN,0O7OjnJv,0O7aH8Kf',
              '0O7NvYk7,0O75DP81,0O7ABFeH,0O6fVpOz,0O7G9MZH,0O7ISV3w,0O6RCSpz,0O7IPt9t',
              '0O7UE8RU,0O6UuLXl,0O7K0x6t,0O7O4sYM,0O7M4OVw,0O7DU5Mw', '0O7OOCXP,0O6xyyNZ,0O6vrBoZ,0O6vhP8t',
              '0O7aIUNx,0O7XPvv6,0O6rSDPA,0O7PSXp1,0O7NAWRe,0O6R3bp8,0O6bcgNj,0O7QQUFF,0O7fZBOS,0O7Jw5RO',
              '0O7kG3xL,0O7k9o5B,0O7k6ape,0O7jnXzu']

    t = Times()          # recency filter for publication dates
    redis = Redis_DB()   # de-duplication store

    headers = {
        'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36",
    }
    custom_settings = {
        'DOWNLOAD_DELAY': 4,
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.ProxyIPMiddleware': 544,
        },
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }

    def _build_request(self, docid, number):
        """Build the API request for one docid batch.

        ``number`` is the 1-based polling round carried through ``meta`` so
        ``parse_text`` can stop after round 4.
        """
        timestamp = int(time.time() * 1000)
        base_url = (
            f'https://a1.go2yd.com/Website/contents/content'
            f'?reqid=il6o60p7_{timestamp}_181&docid={docid}'
        )
        return scrapy.Request(
            base_url,
            headers=self.headers,
            callback=self.parse_text,
            dont_filter=True,
            meta={"docid": docid, "number": number},
        )

    def start_requests(self):
        """Issue the first API request for every configured docid batch."""
        for docid in self.docids:
            yield self._build_request(docid, number=1)

    def parse_text(self, response):
        """Parse the JSON API response; yield fresh, non-duplicate articles.

        Skips documents with no title, documents whose URL hash already
        exists in Redis, and documents that are not recent enough.
        """
        print("正在访问详情页:", response.url)
        datas = json.loads(response.text)
        docid = response.meta['docid']
        for d in datas['documents']:
            title = d.get('title')
            if title is None:
                # Documents without a title are unusable — skip them.
                continue
            url = d.get('url')
            news_id = Utils.url_hash(url)
            # check_exist_2 returns 0 when this id is already stored
            # (presumed from the original branch — confirm against Redis_DB).
            if self.redis.check_exist_2("wenzhangquchong", news_id, '') == 0:
                print('该id:%s已存在' % news_id)
                continue
            pubdate = d['date']
            if not self.t.time_is_Recent(pubdate):
                continue
            # BUG FIX: create a fresh item per document. The original reused
            # one NewsItem across the loop, so every yielded item was the same
            # mutated object and later documents could clobber earlier ones.
            item = NewsItem()
            item['url'] = url
            item['title'] = title
            item['pubdate'] = pubdate
            item['dataSource'] = d['dsource']
            item['author'] = d['source']
            content = d.get('content', '')
            item['content'] = remove_tags(content)
            item['html'] = content
            item['id'] = news_id
            item['formats'] = "weixin"
            item['serchEnType'] = "一点资讯"
            item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            item['collectProcess'] = 'crawl_news'
            item['serverIp'] = "113.128.12.74"
            yield item
        # Re-poll the same batch up to three more times for newer articles.
        number = response.meta['number']
        if number <= 3:
            yield self._build_request(docid, number=number + 1)
