# -*- coding: utf-8 -*-
# @Time    : 2019/12/16 9:29
# @Author  : Damn7Kx
# @Software: PyCharm
import datetime
import json

import scrapy
from NewsSpider.items import NewsItem
from NewsSpider.tools.utils import Utils
from NewsSpider.tools.filter_time import Times
from NewsSpider.tools.WeiXinParse import parse_weixin
from NewsSpider.tools.redis_db import Redis_DB


class WeiXiaoBao(scrapy.Spider):
    """
    Spider for the Weixiaobao public-account platform (data.wxb.com).

    Crawls the article ranking list for each category (10 pages each),
    de-duplicates articles against Redis by URL hash, then fetches every
    new article's detail page and emits a populated ``NewsItem``.
    De-duplication is enabled (original note: 微小宝公众平台 开启去重).
    """
    name = 'Weixiaobao'
    # Article categories passed as the `baidu_cat` query parameter.
    types = ['国际', '体育', '娱乐', '搞笑', '动漫', '家居', '宠物', '社会', '时事', '财经', '科技', '情感', '汽车',
             '母婴育儿', '时尚', '游戏', '军事', '旅游', '美食', '文化', '健康养生', '星座运势', '历史', '音乐']

    t = Times()          # time helper: normalizes pubdates and checks recency
    redis = Redis_DB()   # Redis client used for article de-duplication

    headers = {
        'Cookie': "aliyungf_tc=AQAAALE9EVGFBQ8ASgyAcbo7bXuC6iD6; visit-wxb-id=4ba89d9286a77dc45fa91be562b8060d; PHPSESSID=d8e5f35c05277e3ddcb336c51602f105; Hm_lvt_5859c7e2fd49a1739a0b0f5a28532d91=1575683664,1576458207; Hm_lpvt_5859c7e2fd49a1739a0b0f5a28532d91=1576460628, aliyungf_tc=AQAAALE9EVGFBQ8ASgyAcbo7bXuC6iD6; visit-wxb-id=4ba89d9286a77dc45fa91be562b8060d; PHPSESSID=d8e5f35c05277e3ddcb336c51602f105; Hm_lvt_5859c7e2fd49a1739a0b0f5a28532d91=1575683664,1576458207; Hm_lpvt_5859c7e2fd49a1739a0b0f5a28532d91=1576460628; aliyungf_tc=AQAAAFV+WSMXJQoASgyAcQp0rJzXwHxz; PHPSESSID=fb96b4aff06fd7a8a61917ac234c4de6; visit-wxb-id=4ba89d9286a77dc45fa91be562b8060d",
        'Host': "data.wxb.com",
        'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36"
    }
    custom_settings = {
        'DOWNLOAD_DELAY': 1,
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.ProxyIPMiddleware': 544,
        },
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }

    def start_requests(self):
        """Yield one list-page request per (category, page): 10 pages each."""
        # `category` instead of `type` — don't shadow the builtin.
        for category in self.types:
            for page in range(1, 11):
                url = ('https://data.wxb.com/rank/article?baidu_cat={0}'
                       '&baidu_tag=&page={1}&pageSize=50&type=2&order=').format(category, page)
                yield scrapy.Request(url, callback=self.parse_text, headers=self.headers, dont_filter=True)

    def parse_text(self, response):
        """Parse a ranking-list JSON page and schedule detail-page requests.

        Skips any article whose URL hash is already recorded in the
        "wenzhangquchong" Redis de-dup store (check_exist_2 == 0 means
        "already present" per the original logging).
        """
        print("正在访问列表页:", response.url)
        articles = json.loads(response.text)['data']
        for article in articles:
            url = article['url']
            # `article_id` instead of `id` — don't shadow the builtin.
            article_id = Utils.url_hash(url)
            if self.redis.check_exist_2("wenzhangquchong", article_id, '') == 0:
                # Duplicate: already crawled, skip instead of yielding None.
                print('WeiXiaoBao id:%s已存在' % article_id)
                continue
            meta = {
                'url': url,
                'id': article_id,
                'title': article['title'],
                'dataSource': article['account'],
            }
            # Fetch the article detail page; list metadata rides along in meta.
            yield scrapy.Request(url=url, callback=self.parse, dont_filter=True, meta=meta)

    def parse(self, response):
        """Parse an article detail page into a NewsItem.

        Drops articles whose publish date is not recent (per Times helper).
        parse_weixin returns an indexable result; observed usage here:
        [0] author, [1] raw timestamp, [2] content, [3] html.
        """
        item = NewsItem()
        results = parse_weixin(response.text)
        try:
            # results[1] may be missing or unparsable — fall back to None,
            # which Times.datetimes presumably maps to a default date
            # (TODO confirm against the Times helper).
            pubdate = Utils.process_timestamp(results[1])
        except Exception:
            pubdate = None
        pubdate = str(self.t.datetimes(pubdate))
        if not self.t.time_is_Recent(pubdate):
            # Too old — drop silently (original yielded None to the same effect).
            return
        try:
            content = results[2]
        except Exception:
            content = ''
        try:
            author = results[0]
            if author == '':
                # Empty author string: fall back to the account name.
                author = response.meta['dataSource']
        except Exception:
            author = '微小宝公众平台'
        item['id'] = response.meta['id']
        item['url'] = response.meta['url']
        item['title'] = response.meta['title']
        item['pubdate'] = pubdate
        item['content'] = content
        item['author'] = author
        item['formats'] = "weixin"
        item['dataSource'] = response.meta['dataSource']
        item['serchEnType'] = "微小宝公众平台"
        item['html'] = results[3]
        item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['collectProcess'] = 'crawl_news'
        item['serverIp'] = "113.128.12.74"
        yield item
