# -*- coding: utf-8 -*-
# @Time    : 2019/12/13 14:52
# @Author  : Damn7Kx
# @Software: PyCharm
import datetime
import json

import scrapy

from NewsSpider.items import NewsItem
from NewsSpider.tools.utils import Utils
from NewsSpider.tools.filter_time import Times
from NewsSpider.tools.parse_html import extract_html,extract_pubtime,extract_author
from NewsSpider.tools.redis_db import Redis_DB


class BBNews(scrapy.Spider):
    """
    Spider for "BB" news (哔哔资讯).

    Walks the site's category list API, de-duplicates article URLs through
    Redis, then scrapes each unseen article's detail page into a NewsItem.
    """
    name = 'BB'
    # Category ids exposed by the list API; types2 is the full crawl set.
    types = [str(i) for i in range(25, 74)]
    types2 = types + ['3', '19', '20', '21', '22', '23'] + [str(i) for i in range(6, 18)]

    t = Times()
    redis = Redis_DB()

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.ProxyIPMiddleware': 544,
            'NewsSpider.middlewares.RandomUserAgentMiddleware': 543,
            'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
        },
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }

    def start_requests(self):
        """Yield one list-API request per category id in ``types2``."""
        # `type_id` instead of `type` — don't shadow the builtin.
        for type_id in self.types2:
            url = 'https://news.bicido.com/api/news/?type_id={0}'.format(type_id)
            yield scrapy.Request(url, callback=self.parse_text, dont_filter=True)

    def parse_text(self, response):
        """Parse the JSON article list and queue detail pages for unseen URLs.

        :param response: list-API response whose body is a JSON array of
            objects carrying at least ``source_url`` and ``title``.
        """
        print("正在访问列表页:", response.url)
        # json.loads instead of eval(): eval() on a network response is a
        # remote-code-execution risk, and the old "null" -> "None" text
        # replacement corrupted any payload containing the substring "null"
        # (and still failed on JSON true/false).
        data_ = json.loads(response.text)
        for d in data_:
            url = d['source_url']
            # Stable hash of the URL, used as the Redis dedup key
            # (`doc_id` instead of `id` — don't shadow the builtin).
            doc_id = Utils.url_hash(url)
            dicts = {
                'url': url,
                'id': doc_id,
                'title': d['title'],
            }
            # check_exist_2 returns 0 when this id is already recorded.
            if self.redis.check_exist_2("wenzhangquchong", doc_id, '') == 0:
                print('parse_text id:%s已存在' % doc_id)
            else:
                # Fetch the detail page, carrying list metadata via meta.
                yield scrapy.Request(url=url, callback=self.parse, dont_filter=True, meta=dicts)

    def parse(self, response):
        """Extract one NewsItem from an article detail page.

        Articles whose publish time is not recent (per ``Times.time_is_Recent``)
        are dropped. Extraction failures degrade gracefully: missing content
        becomes '' and a missing author falls back to the site name.
        """
        item = NewsItem()
        try:
            pubdate = extract_pubtime(response.text)
        except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
            pubdate = None
        pubdate = str(self.t.datetimes(pubdate))
        if not self.t.time_is_Recent(pubdate):
            # Too old — Scrapy ignores None produced by callbacks.
            yield None
        else:
            try:
                content = extract_html(response.text)
            except Exception:
                content = ''
            try:
                author = extract_author(response.text)
            except Exception:
                author = '哔哔资讯'
            item['id'] = response.meta['id']
            item['url'] = response.meta['url']
            item['title'] = response.meta['title']
            item['pubdate'] = pubdate
            item['content'] = content
            item['author'] = author
            item['formats'] = "web"
            item['dataSource'] = ''
            item['serchEnType'] = "哔哔资讯"
            # The extracted HTML doubles as both content and html fields.
            item['html'] = content
            item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            item['collectProcess'] = 'crawl_news'
            item['serverIp'] = "113.128.12.74"
            yield item

