# -*- coding: utf-8 -*-
# @Time    : 2019/12/10 17:15
# @Author  : Damn7Kx
# @Software: PyCharm
import datetime
import json
import random
from urllib.parse import urlencode
import scrapy
from NewsSpider.items import NewsItem
from NewsSpider.tools.utils import Utils
from NewsSpider.tools.filter_time import Times
from NewsSpider.tools.parse_html import extract_html
from NewsSpider.tools.redis_db import Redis_DB


class BeiDouNewsKeyWords(scrapy.Spider):
    """Keyword-search spider for the BeiDou news app (bdapp.lntv.cn).

    For each keyword in ``words`` it walks the paginated search API,
    skips short-video links, stale articles and IDs already recorded in
    Redis, then fetches every remaining article page and emits a
    populated ``NewsItem`` through the Kafka pipeline.
    """

    name = 'Beidoukey'
    # Seed keywords; each one drives its own paginated search.
    words = ['辽宁']
    # Pool of mobile user-agent strings; one is drawn per request.
    Ua = [
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/72.0.3626.101 Mobile/15E148 Safari/605.1",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) FxiOS/15.0b13894 Mobile/16D57 Safari/605.1.15",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) FxiOS/8.1.1 Mobile/16D57 Safari/605.1.15",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) OPiOS/16.0.14.122053 Mobile/16D57 Safari/9537.53",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) OPT/2 Mobile/16D57",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) OPiOS/12.0.5.3 Version/7.0 Mobile/16D57 Safari/9537.53",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 EdgiOS/42.10.3 Mobile/16D57 Safari/605.1.15",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/16D57 unknown BingWeb/6.9.8.1",
    ]
    # Base header template. NOTE: the original picked a User-Agent here at
    # class-definition time, freezing one UA for the whole process; requests
    # now take a fresh copy via _headers() so the pool actually rotates.
    headers = {
        "User-Agent": random.choice(Ua),
        'Content-Type': "application/x-www-form-urlencoded",
        'Host': "bdapp.lntv.cn:8088",
        'Accept-Encoding': "gzip",
        'Connection': "keep-alive",
        'Accept': "*/*",
        'Cache-Control': "no-cache",
    }
    t = Times()
    redis = Redis_DB()
    # Number of search-result pages fetched so far (class-wide budget).
    count = 0

    custom_settings = {
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }

    SEARCH_URL = 'http://bdapp.lntv.cn:8088/BDTV/news/search?'

    def _headers(self):
        """Return a copy of the base headers with a freshly drawn User-Agent."""
        h = dict(self.headers)
        h["User-Agent"] = random.choice(self.Ua)
        return h

    def _search_request(self, word, page):
        """Build a search-API request for *word* at 1-based *page*.

        The keyword is carried in ``meta['word']`` so pagination keeps
        searching the same term (the original hard-coded "ji" on follow-up
        pages, which silently switched the query after page 1).
        """
        querystring = {"words": word, "page": str(page)}
        url = self.SEARCH_URL + urlencode(querystring) + "&"
        return scrapy.Request(url, headers=self._headers(),
                              callback=self.parse_text, dont_filter=True,
                              meta={"page": querystring['page'], "word": word})

    def start_requests(self):
        """Kick off page 1 of the search for every configured keyword."""
        for word in self.words:
            yield self._search_request(word, 1)

    def parse_text(self, response):
        """Handle one search-results page.

        Queues the next page for the same keyword while results keep
        coming, and a detail request for every fresh, recent article.
        """
        if self.__class__.count > 100:
            # Page budget exhausted: ask the engine to close this spider.
            self.crawler.engine.close_spider(self, '计数超过100，停止爬虫!')
        print("当前页数%s" % response.meta['page'])
        self.__class__.count += 1
        datas = json.loads(response.text)
        data = datas['data']['search']
        if data:
            # Non-empty page: request the next one for the SAME keyword.
            word = response.meta.get('word', self.words[0])
            yield self._search_request(word, int(response.meta['page']) + 1)
            for d in data:
                url = d['url']
                if "duanshipin" in url:
                    # Short-video entry; only articles are wanted.
                    continue
                news_id = Utils.url_hash(url)
                pubdate = Utils.process_timestamp(d['newstime'])
                pubdate = str(self.t.datetimes(pubdate))
                if not self.t.time_is_Recent(pubdate):
                    # Too old — skip. (The original fell through here and
                    # could still fetch a stale article.)
                    continue
                if self.redis.check_exist_2("wenzhangquchong", news_id, '') == 0:
                    # Dedup hit per check_exist_2's convention — skip.
                    print('该id:%s已存在' % news_id)
                    continue
                dicts = {
                    'url': url,
                    'id': news_id,
                    'title': d['title'],
                    'pubdate': pubdate,
                }
                yield scrapy.Request(url=url, headers=self._headers(),
                                     callback=self.parse, dont_filter=True,
                                     meta=dicts)

    def parse(self, response):
        """Extract the article body and emit a fully-populated NewsItem."""
        content = extract_html(response.text)
        if len(content) <= 60:
            # Too little text to be a real article — drop it.
            return None
        item = NewsItem()
        item['id'] = response.meta['id']
        item['url'] = response.meta['url']
        item['title'] = response.meta['title']
        item['pubdate'] = response.meta['pubdate']
        item['content'] = content
        item['author'] = "北斗新闻"
        item['formats'] = "app"
        item['dataSource'] = ""
        item['serchEnType'] = "北斗新闻"
        item['html'] = content
        item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['collectProcess'] = 'crawl_news'
        item['serverIp'] = "113.128.12.74"
        yield item