# -*- coding: utf-8 -*-
# @Time    : 2019/12/11 11:39
# @Author  : Damn7Kx
# @Software: PyCharm
import datetime
import scrapy
from NewsSpider.items import NewsItem
from NewsSpider.tools.utils import Utils
from NewsSpider.tools.filter_time import Times
from w3lib.html import remove_tags
from urllib.parse import urljoin
from NewsSpider.tools.redis_db import Redis_DB

class JinRiWuHuNews(scrapy.Spider):
    """Spider for wuhunews.cn ("今日芜湖") news.

    Incrementally walks nine category index pages, follows each listing's
    "next" pagination link, and parses every article page into a NewsItem.
    Each article is emitted twice: once with the desktop URL ("web") and
    once with the mobile "m." variant ("app").

    A class-level request counter is shared by all categories; once it
    exceeds 50 pagination requests the spider closes itself via the engine.
    Uses random-UA / proxy / Kafka middlewares via custom_settings.
    """
    name = 'Wuhu'
    base = 'http://www.wuhunews.cn/'
    # Category index pages used to seed the crawl (9 categories).
    urls = ['http://www.wuhunews.cn/yaowen/index.html', 'http://www.wuhunews.cn/weiwen/index.html',
           'http://www.wuhunews.cn/waimei/index.html', 'http://www.wuhunews.cn/xianqu/index.html',
           'http://www.wuhunews.cn/shequ/index.html', 'http://www.wuhunews.cn/shiping/index.html',
           'http://www.wuhunews.cn/zonghe/index.html','http://www.wuhunews.cn/anhui/index.html', 'http://www.wuhunews.cn/shehui/index.html']
    # Pool of mobile user-agent strings (presumably consumed by
    # RandomUserAgentMiddleware — confirm against the middleware).
    Ua = [
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/72.0.3626.101 Mobile/15E148 Safari/605.1",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) FxiOS/15.0b13894 Mobile/16D57 Safari/605.1.15",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) FxiOS/8.1.1 Mobile/16D57 Safari/605.1.15",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) OPiOS/16.0.14.122053 Mobile/16D57 Safari/9537.53",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) OPT/2 Mobile/16D57",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) OPiOS/12.0.5.3 Version/7.0 Mobile/16D57 Safari/9537.53",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 EdgiOS/42.10.3 Mobile/16D57 Safari/605.1.15",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/16D57 unknown BingWeb/6.9.8.1",
    ]
    t = Times()          # publish-date parsing / recency helper
    redis = Redis_DB()   # Redis-backed article de-duplication store
    count = 0            # pagination counter shared across all categories

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.RandomUserAgentMiddleware': 543,
            'NewsSpider.middlewares.ProxyIPMiddleware': 544,
            'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
        },
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }

    def start_requests(self):
        """Seed one request per category index page."""
        for base_url in self.urls:
            yield scrapy.Request(base_url, callback=self.parse_next_list, dont_filter=True)

    def parse_next_list(self, response):
        """Extract article links from a listing page and follow pagination.

        Stops the whole spider once the shared pagination budget (50
        requests across all 9 categories) is exhausted.
        """
        if self.__class__.count > 50:
            # Ask the engine to shut this spider down; the message now
            # matches the actual threshold (was "超过100" for a >50 check).
            self.crawler.engine.close_spider(self, '计数超过50，停止爬虫!')
        else:
            print("正在访问:", response.url)
            list_url = response.css("#ContentList ul a::attr(href)").extract()
            next_url = response.css("#ContentList a.next::attr(href)").extract_first()
            for url in list_url:
                # Listing hrefs may be relative; resolve against the page URL.
                yield scrapy.Request(url=urljoin(response.url, url), callback=self.parse, dont_filter=True)
            if next_url:
                # One shared counter for all categories caps total pagination.
                self.__class__.count += 1
                yield scrapy.Request(url=next_url, callback=self.parse_next_list)

    def parse(self, response):
        """Parse an article page into NewsItem(s).

        Skips articles that are not from the last two days and articles
        already recorded in the Redis dedup set. Yields the web item,
        then a copy for the mobile ("m.") URL.
        """
        item = NewsItem()
        title = response.css("#ContentText h2::text").extract_first()
        try:
            pubdate = response.css("#ContentText time::text").extract_first()
            # Text looks like "时间：...", keep the part after the colon.
            pubdate = pubdate.split("：")[1].strip()
            pubdate = str(self.t.datetimes(pubdate))
        except (AttributeError, IndexError):
            # Missing or unexpected date markup: fall back to "now" so the
            # recency filter below keeps the item (narrowed from bare except).
            pubdate = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        # Drop anything that is not from the last two days.
        if not self.t.time_is_Recent(pubdate):
            return None
        # Guard against a missing content node: extract_first() returns None,
        # which would make remove_tags() raise TypeError.
        old_content = response.css("#ContentText div.article-content").extract_first() or ''
        try:
            author = response.css("#ContentText span.editors::text").extract_first()
            author = author.split("：")[1].strip()
        except (AttributeError, IndexError):
            author = "今日芜湖"
        content = remove_tags(old_content)
        # extract_first() never raises, so the old try/except was dead code;
        # default to '' when the source node is absent instead of None.
        dataSource = response.css("#ContentText span.source a::text").extract_first() or ''
        # Renamed from `id` to avoid shadowing the builtin.
        article_id = Utils.url_hash(response.url)
        if self.redis.check_exist_2("wenzhangquchong", article_id, '') == 0:
            print('该id:%s已存在' % article_id)
            return None
        item['id'] = article_id
        item['url'] = response.url
        item['title'] = title
        item['pubdate'] = pubdate
        item['content'] = content
        item['author'] = author
        item['formats'] = "web"
        item['dataSource'] = dataSource
        item['serchEnType'] = "今日芜湖"
        item['html'] = old_content
        item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['collectProcess'] = 'crawl_news'
        item['serverIp'] = "113.128.12.74"
        yield item
        # Mobile edition: same article under the "m." host. Replace only the
        # first "www" so a path containing "www" is never mangled.
        m_url = response.url.replace("www", 'm', 1)
        m_id = Utils.url_hash(m_url)
        if self.redis.check_exist_2("wenzhangquchong", m_id, '') == 0:
            print('该id:%s已存在' % m_id)
            return None
        # Yield a copy: mutating the already-yielded item could race with
        # an asynchronous pipeline still processing the web variant.
        m_item = item.copy()
        m_item['formats'] = "app"
        m_item['url'] = m_url
        m_item['id'] = m_id
        yield m_item