# -*- coding: utf-8 -*-
# @Time    : 2019/12/17 14:46
# @Author  : Damn7Kx
# @Software: PyCharm
import datetime
import json
import scrapy
from NewsSpider.items import NewsItem
from NewsSpider.tools.utils import Utils
from NewsSpider.tools.filter_time import Times
from NewsSpider.tools.parse_html import extract_html
from NewsSpider.tools.redis_db import Redis_DB


class ChuiZiReaderNews(scrapy.Spider):
    """Smartisan (Chuizi) Reader news spider.

    Pulls category article lists from the GetArticleList JSON API,
    deduplicates articles by URL hash via Redis, follows each article's
    web detail page, and emits one "web" item plus one cloned "app"
    item per article. Each category is paginated at most 3 extra pages.
    """

    name = 'Chuizi'
    # Category ids accepted by the GetArticleList endpoint.
    types = ['10', '11', '15', '34', '44', '43', '16', '45']
    t = Times()            # recency filter for publish dates
    redis = Redis_DB()     # dedup store (key space "wenzhangquchong")

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.FakeUserAgentMiddleware': 543,
            'NewsSpider.middlewares.ProxyIPMiddleware': 544,
            'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
        },
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }

    def start_requests(self):
        """Issue the first list-page request for every category id."""
        for cate_id in self.types:
            base_url = (
                'http://reader.smartisan.com/index.php?r=find/GetArticleList'
                f'&cate_id={cate_id}&art_id=0&page_size=20'
            )
            yield scrapy.Request(
                base_url, callback=self.parse_text, dont_filter=True,
                meta={"type": cate_id, "number": 1})

    def parse_text(self, response):
        """Parse one JSON list page.

        Schedules a detail-page request for each new, recent article and
        then requests the next list page (cursor = last article id),
        up to 3 follow-up pages per category.
        """
        print("正在访问详情页:", response.url)
        datas = json.loads(response.text)
        cate_id = response.meta['type']
        number = response.meta['number']
        articles = datas['data']['list']
        # Pagination cursor; stays None when the list is empty.
        # (The original code raised NameError on an empty page here.)
        next_id = None
        for d in articles:
            url = d['url']
            next_id = d['id']
            art_hash = Utils.url_hash(url)
            # check_exist_2 returns 0 when the hash is already recorded.
            if self.redis.check_exist_2("wenzhangquchong", art_hash, '') == 0:
                print('该id:%s已存在' % art_hash)
                continue
            pubdate = Utils.process_timestamp(d['update_time'])
            if not self.t.time_is_Recent(pubdate):
                continue  # too old — skip
            meta = {
                'title': d['title'],
                'pubdate': pubdate,
                'author': d['author_name'],
                'id': art_hash,
                'url': url,
                'origin_url': d['origin_url'],
            }
            yield scrapy.Request(url, callback=self.parse,
                                 dont_filter=True, meta=meta)
        # Follow at most 3 additional pages, and only when this page
        # actually contained articles to take a cursor from.
        if number <= 3 and next_id is not None:
            base_url = (
                'http://reader.smartisan.com/index.php?r=find/GetArticleList'
                f'&cate_id={cate_id}&art_id={next_id}&page_size=20'
            )
            yield scrapy.Request(
                base_url, callback=self.parse_text, dont_filter=True,
                meta={"type": cate_id, "number": number + 1})

    def parse(self, response):
        """Build the "web" item from the detail page, then an "app" clone.

        The app variant reuses all fields but swaps in the mobile URL
        and its own URL-hash id, subject to the same Redis dedup check.
        """
        item = NewsItem()
        item['id'] = response.meta['id']
        item['url'] = response.meta['url']
        item['title'] = response.meta['title']
        item['pubdate'] = response.meta['pubdate']
        try:
            content = extract_html(response.text)
        except Exception:
            # Extraction failure is non-fatal; emit the item with empty body.
            content = ''
        item['content'] = content
        item['author'] = response.meta['author']
        item['formats'] = "web"
        item['dataSource'] = "锤子阅读"
        item['serchEnType'] = "锤子阅读"
        item['html'] = content
        item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['collectProcess'] = 'crawl_news'
        item['serverIp'] = "113.128.12.74"
        yield item
        # Mobile (app) variant: work on a copy so the already-yielded web
        # item is not mutated while pipelines may still be processing it.
        origin_url = response.meta['origin_url']
        app_item = item.copy()
        app_item['formats'] = "app"
        app_item['url'] = origin_url
        new_id = Utils.url_hash(origin_url)
        app_item['id'] = new_id
        if self.redis.check_exist_2("wenzhangquchong", new_id, '') == 0:
            print('该id:%s已存在' % new_id)
            return
        yield app_item
