# -*- coding: utf-8 -*-
# @Time    : 2019/12/16 16:17
# @Author  : Damn7Kx
# @Software: PyCharm
import datetime
import json
import scrapy
from NewsSpider.items import NewsItem
from NewsSpider.tools.utils import Utils
from NewsSpider.tools.filter_time import Times
from NewsSpider.tools.parse_html import extract_html
from NewsSpider.tools.redis_db import Redis_DB
from NewsSpider.tools.WeiXinParse import parse_weixin


class MONONews(scrapy.Spider):
    """MONO news spider.

    Pages through a fixed list of MONO group feeds via the public JSON API
    (up to 3 pages per group), follows each article's external url, parses
    publish time / content / author out of the detail page, and yields a
    ``NewsItem`` that the Kafka pipeline ships downstream.
    """

    name = 'Mono'
    # Group ids whose content feeds are crawled.
    # NOTE(review): the original list contained '122210' twice; with
    # dont_filter=True that feed was crawled twice per run, so the
    # duplicate entry was removed.
    types = ['127098', '127130',
             '127076', '100297', '123968', '127057', '127060',
             '122210', '123969', '100022',
             '100005', '103316', '127092',
             '127088', '121861', '127997',
             '124078', '112621', '102188', '128267',
             '100067', '114265']
    t = Times()          # publish-time normalization + recency filter
    redis = Redis_DB()   # cross-run dedup store keyed by url hash

    headers = {
        'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36",
        'HTTP-AUTHORIZATION': "07ed81871fd411eaaf63525400ff1f2f",
    }
    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.ProxyIPMiddleware': 544,
        },
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }

    def start_requests(self):
        """Seed one list-API request (page 1) per group id."""
        # Avoid shadowing the builtin `type` (original used `for type in ...`).
        for group_type in self.types:
            base_url = f'http://mmmono.com/api/v3/group/{group_type}/content/kind/1/?start=0'
            yield scrapy.Request(base_url, headers=self.headers, callback=self.parse_text,
                                 dont_filter=True,
                                 meta={"type": group_type, 'number': 1})

    def parse_text(self, response):
        """Parse one page of the group-content API.

        Schedules the next page (pages 1-3 only, using the `start` cursor
        the API returns) and one detail-page request per article that has
        both a title and an external `rec_url`.
        """
        print("正在访问详情页:", response.url)
        datas = json.loads(response.text)
        group_type = response.meta['type']
        number = response.meta['number']
        next_start = datas['start']  # pagination cursor (was misspelled `strat`)
        # Follow pagination for the first 3 pages only (guard clause
        # replaces the original `if number >= 3: pass / else:` shape).
        if number < 3:
            base_url = f'http://mmmono.com/api/v3/group/{group_type}/content/kind/1/?start={next_start}'
            yield scrapy.Request(base_url, headers=self.headers, callback=self.parse_text,
                                 dont_filter=True,
                                 meta={"type": group_type, 'number': number + 1})
        for entry in datas['meow_list']:
            # Skip feed entries that are not real articles: no title or no
            # external url.  Narrow KeyError replaces the bare `except:`.
            try:
                title = entry['title']
            except KeyError:
                continue
            url = entry.get('rec_url')
            if url is None:
                continue
            yield scrapy.Request(url.strip(), callback=self.parse, headers=self.headers,
                                 dont_filter=True, meta={'url': url, 'title': title})

    def parse(self, response):
        """Build a NewsItem from an article detail page.

        Skips urls already recorded in redis and articles whose publish
        time fails the recency filter.  `return` inside a generator ends
        iteration, which Scrapy treats the same as the original
        `yield None`.
        """
        if not response.text:
            return
        url = response.meta['url']
        # Avoid shadowing the builtin `id` (original used `id = ...`).
        url_id = Utils.url_hash(url)
        # check_exist_2 returns 0 when the id is already present.
        if self.redis.check_exist_2("wenzhangquchong", url_id, '') == 0:
            print('该id:%s已存在' % url_id)
            return
        try:
            results = parse_weixin(response.text)
        except Exception:
            # Best-effort: fall back to generic extraction below.
            results = ''
        item = NewsItem()
        item['id'] = url_id
        item['url'] = url
        item['title'] = response.meta['title']
        # `results` is presumably (author, pubdate, content, html) — TODO
        # confirm against parse_weixin.  Truthiness test (instead of the
        # original `== ''`) also covers a None/empty return, which would
        # previously have crashed on `results[1]`.
        if not results:
            pubdate = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        else:
            pubdate = str(self.t.datetimes(results[1]))
        if not self.t.time_is_Recent(pubdate):
            return
        item['pubdate'] = pubdate
        content = results[2] if results else extract_html(response.text)
        item['content'] = content
        try:
            item['author'] = results[0]
        except (IndexError, TypeError):
            item['author'] = ''
        item['formats'] = "weixin"
        item['dataSource'] = 'MONO资讯'
        item['serchEnType'] = "MONO资讯"
        try:
            item['html'] = results[3]
        except (IndexError, TypeError):
            # Parser gave no raw html — store the extracted content instead.
            item['html'] = content
        item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['collectProcess'] = 'crawl_news'
        item['serverIp'] = "113.128.12.74"
        yield item
