# -*- coding: utf-8 -*-
# @Time    : 2019/12/13 16:34
# @Author  : Damn7Kx
# @Software: PyCharm
import datetime
import json
import random
from urllib.parse import urlencode
import scrapy
from NewsSpider.items import NewsItem
from NewsSpider.tools.utils import Utils
from NewsSpider.tools.filter_time import Times
from NewsSpider.tools.parse_html import extract_html
from NewsSpider.tools.redis_db import Redis_DB
from w3lib.html import remove_tags


class MereNews(scrapy.Spider):
    """Mere news spider.

    Walks a fixed, paginated headline JSON API for each channel id in
    ``types``, then follows every article's detail API to build a
    ``NewsItem``.  De-duplication is done by a URL-hash id checked against
    Redis in ``parse`` (hence ``dont_filter=True`` on all requests).
    """

    name = 'Mere'
    # Channel/category ids accepted by the headline API.
    types = ['T1348647909107', 'T1411113472760', 'T1348649580692', 'T1348648517839', 'T1370583240249', 'T1414142214384',
             'T1348654225495', 'T1348649079062', 'T1348648037603', 'T1348648141035', 'T1348648650048', 'T1348648756099',
             'T1348649145984', 'T1348649176279', 'T1348649475931', 'T1348649503389', 'T1348649654285', 'T1348649776727',
             'T1348650593803', 'T1348650839000', 'T1348654060988', 'T1348654085632', 'T1348654105308', 'T1348654151579',
             'T1348654204705', 'T1349837670307', 'T1349837698345', 'T1350383429665', 'T1351233117091', 'T1356600029035',
             'T1368497029546', 'T1379038288239', 'T1397016069906', 'T1397116135282', 'T1414389941036', 'T1429173683626',
             'T1441074311424', 'T1456394562871', 'T1467284926140', 'T1473054348939', 'T1464592736048', 'T1474271789612',
             'T1498701411149', 'T1499853820829', 'T1504171773862', 'T1509448512433', 'T1509504918215', 'T1464677529259',
             'T1465913214214', 'T1374538896985', 'T1374539467780', 'T1402452375609', 'T1408358090693', 'T1425536140071',
             'T1432884273758', 'T1435031802301', 'T1435198433041', 'T1440129813308', 'T1443017451668', 'T1451460602924',
             'T1453188277662', 'T1453779161204', 'T1454310147855', 'T1456465723341', 'T1456902217191', 'T1456989131243',
             'T1457403500808', 'T1459496338561', 'T1460256397238', 'T1460358206122', 'T1460451213721', 'T1461040381132',
             'T1461806919990', 'T1462417695101', 'T1463649152673', 'T1465700224209', 'T1466386762312', 'T1466565927622',
             'T1466998061677', 'T1467079994591', 'T1467100238109', 'T1468814169539', 'T1374537739895', 'T1374537849029',
             'T1374538660651', 'T1414487688825', 'T1433487817762', 'T1457583889187', 'T1457594773769', 'T1462260641741',
             'T1469167711734', 'T1469500605873']
    t = Times()          # publish-time parsing / recency-filter helper
    redis = Redis_DB()   # duplicate-id store

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.RandomUserAgentMiddleware': 543,
            'NewsSpider.middlewares.ProxyIPMiddleware': 544,
            'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
        },
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }

    def start_requests(self):
        """Yield the first three list pages (offsets 0/20/40) per channel."""
        for channel_id in self.types:  # renamed from `type`: don't shadow the builtin
            for offset in range(0, 41, 20):
                base_url = f'http://c.m.163.com/nc/article/headline/{channel_id}/{offset}-20.html'
                yield scrapy.Request(base_url, callback=self.parse_text,
                                     dont_filter=True, meta={"iid": channel_id})

    def parse_text(self, response):
        """Parse one headline-list page and schedule a detail request per
        recent article, carrying its metadata through ``meta``.
        """
        self.logger.info("正在访问详情页: %s", response.url)
        if not response.text:
            return
        datas = json.loads(response.text)
        channel_id = response.meta['iid']
        # .get guards against the API omitting the requested channel key.
        for entry in datas.get(channel_id, []):
            pubdate = str(self.t.datetimes(entry['mtime']))
            if not self.t.time_is_Recent(pubdate):
                # Skip stale articles (was `yield None`, a dead value).
                continue
            docid = entry['docid']
            meta = {
                'title': entry['title'],
                'pubdate': pubdate,
                'dataSource': entry['source'],
                'getid': docid,
            }
            yield scrapy.Request(f"http://c.m.163.com/nc/article/{docid}/full.html",
                                 callback=self.parse, dont_filter=True, meta=meta)

    def parse(self, response):
        """Turn an article detail-API response into a ``NewsItem``,
        skipping articles whose URL-hash id is already recorded in Redis.
        """
        if not response.text:
            return
        datas = json.loads(response.text)
        get_id = response.meta['getid']
        url = datas[get_id]['shareLink']
        article_id = Utils.url_hash(url)  # renamed from `id`: don't shadow the builtin
        # check_exist_2(...) == 0 means the id already exists -> skip.
        # NOTE(review): semantics inferred from the log message; confirm
        # against Redis_DB.check_exist_2.
        if self.redis.check_exist_2("wenzhangquchong", article_id, '') == 0:
            self.logger.info('该id:%s已存在', article_id)
            return
        body = datas[get_id]['body']
        item = NewsItem()
        item['id'] = article_id
        item['url'] = url
        item['title'] = response.meta['title']
        item['pubdate'] = response.meta['pubdate']
        item['content'] = remove_tags(body)
        item['author'] = response.meta['dataSource']
        item['formats'] = "web"
        item['dataSource'] = response.meta['dataSource']
        item['serchEnType'] = "Mere新闻"
        item['html'] = body
        item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['collectProcess'] = 'crawl_news'
        item['serverIp'] = "113.128.12.74"
        yield item
