# -*- coding: utf-8 -*-
# @Time    : 2019/12/20 9:56
# @Author  : Damn7Kx
# @Software: PyCharm
import datetime
import json
import scrapy
from NewsSpider.items import NewsItem
from NewsSpider.tools.utils import Utils
from NewsSpider.tools.filter_time import Times
from NewsSpider.tools.redis_db import Redis_DB
from w3lib.html import remove_tags


class TuiKuNews(scrapy.Spider):
    """
    Spider for the TuiKu (推酷) app.

    The list-page APIs are fixed endpoints; the last query component drives
    pagination. All article fields come from the per-article detail API.
    Flow: start_requests -> parse_text (list pages) -> parse (detail pages).
    """

    name = 'Tuiku'

    t = Times()          # publish-time parsing / recency-window helper
    redis = Redis_DB()   # de-duplication store keyed by URL hash

    # Category ids for the "hot articles" endpoint.
    types = ['101000000', '0', '101040000', '20', '108000000', '114000000']
    # Site ids for the "sites" endpoint.
    types2 = ['aaMFJj', '22MNvmN', '2YJBje', '6j6RZr', 'a6bYJ3', 'aayMjmq', 'aAZrIvn', 'AF3qmi', 'AJj2Un', 'AnyqueI',
              'aQVvqiq', 'auu2Uvi',
              'E3IFfa', 'EBnUJvn', 'eyqyQ3', 'f2Iz2e', 'f2myYz', 'fAFVNri', 'FbI36bb', 'FFfaMv', 'iaMNvq', 'imyuQ3',
              'm2qMVf',
              'mY3iae', 'nqiqYv2', 'QFVjIj', 'qMNZbm', 'Qrma6b', 'uU7v2e', 've6vEf', 'vqUJRf', 'vuQve2n', 'YNNZja',
              'YrEN322']
    # Topic ids for the "topics" endpoint.
    types3 = ['10000021', '10000105', '10050185', '10050381', '10050742', '10050973', '10150069', '10150121', '10200003', '10200139', '10200312', '10250090', '10300245', '10300246', '10300331', '10350017', '10350027', '10350160', '10500013', '10500092', '11020125', '11040019', '11060116', '11090128', '11090139', '11090140', '11200143']
    xx1 = ['http://api.tuicool.com/api/articles/hot.json?size=30&pn=0&last_id=&cid={0}&is_pad=1'.format(cid) for cid in types]
    xx2 = ['http://api.tuicool.com/api/sites/{0}.json?pn=0&last_id=&size=30&is_pad=1'.format(id) for id in types2]
    xx3 = ['http://api.tuicool.com/api/topics/{0}.json?pn=0&last_id=&size=30&is_pad=1'.format(topics) for topics in types3]
    xx4 = xx1 + xx2 + xx3

    # Only a single User-Agent is configured.
    headers = {
        'user-agent': "Mozilla/5.0 (Linux; Android 5.1.1; G011A Build/LMY48Z; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/52.0.2743.100 Safari/537.36",
        'Authorization': "Basic MTAuMC4yLjE1OnR1aWNvb2w=",
        'Connection': "close"}

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.ProxyIPMiddleware': 544,
        },
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }

    def start_requests(self):
        """Yield one request per configured list-page API URL."""
        for url in self.xx4:
            yield scrapy.Request(url, headers=self.headers, callback=self.parse_text, dont_filter=True)

    def parse_text(self, response):
        """
        Parse a list-page API response and schedule one detail-page request
        for every article whose publish time falls inside the recency window.
        """
        print("正在访问列表页:", response.url)
        if not response.text:
            return
        datas = json.loads(response.text)
        for article in datas['articles']:
            pubdate = str(self.t.datetimes(article['time']))
            # Skip stale articles; `continue` replaces the original
            # `yield None`, which Scrapy silently discards anyway.
            if not self.t.time_is_Recent(pubdate):
                continue
            url = f"http://api.tuicool.com/api/articles/{article['id']}.json?need_image_meta=1&type=1"
            yield scrapy.Request(url, headers=self.headers, callback=self.parse,
                                 dont_filter=True, meta={'pubdate': pubdate})

    def parse(self, response):
        """
        Parse a detail-page API response into a NewsItem.

        De-duplicates by the URL hash stored in redis; already-seen articles
        are logged and skipped.
        """
        if not response.text:
            return
        data = json.loads(response.text)['article']
        url = data['url']
        article_id = Utils.url_hash(url)  # renamed from `id` to avoid shadowing the builtin
        # NOTE(review): a return value of 0 from check_exist_2 is treated as
        # "already stored" — confirm against Redis_DB's contract.
        if self.redis.check_exist_2("wenzhangquchong", article_id, '') == 0:
            print('该id:%s已存在' % article_id)
            return
        body = data['content']
        item = NewsItem()
        item['id'] = article_id
        item['url'] = url
        item['title'] = data['title']
        item['pubdate'] = response.meta['pubdate']
        item['content'] = remove_tags(body)      # plain text for indexing
        item['author'] = data['feed_title']
        item['formats'] = "app"
        item['dataSource'] = "推酷资讯"
        item['serchEnType'] = "推酷资讯"
        item['html'] = body                      # original HTML preserved
        item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['collectProcess'] = 'crawl_news'
        item['serverIp'] = "113.128.12.74"
        yield item
