# -*- coding: utf-8 -*-
# @Time    : 2019/12/21 12:18
# @Author  : Damn7Kx
# @Software: PyCharm
import datetime
import json
import scrapy
from NewsSpider.items import NewsItem
from NewsSpider.tools.utils import Utils
from NewsSpider.tools.filter_time import Times
from NewsSpider.tools.redis_db import Redis_DB
from w3lib.html import remove_tags


class HuiTouTiaoNews(scrapy.Spider):
    """Spider for the Huitoutiao (惠头条) news app.

    Everything is fetched straight from the app's JSON API:
    a paginated list endpoint per category, then a per-article
    endpoint for the HTML content.
    """

    name = 'Huitoutiao'

    # Category codes accepted by the list API's "type" parameter.
    types = ['toutiao', 'redian', 'yule', 'jiankang', 'xiaohua', 'yinshi',
             'jiaju', 'caijing', 'keji', 'qiche', 'sannong', 'gushi',
             'guowai', 'xingzuo', 'jiaoyu', 'wenhua', 'youxi', 'lvxing',
             'tiyu', 'shishang']

    headers = {
        "user-agent": "Mozilla/5.0 (Linux; Android 5.1.1; G011A Build/LMY48Z; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/52.0.2743.100 Safari/537.36",
        "Connection": "keep-alive",
        "Content-Type": "application/json; charset=utf-8",
    }

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.ProxyIPMiddleware': 544,
        },
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }
    t = Times()          # publish-date normalization / recency filter helper
    count = 0
    redis = Redis_DB()   # de-duplication store keyed by hashed article URL

    def start_requests(self):
        """Yield one POST list request per (category, page) pair, pages 0-3."""
        url = 'http://api.admin.cp.cashtoutiao.com/headLine/getVideoAndArticleNoCoverApi'
        for news_type in self.types:  # renamed: do not shadow builtin `type`
            for page in range(4):
                params = {
                    "platform": "Android",
                    "versionName": "4.1.5.2",
                    "deviceId": "865547631807066",
                    "appVersion": '71',
                    "backVersion": "3",
                    "userId": '143955864',
                    "type": news_type,
                    "page": str(page),
                    "loginId": "29a6034f741e46e2807a51b394b348a5"
                }
                yield scrapy.Request(
                    url,
                    method='POST',
                    headers=self.headers,
                    body=json.dumps(params),
                    callback=self.parse_text,
                    dont_filter=True,
                )

    def parse_text(self, response):
        """Parse a category list response; follow each fresh, unseen article.

        Skips entries without a url, entries older than the recency window,
        and ids already recorded in redis. (Scrapy ignores a callback that
        yields nothing, so skipped entries simply produce no request —
        equivalent to the previous ``yield None``.)
        """
        print("正在访问起始页:", response.url)
        datas = json.loads(response.text)
        if not datas:
            return
        for d in datas.get('data', []):
            try:
                url = d['url']
            except KeyError:
                # Entry carries no url (e.g. non-article payload) — skip it.
                continue
            dataSource = d['nickName']
            content_id = d['id']
            author = d.get('nickName', '')
            article_id = Utils.url_hash(url)  # renamed: do not shadow builtin `id`
            title = d['topic']
            pubdate = Utils.process_timestamp(d['date'])
            pubdate = str(self.t.datetimes(pubdate))
            if not self.t.time_is_Recent(pubdate):
                # Too old for the crawl window.
                continue
            # check_exist_2 == 0 means the id has been seen before — TODO confirm
            # against Redis_DB; message below says "id already exists".
            if self.redis.check_exist_2("wenzhangquchong", article_id, '') == 0:
                print('该id:%s已存在' % article_id)
                continue
            meta = {
                'url': url,
                'id': article_id,
                'title': title,
                'pubdate': pubdate,
                'dataSource': dataSource,
                'author': author,
            }
            content_url = f'https://api.admin.cp.cashtoutiao.com/headLine/h5Api?id={content_id}'
            yield scrapy.Request(
                url=content_url,
                callback=self.parse,
                headers=self.headers,
                dont_filter=True,
                meta=meta,
            )

    def parse(self, response):
        """Build a NewsItem from the article-content API response plus meta."""
        content_data = json.loads(response.text)
        item = NewsItem()
        try:
            html = content_data['data']['content']
        except (KeyError, TypeError):
            # Missing/null "data" or "content" — emit the item with empty body.
            html = ''
        item['id'] = response.meta['id']
        item['url'] = response.meta['url']
        item['title'] = response.meta['title']
        item['pubdate'] = response.meta['pubdate']
        item['content'] = remove_tags(html)
        item['author'] = response.meta['author']
        item['formats'] = "app"
        item['dataSource'] = response.meta['dataSource']
        item['serchEnType'] = "惠头条"
        item['html'] = html
        item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['collectProcess'] = 'crawl_news'
        item['serverIp'] = "113.128.12.74"
        yield item


