# -*- coding: utf-8 -*-
# @Time    : 2019/12/12 16:43
# @Author  : Damn7Kx
# @Software: PyCharm
import datetime
import json
import random
from urllib.parse import urlencode
import scrapy
from NewsSpider.items import NewsItem
from NewsSpider.tools.utils import Utils
from NewsSpider.tools.filter_time import Times
from NewsSpider.tools.parse_html import extract_html
from NewsSpider.tools.redis_db import Redis_DB
from w3lib.html import remove_tags

class ZiXunGouNews(scrapy.Spider):
    """Spider for the "Zixungou" (资讯狗) news API.

    Flow: POST a JSON body ({"feed_id": ..., "published": ...}) to the list
    endpoint for each feed id, follow every article's detail page, and
    paginate by passing the id of the last article of the previous page as
    the next request's "published" value.
    """

    name = 'Zixungou'
    # List API endpoint.
    # base http://39.104.98.106/v1/media/sub-list/article/list
    api_url = 'http://39.104.98.106/v1/media/sub-list/article/list'
    # Feed ids to crawl: 26-94 plus 127-445.
    types = [str(i) for i in range(26, 95)]
    types2 = types + [str(i) for i in range(127, 446)]

    headers = {
        "Access-Token": "7D6172019A63E0604B1717E6C425FBA5",
        "Content-Type": "application/json",
        "Host": "39.104.98.106",
        "Accept-Encoding": "gzip",
    }
    t = Times()
    # Class-level counter of list pages fetched; used as a crawl budget.
    count = 0
    redis = Redis_DB()

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.ProxyIPMiddleware': 544,
            'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
        },
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }

    def _list_request(self, feed_id, published):
        """Build one POST request for a page of a feed's article list.

        FIX: the original used ``scrapy.FormRequest`` with only a ``body``
        argument; without ``formdata`` FormRequest defaults to GET, so the
        JSON body was sent with the wrong method for a POST-JSON API.
        Use an explicit POST ``scrapy.Request`` instead.
        """
        body = json.dumps({"feed_id": feed_id, "published": published})
        return scrapy.Request(
            self.api_url,
            method='POST',
            headers=self.headers,
            body=body,
            callback=self.parse_text,
            dont_filter=True,
            meta={"type": feed_id},
        )

    def start_requests(self):
        # First page of every feed; "published": "0" starts from the top.
        for feed_id in self.types2:
            yield self._list_request(feed_id, "0")

    def parse_text(self, response):
        """Parse a list page: schedule the next page and each detail page."""
        # ~388 feeds * 3 pages each -> stop the whole crawl past that budget.
        if self.__class__.count > 1165:
            # Ask the engine to close this spider.
            self.crawler.engine.close_spider(self, '计数超过1165，停止爬虫!')
        print("正在访问详情页:", response.url)
        # Count every fetched list page.
        self.__class__.count += 1
        datas = json.loads(response.text)
        if not datas:
            return
        # FIX: guard against a missing/empty article list (end of pagination)
        # -- the original indexed data_[-1] and raised IndexError on [].
        data_ = (datas.get('data') or {}).get('list') or []
        if not data_:
            return
        feed_id = response.meta['type']
        # Next page: the API paginates by the id of the last article seen.
        yield self._list_request(feed_id, str(data_[-1]['id']))
        for d in data_:
            url = d['url']
            article_id = Utils.url_hash(url)  # renamed: don't shadow builtin id
            pubdate = Utils.process_timestamp(d['created_at'])
            pubdate = str(self.t.datetimes(pubdate))
            # Skip articles that are not recent enough.
            if not self.t.time_is_Recent(pubdate):
                continue
            # Redis dedup: 0 means the hash is already recorded.
            if self.redis.check_exist_2("wenzhangquchong", article_id, '') == 0:
                print('parse_text id:%s已存在' % article_id)
                continue  # FIX: was ``yield None``, a useless engine value
            dicts = {
                'url': url,
                'id': article_id,
                'link': d['link'],
                'title': d['title'],
                'pubdate': pubdate,
                'dataSource': d['feed_name'],
            }
            # Fetch the detail page; metadata rides along in meta.
            yield scrapy.Request(url=url, headers=self.headers,
                                 callback=self.parse, dont_filter=True,
                                 meta=dicts)

    def parse(self, response):
        """Build a NewsItem from a detail page; emit web + mobile variants."""
        item = NewsItem()
        # Prefer the rich-text section; fall back to generic extraction.
        # FIX: replaces a bare ``except:`` that existed only to catch
        # remove_tags(None) when the selector missed.
        node = response.css("section[data-type='rtext']").extract_first()
        if node is not None:
            content = remove_tags(node)
        else:
            content = extract_html(response.text)
        item['id'] = response.meta['id']
        item['url'] = response.meta['url']
        item['title'] = response.meta['title']
        item['pubdate'] = response.meta['pubdate']
        item['content'] = content
        item['author'] = response.meta['dataSource']
        item['formats'] = "web"
        item['dataSource'] = response.meta['dataSource']
        item['serchEnType'] = "资讯狗"
        item['html'] = content
        item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['collectProcess'] = 'crawl_news'
        item['serverIp'] = "113.128.12.74"
        yield item

        # Mobile-app variant: same content, keyed by the mobile link's hash.
        m_url = response.meta['link']
        m_id = Utils.url_hash(m_url)
        if self.redis.check_exist_2("wenzhangquchong", m_id, '') == 0:
            print('parse id:%s已存在' % m_id)
            return
        # FIX: mutate a copy -- the web item yielded above may still be in an
        # asynchronous pipeline (Kafka), so reusing the same object could
        # corrupt the first item before it is persisted.
        mobile = item.copy()
        mobile['formats'] = "app"
        mobile['url'] = m_url
        mobile['id'] = m_id
        yield mobile
