# -*- coding: utf-8 -*-
# @Time    : 2019/12/23 10:01
# @Author  : Damn7Kx
# @Software: PyCharm
import datetime
import json
import time
from w3lib.html import remove_tags
import scrapy
from NewsSpider.items import NewsItem
from NewsSpider.tools.utils import Utils
from NewsSpider.tools.filter_time import Times
from NewsSpider.tools.redis_db import Redis_DB
from urllib.parse import urlencode


class JunShiTouTiaoNews(scrapy.Spider):
    """JunShi TouTiao (军事头条) news spider.

    Polls a fixed list API (keyed by the current unix timestamp) for each
    channel, filters articles to the allowed publish window, de-duplicates
    by URL hash via Redis, then follows each article's detail endpoint to
    extract the body HTML and emit a ``NewsItem``.
    """

    name = 'Junshi'
    t = Times()            # publish-date parsing / recency filtering helpers
    redis = Redis_DB()     # de-duplication store
    types = list(range(1, 6))   # channel ids 1..5

    # Shared API constants (previously duplicated inline in three places).
    LIST_URL = 'http://v1.api.danchequ.com/ttapi/hotListpic?'
    TOKEN = 'a23ae9ee-d551-47a4-bace-556d1018f689'
    MAX_PAGES = 7          # parse_text2 stops requesting new pages at this count

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.ProxyIPMiddleware': 544,
            'NewsSpider.middlewares.RandomUserAgentMiddleware': 543,
        },
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }

    def start_requests(self):
        """Seed two list requests per channel: one 'type=2' single-page
        request and one 'type=1' request that paginates in parse_text2."""
        for channel in self.types:
            timestamp = str(int(time.time()))
            # Key order matters for byte-identical URLs: v, times, timestamp,
            # channelId, token, then type appended last (dicts keep insertion order).
            base = {
                'v': '2.3.3',
                'times': '1',
                'timestamp': timestamp,
                'channelId': channel,
                'token': self.TOKEN,
            }
            params = dict(base, type='2')
            yield scrapy.Request(self.LIST_URL + urlencode(params),
                                 callback=self.parse_text, dont_filter=True)
            params2 = dict(base, type='1')
            yield scrapy.Request(self.LIST_URL + urlencode(params2),
                                 callback=self.parse_text2, dont_filter=True,
                                 meta={"params": params2, 'number': 1})

    def _extract_items(self, response):
        """Yield one detail-page request per usable article in a list response.

        Shared by parse_text and parse_text2 (previously copy-pasted).
        Skips articles that are outside the recency window, lack a Title or
        OriginalUrl, or whose URL hash is already recorded in Redis.
        """
        entries = json.loads(response.text)['content_list']
        for entry in entries:
            pubdate = Utils.process_timestamp(entry['ShowTime'])
            pubdate = str(self.t.datetimes(pubdate))
            if not self.t.time_is_Recent(pubdate):
                print("该篇文章不在范围时间内:", pubdate)
                continue
            title = entry.get('Title')
            if title is None:
                continue
            url = entry.get('OriginalUrl')
            if url is None:
                continue
            article_id = Utils.url_hash(url)
            # check_exist_2 returns 0 when this id has already been seen.
            # NOTE: the original code yielded None here, which Scrapy logs as
            # an unsupported value and discards — skip instead.
            if self.redis.check_exist_2("wenzhangquchong", article_id, '') == 0:
                print('该id:%s已存在' % article_id)
                continue
            meta = {
                'url': url,
                'id': article_id,
                'title': title,
                'author': entry['FromName'],
                'dataSource': entry['FromName'],
                'pubdate': pubdate,
            }
            content_id = entry['Id']
            get_content = f'http://v1.api.danchequ.com/ttapi/DetailedFloor?v=2.3.3&token=a23ae9ee-d551-47a4-bace-556d1018f689&market=qq&count=10&subjectId=0&id={content_id}'
            yield scrapy.Request(get_content, callback=self.parse, dont_filter=True, meta=meta)

    def parse_text(self, response):
        """Handle a 'type=2' list response (single page, no pagination)."""
        print("正在访问详情页:", response.url)
        yield from self._extract_items(response)

    def parse_text2(self, response):
        """Handle a 'type=1' list response; re-request the list with a fresh
        timestamp up to MAX_PAGES times, then extract this page's articles."""
        print("正在访问详情页:", response.url)
        params = response.meta['params']
        number = response.meta['number']
        if number < self.MAX_PAGES:
            params['timestamp'] = int(time.time())
            yield scrapy.Request(self.LIST_URL + urlencode(params),
                                 callback=self.parse_text2, dont_filter=True,
                                 meta={"params": params, 'number': number + 1})
        yield from self._extract_items(response)

    def parse(self, response):
        """Build a NewsItem from an article detail response.

        Combines the metadata carried in ``response.meta`` (set by
        _extract_items) with the article body from the detail API.
        """
        # Guard against an empty body; the original `is not None` check was
        # always true (Response.text is a str) and let json.loads crash.
        if not response.text:
            return
        data = json.loads(response.text)['article_info']
        item = NewsItem()
        item['id'] = response.meta['id']
        item['url'] = response.meta['url']
        item['title'] = response.meta['title']
        item['pubdate'] = response.meta['pubdate']
        html = data['OriginalContent']
        item['content'] = remove_tags(html)
        item['html'] = html
        # author is always set by _extract_items; default defensively anyway.
        item['author'] = response.meta.get('author', '')
        item['formats'] = "app"
        item['dataSource'] = response.meta['dataSource']
        item['serchEnType'] = "军事头条"
        item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['collectProcess'] = 'crawl_news'
        item['serverIp'] = "113.128.12.74"
        yield item
