# -*- coding: utf-8 -*-
# @Time    : 2019/12/30 9:39
# @Author  : Damn7Kx
# @Software: PyCharm
import datetime
import json
import random
import time
from urllib.parse import unquote
import scrapy
from NewsSpider.items import NewsItem
from NewsSpider.tools.utils import Utils
from NewsSpider.tools.filter_time import Times
from NewsSpider.tools.redis_db import Redis_DB
from w3lib.html import remove_tags
import uuid


class ZheJiangNews(scrapy.Spider):
    """Spider for the Zhejiang News (浙江新闻) mobile-app API.

    Every request must carry a signature: ``X-SIGNATURE`` is the SHA-256
    of ``{api_path}&&{session_id}&&{request_id}&&{timestamp}&&{sign_key}``
    (see ``_signed_headers``). The first listing page must NOT include a
    ``start`` parameter, otherwise the API returns no data; subsequent
    pages are keyed by the last article's ``sort_number``.
    """

    name = 'Zhejiang'
    # Channel ids to crawl, one listing feed per id.
    types = ['52e5f902cf81d754a434fb50','53845624e4b08e9fb1cdfc17','553638d5498e2ca4bf9f3d99','55370925498e2ca4bf9f3daa',
             '553707f8498e2ca4bf9f3da9','5537079c498e2ca4bf9f3da7','55370963498e2ca4bf9f3dab','5d4ba8cd159bb84750661d50',
             '5d4ba90a159bb84750661d51','584e6ac7e200b2098f871d3a','5534eb21498e2ca4bf9f3c34','58eaedbae200b20e0092ef75',
             '57ff4d66e4aaf28c91d0bf8e','5651e596498e4895c345ef65']

    # Static credentials observed from the app's traffic.
    # Example signed path:
    # "/api/article/channel_list&&5e095163d4d40555a2a6140b&&570a4ab1-55a6-40f1-9f88-70fab19dae6d&&1577687138711&&XG?4VZ&4>9w"
    SESSION_ID = '5e095163d4d40555a2a6140b'
    SIGN_KEY = 'XG?4VZ&4>9w'
    USER_AGENT = ("zjxw; 6.1.5; ffffffff-cd54-7a6d-ffff-ffffa1f8053c; "
                  "Google+Nexus+6P+-+7.1.0+-+API+25+-+1440x2560 16,9; Android; 7.1.1; zh; yingyongbao")
    # Follow at most this many listing pages per channel.
    MAX_PAGES = 3

    t = Times()
    count = 0
    redis = Redis_DB()

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.ProxyIPMiddleware': 544,
            'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
        },
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }

    def _signed_headers(self, path):
        """Build the signed request headers for one API call.

        :param path: API path being requested, e.g. ``'/api/article/detail'``.
        :return: dict of HTTP headers including a fresh request id,
                 millisecond timestamp and SHA-256 signature.
        """
        request_id = str(uuid.uuid4())
        timestamp = int(time.time() * 1000)
        raw = f'{path}&&{self.SESSION_ID}&&{request_id}&&{timestamp}&&{self.SIGN_KEY}'
        return {
            "User-Agent": self.USER_AGENT,
            "X-SESSION-ID": self.SESSION_ID,
            "X-REQUEST-ID": request_id,
            "X-TIMESTAMP": str(timestamp),
            "X-SIGNATURE": Utils._sha256(raw),
        }

    def start_requests(self):
        """Yield the first (un-paginated) listing request for every channel."""
        for tp in self.types:
            headers = self._signed_headers('/api/article/channel_list')
            # NOTE: the first page must not carry a ``start`` parameter.
            base_url = f'https://api-new.8531.cn/api/article/channel_list?channel_id={tp}&list_count=20'
            yield scrapy.Request(base_url, headers=headers, callback=self.parse_text,
                                 dont_filter=True,
                                 meta={"tp": tp, "page": 1, "header": headers})

    def parse_text(self, response):
        """Parse one listing page: schedule the next page and detail requests."""
        print("正在访问起始页:", response.url)
        datas = json.loads(response.text)
        if not datas:
            return
        data_ = datas['data']['article_list']
        tp = response.meta['tp']
        page = response.meta['page']
        header = response.meta['header']

        if page <= self.MAX_PAGES:
            try:
                # The next page starts after the last article of this one.
                next_id = data_[-1]['sort_number']
            except (IndexError, KeyError, TypeError):
                # Empty/short page or missing key: stop paginating this channel.
                self.logger.debug('pagination stopped for %s', response.url)
            else:
                next_url = (f'https://api-new.8531.cn/api/article/channel_list'
                            f'?channel_id={tp}&list_count=20&start={next_id}')
                yield scrapy.Request(next_url, headers=header, callback=self.parse_text,
                                     dont_filter=True,
                                     meta={"tp": tp, 'page': page + 1, "header": header})

        for d in data_:
            url = d['url']
            article_id = Utils.url_hash(url)
            pubdate = Utils.process_timestamp(d['published_at'])
            pubdate = str(self.t.datetimes(pubdate))
            # Skip articles that are not recent enough.
            if not self.t.time_is_Recent(pubdate):
                continue
            # Deduplicate against redis; 0 means the id was already seen.
            if self.redis.check_exist_2("wenzhangquchong", article_id, '') == 0:
                print('该id:%s已存在' % article_id)
                continue
            meta = {
                'url': url,
                'id': article_id,
                'title': d['doc_title'],
                'pubdate': pubdate,
            }
            # Fetch the article detail page with a freshly signed header set.
            detail_url = f"https://api-new.8531.cn/api/article/detail?id={d['id']}"
            yield scrapy.Request(url=detail_url,
                                 headers=self._signed_headers('/api/article/detail'),
                                 callback=self.parse, dont_filter=True, meta=meta)

    def parse(self, response):
        """Build a NewsItem from an article detail response."""
        content_data = json.loads(response.text)
        data_ = content_data['data']['article']
        html = data_['content']
        item = NewsItem()
        item['id'] = response.meta['id']
        item['url'] = response.meta['url']
        item['title'] = response.meta['title']
        item['pubdate'] = response.meta['pubdate']
        item['content'] = remove_tags(html)
        item['author'] = data_['author']
        item['formats'] = "web"
        item['dataSource'] = data_['source']
        item['serchEnType'] = "浙江新闻"
        item['html'] = html
        item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['collectProcess'] = 'crawl_news'
        item['serverIp'] = "113.128.12.74"
        yield item