# -*- coding: utf-8 -*-
# @Time    : 2020/1/14 15:52
# @Author  : Damn7Kx
# @Software: PyCharm
import datetime
import json
import random
import time
from urllib.parse import quote_plus
from w3lib.html import remove_tags
import scrapy
from NewsSpider.items import NewsItem
from NewsSpider.tools.utils import Utils
from NewsSpider.tools.filter_time import Times
from NewsSpider.tools.redis_db import Redis_DB
import base64


class JuYouLiaoNews(scrapy.Spider):
    """Spider for the 聚有料 (Juyouliao) news app API.

    Request signing (reverse-engineered from the app):
        salt: b2qKgtaW4,9z9D`Fmst?K5JZbLYOY]NP6ssGf2U~;zk9oCNgoytV!}wW7ia+`w9g
        sign = sha1(base64(query_string + salt))

    Listing endpoints are GET requests paginated via the ``last_time``
    parameter, set to the ``published_at`` of the last item of the
    previous page.
    """

    name = 'Juyouliao'
    t = Times()
    redis = Redis_DB()
    # Category ids to crawl; each gets its own pagination chain.
    types = ['2', '4', '11', '31', '13', '29', '3', '14', '6', '28', '9', '30', '18', '21', '25', '15']

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.ProxyIPMiddleware': 544,
            'NewsSpider.middlewares.RandomUserAgentMiddleware': 543,
        },
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }

    # Signing salt appended to the query string before base64 + sha1.
    key = 'b2qKgtaW4,9z9D`Fmst?K5JZbLYOY]NP6ssGf2U~;zk9oCNgoytV!}wW7ia+`w9g'

    def nonce(self):
        """Return a request nonce: 6 random [a-z0-9] chars + millisecond timestamp."""
        charstr = "abcdefghijklmnopqrstuvwxyz0123456789"
        prefix = ''.join(random.choice(charstr) for _ in range(6))
        return prefix + str(int(time.time() * 1000))

    def _sign(self, query):
        """Sign a query string: sha1(base64(query + salt))."""
        payload = base64.b64encode((query + self.key).encode()).decode()
        return Utils._sha1(payload)

    def start_requests(self):
        """Issue the first listing request (empty last_time) for each category."""
        for t in self.types:
            timestamp = int(time.time())
            nonce = self.nonce()
            # The signed string must match the request's query parameters
            # (in alphabetical order, sign excluded) byte-for-byte.
            query = ('app_ver=4&channel=tencent&device_id=399d2bd4230e3b7b80ba5e9f6469e7d9'
                     '&device_udid=35785aa2553b0125313d475d8b309fe8&from=app&last_time='
                     '&limit=8&mac=A2:13:1D:C2:3B:9D&nonce={0}&os_ver_code=25&system=1'
                     '&timestamp={1}').format(nonce, timestamp)
            sign = self._sign(query)
            url = (f'https://api.news.gyouliao.com/api/v1/news/category/{t}/news?'
                   f'app_ver=4&channel=tencent&device_id=399d2bd4230e3b7b80ba5e9f6469e7d9'
                   f'&device_udid=35785aa2553b0125313d475d8b309fe8&from=app&last_time='
                   f'&limit=8&mac=A2:13:1D:C2:3B:9D&nonce={nonce}&os_ver_code=25&system=1'
                   f'&timestamp={timestamp}&sign={sign}')
            yield scrapy.Request(url, callback=self.parse_text, dont_filter=True,
                                 meta={"type": t, "page": 0})

    def parse_text(self, response):
        """Parse a listing page: queue the next page and the article detail requests."""
        print("正在访问列表页:", response.url)
        t = response.meta['type']
        number = response.meta['page']
        datas = json.loads(response.text)
        data_ = datas['data']['news']

        # Fetch up to 8 pages (page index 0..7) per category, cursoring on
        # the last item's published_at. Guard against an empty page, which
        # previously raised IndexError on data_[-1].
        if number <= 7 and data_:
            timestamp = int(time.time())
            nonce = self.nonce()
            last_time = data_[-1]['published_at']
            query = ('app_ver=4&channel=tencent&device_id=399d2bd4230e3b7b80ba5e9f6469e7d9'
                     '&device_udid=35785aa2553b0125313d475d8b309fe8&from=app&last_time={0}'
                     '&limit=8&mac=A2:13:1D:C2:3B:9D&nonce={1}&os_ver_code=25&system=1'
                     '&timestamp={2}').format(last_time, nonce, timestamp)
            sign = self._sign(query)
            url = (f'https://api.news.gyouliao.com/api/v1/news/category/{t}/news?'
                   f'app_ver=4&channel=tencent&device_id=399d2bd4230e3b7b80ba5e9f6469e7d9'
                   f'&device_udid=35785aa2553b0125313d475d8b309fe8&from=app&last_time={last_time}'
                   f'&limit=8&mac=A2:13:1D:C2:3B:9D&nonce={nonce}&os_ver_code=25&system=1'
                   f'&timestamp={timestamp}&sign={sign}')
            yield scrapy.Request(url, callback=self.parse_text, dont_filter=True,
                                 meta={"type": t, "page": number + 1})

        for d in data_:
            dicts = {}
            pubdate_datetime = d['published_at']
            timestamp_ = Utils.process_timestamp(pubdate_datetime)
            pubdate = str(self.t.datetimes(timestamp_))
            content_id = d['id']
            # Skip articles outside the configured recency window.
            if not self.t.time_is_Recent(pubdate):
                print("该篇文章不在范围时间内:", pubdate)
                continue
            # title and source_url are mandatory; skip the item if missing.
            try:
                title = d['title']
                url = d['source_url']
            except KeyError:
                continue
            author = d.get('author_name', '')
            article_id = Utils.url_hash(url)
            # Redis-backed dedup: 0 means the hash was already seen.
            if self.redis.check_exist_2("wenzhangquchong", article_id, '') == 0:
                print('该id:%s已存在' % article_id)
                continue
            dicts['id'] = article_id
            dicts['author'] = author
            dicts['title'] = title
            dicts['pubdate'] = pubdate
            timestamp_content = int(time.time())
            # NOTE(review): the content endpoint uses a hard-coded nonce
            # ('a4kty41578987991790'); the server apparently accepts it.
            query = ('app_ver=4&channel=tencent&device_id=399d2bd4230e3b7b80ba5e9f6469e7d9'
                     '&device_udid=35785aa2553b0125313d475d8b309fe8&extra=[2,null,null]'
                     '&from=app&mac=A2:13:1D:C2:3B:9D&nonce=a4kty41578987991790'
                     f'&os_ver_code=25&system=1&timestamp={timestamp_content}')
            sign = self._sign(query)
            # The URL carries the percent-encoded form of extra=[2,null,null].
            content_url = (f'https://api.news.gyouliao.com/api/v1/news/release/{content_id}/content?'
                           f'app_ver=4&channel=tencent&device_id=399d2bd4230e3b7b80ba5e9f6469e7d9'
                           f'&device_udid=35785aa2553b0125313d475d8b309fe8&extra=%5B2%2Cnull%2Cnull%5D'
                           f'&from=app&mac=A2:13:1D:C2:3B:9D&nonce=a4kty41578987991790'
                           f'&os_ver_code=25&sign={sign}&system=1&timestamp={timestamp_content}')
            yield scrapy.Request(content_url, callback=self.parse, dont_filter=True, meta=dicts)

    def parse(self, response):
        """Parse an article detail response and yield a populated NewsItem."""
        # An empty body would crash json.loads; the old check
        # (`response.text is not None`) was always true for a str.
        if not response.text:
            return
        item = NewsItem()
        datas = json.loads(response.text)
        data_ = datas['data']
        item['id'] = response.meta['id']
        item['url'] = data_['news']['origin_url']
        item['title'] = response.meta['title']
        item['pubdate'] = response.meta['pubdate']
        try:
            html = data_['content']
        except KeyError:
            html = ''
        # Plain-text body plus the raw HTML it was derived from.
        item['content'] = remove_tags(html)
        item['html'] = html
        item['author'] = response.meta['author']
        item['formats'] = "app"
        # Fall back to the app name when the upstream source is missing.
        try:
            item['dataSource'] = data_['news']['source_name']
        except KeyError:
            item['dataSource'] = '聚有料APP'
        item['serchEnType'] = "聚有料APP"
        item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['collectProcess'] = 'crawl_news'
        item['serverIp'] = "113.128.12.74"
        yield item
