# -*- coding: utf-8 -*-
# @Time    : 2020/1/18 15:50
# @Author  : Damn7Kx
# @Software: PyCharm
import datetime
import json
import random
import time
import uuid

from w3lib.html import remove_tags
import scrapy
from NewsSpider.items import NewsItem
from NewsSpider.tools.utils import Utils
from NewsSpider.tools.filter_time import Times
from NewsSpider.tools.redis_db import Redis_DB
from urllib.parse import urlencode
from hashlib import md5
from Crypto.Cipher import AES
import base64
from Crypto.Util.Padding import pad


class SinaNews(scrapy.Spider):
    """Sina News (新浪新闻) spider — mobile-app GET API.

    Crawls the per-channel feed endpoint of ``newsapi.sina.cn``, follows
    pagination (at most 6 pages per channel, tracked via ``upTimes``),
    and fetches every fresh article's detail endpoint.

    Every request is signed the way the Android client signs it:
    ``urlSign`` is a rearranged md5 of ``'/?<query>_<rand><key>'`` (see
    :meth:`urlsign`), sent together with ``rand`` as request headers.
    """

    name = 'Sina'
    t = Times()          # time-window helper (tools.filter_time.Times)
    redis = Redis_DB()   # Redis-backed URL-hash dedup store

    # Feed channel ids; zipped 1:1 with the display names in `types2`.
    types = ['news_ent', 'news_mil', 'news_sports', 'news_finance', 'news_tech', 'news_jingyao0704',
             'news_auto', 'news_fashion', 'news_5g', 'news_nba', 'news_funny', 'news_gossip',
             'local_huhehaote', 'news_travel', 'news_eladies', 'news_blog', 'news_digital', 'news_edu',
             'news_ast', 'news_game', 'news_home', 'news_health', 'news_baoxian', 'news_baby', 'news_collection',
             'news_history', 'house_huhehaote', 'news_inter', 'news_pets', 'news_cartoon', 'news_slim',
             'news_zhengwu', 'news_food', 'news_dangjian', 'news_piyao']

    # Human-readable channel names (the API's `mpName`), same order as `types`.
    types2 = ['娱乐', '军事', '体育', '财经', '科技', '精选', '汽车', '时尚', '5G', 'NBA', '搞笑',
              '八卦', '呼和浩特', '旅游', '女性', '博客', '数码', '教育', '星座', '游戏', '家居',
              '健康', '保险', '育儿', '收藏', '历史', '房产', '国际', '宠物', '动漫', '健身', '政务',
              '美食', '党建', '辟谣']

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.ProxyIPMiddleware': 544,
        },
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }

    # Base headers shared by every request. NOTE: the per-request 'rand'
    # and 'urlSign' values are merged into a *copy* in _signed_request();
    # mutating this shared dict (as the old code did) raced between
    # concurrently scheduled requests and could send a stale signature.
    headers = {
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'signVer': '2.0',
        'User-Agent': 'google-G011A__sinanews__7.29.0__android__7.1.2__480',
        'Connection': 'Keep-Alive',
        'Accept-Encoding': 'gzip'
    }

    ua = "google-G011A__sinanews__7.29.0__android__7.1.2__480"
    deviceId = "2af43eb87a74358a"
    key = "_E2AC6F3643"  # shared secret suffix mixed into the urlSign digest

    def rand(self):
        """Return a random '1'..'1000' string used as the 'rand' header."""
        return str(int(random.random() * 1000 + 1))

    def localSign(self):
        """Return an app-style local sign: 'a_' + random UUID4."""
        return "a_" + str(uuid.uuid4())

    def aes_encrypt(self, plain_text: str, key: str, iv: str) -> str:
        """AES-CBC encrypt *plain_text* (PKCS7-padded), return base64 text."""
        aes = AES.new(key=key.encode(), mode=AES.MODE_CBC, iv=iv.encode())
        encrypted_data = aes.encrypt(pad(plain_text.encode(), AES.block_size))
        return base64.b64encode(encrypted_data).decode()

    def sand(self):
        """Return the encrypted '<deviceId>_<millis>' token ('sand' param)."""
        timestamp = int(time.time() * 1000)
        return self.aes_encrypt(f"{self.deviceId}_{timestamp}", "ePKuMBjd9XLd7zRQ", "A-16-Byte-String")

    def seId(self):
        """Return a pseudo-random 10-hex-char session id.

        md5 of uuid4 + current millis + a random int, sliced to chars
        [8:18] of the hex digest (equivalent to the app's [8:24][:10]).
        """
        seed = str(uuid.uuid4()) + str(time.time() * 1000) + str(random.randint(0, 900) + 100)
        return md5(seed.encode()).hexdigest()[8:18]

    def urlsign(self, params):
        """Sign a query string: md5 hex digest, last 5 chars + first 5."""
        the_md5 = md5(params.encode(encoding='utf-8')).hexdigest()
        return the_md5[-5:] + the_md5[:5]

    def lDid(self):
        """Return a fresh UUID4 string used as the 'lDid' parameter."""
        return str(uuid.uuid4())

    def _signed_request(self, payload, callback, meta):
        """Build a signed GET request to newsapi.sina.cn.

        Computes the client-side urlSign over '/?<query>_<rand><key>'
        and sends 'rand'/'urlSign' in a per-request copy of the shared
        headers, so concurrent requests never clobber each other.
        """
        params = urlencode(payload)
        rand = self.rand()
        url_sign = self.urlsign('/?' + params + "_" + rand + self.key)
        headers = dict(self.headers, rand=rand, urlSign=url_sign)
        return scrapy.Request('https://newsapi.sina.cn/?' + params,
                              callback=callback, dont_filter=True,
                              meta=meta, headers=headers)

    def start_requests(self):
        """Yield the first (pullDirection='down') feed page per channel."""
        for channel, channel_name in zip(self.types, self.types2):
            payload = {'resource': 'feed', 'mpName': channel_name, 'lDid': self.lDid(),
                       'appVersion': '7.29.0',
                       'oldChwm': '12606_0020', 'upTimes': '0', 'city': 'CN15010000000000', 'loginType': '0',
                       'authToken': '',
                       'channel': channel,
                       'link': '', 'authGuid': '', 'open_adtype': '0',
                       'ua': self.ua, 'deviceId': self.deviceId,
                       'connectionType': '2', 'resolution': '720x1280', 'mac': '02:00:00:00:00:00', 'weiboUid': '',
                       'replacedFlag': '1', 'ssoVer': '3',
                       'osVersion': '7.1.2', 'chwm': '5062_0058', 'pullTimes': '6', 'weiboSuid': '',
                       'andId': 'c2a5f2b10fd596e8',
                       'from': '6000095012',
                       'sn': '47952412', 'behavior': 'manual',
                       'aId': '01A75LG3prm2YcuWaDrySOVeRXjgPO34z-gXn13HTuvaTeKZM.',
                       'oaid': '',
                       'localSign': self.localSign(), 'deviceIdV1': self.deviceId, 'osSdk': '25',
                       'listCount': '20', 'accessToken': '', 'downTimes': '1',
                       'sand': self.sand(),
                       'lastTimestamp': str(int(time.time()) - 300),
                       'pullDirection': 'down', 'seId': self.seId(),
                       'imei': '869342479554350',
                       'deviceModel': 'google__google__G011A', 'location': '0.0,0.0', 'authUid': '',
                       'loadingAdTimestamp': '0'}
            yield self._signed_request(payload, self.parse_text, payload)

    def parse_text(self, response):
        """Parse one feed page.

        Schedules the next page (until upTimes exceeds 5, i.e. at most 6
        pages) and a detail request for every fresh, in-time-window item.
        """
        print("正在访问列表页:", response.url)
        payload = response.meta
        page_no = int(payload['upTimes'])
        feed = json.loads(response.text)['data']['feed']

        # Pagination: reuse the payload with fresh per-request tokens and
        # pull the next page "up".
        if page_no <= 5:
            payload['upTimes'] = str(page_no + 1)
            payload['lDid'] = self.lDid()
            payload['localSign'] = self.localSign()
            payload['lastTimestamp'] = str(int(time.time()) - 300)
            payload['pullDirection'] = "up"
            payload['seId'] = self.seId()
            payload['sand'] = self.sand()
            yield self._signed_request(payload, self.parse_text, payload)

        for entry in feed:
            content_id = entry['newsId']
            try:
                # Items lacking any of these fields cannot be fetched/filed.
                recommend_info = entry['recommendInfo']
                dataid = entry['dataid']
                data_source = entry['source']
                title = entry['title']
                url = entry['link']
            except KeyError:
                continue
            pubdate = Utils.process_timestamp(entry['pubDate'])
            pubdate = str(self.t.datetimes(pubdate))
            if not self.t.time_is_Recent(pubdate):
                print("该篇文章不在范围时间内:", pubdate)
                continue
            article_id = Utils.url_hash(url)
            # check_exist_2 returns 0 when the hash was already recorded.
            if self.redis.check_exist_2("wenzhangquchong", article_id, '') == 0:
                print('该id:%s已存在' % article_id)
                continue
            meta = {'id': article_id, 'url': url, 'title': title,
                    'dataSource': data_source, 'pubdate': pubdate}
            payload2 = {'resource': 'article', 'lDid': self.lDid(), 'appVersion': '7.29.0',
                        'oldChwm': '12606_0020', 'city': 'CN15010000000000', 'loginType': '0', "authToken": '',
                        'link': url,
                        'authGuid': '', 'postt': 'news_news_mil_feed',
                        'ua': self.ua,
                        'deviceId': self.deviceId, 'connectionType': '2', 'resolution': '720x1280',
                        'mac': '02:00:00:00:00:00', 'weiboUid': '',
                        'ssoVer': '3', 'osVersion': '7.1.2', 'chwm': '5062_0058', 'weiboSuid': '', 'andId': 'c2a5f2b10fd596e8',
                        'from': '6000095012',
                        'sn': '47952412', 'aId': '01A75LG3prm2YcuWaDrySOVeRXjgPO34z-gXn13HTuvaTeKZM.', 'oaid': '',
                        'recommendInfo': recommend_info,
                        'deviceIdV1': self.deviceId, 'osSdk': '25', 'accessToken': '',
                        # BUG FIX: was a hard-coded, stale base64 token; issue a fresh one.
                        'sand': self.sand(),
                        'newsId': content_id, 'dataid': dataid,
                        # BUG FIX: was self.sand() (copy-paste); seId is the 10-hex session id.
                        'seId': self.seId(),
                        'imei': '869342479554350', 'deviceModel': 'google__google__G011A', 'location': '0.0,0.0',
                        'authUid': ''}
            yield self._signed_request(payload2, self.parse, meta)

    def parse(self, response):
        """Parse an article detail response and yield a populated NewsItem."""
        # Scrapy's response.text is a str, never None; guard on emptiness
        # so json.loads isn't fed an empty body.
        if not response.text:
            return
        data = json.loads(response.text)['data']
        item = NewsItem()
        item['id'] = response.meta['id']
        item['url'] = response.meta['url']
        item['title'] = response.meta['title']
        item['pubdate'] = response.meta['pubdate']
        try:
            # Empty or missing content falls back to the title.
            html = data['content'] or response.meta['title']
        except (KeyError, TypeError):
            # 'data' may be null or lack 'content'.
            html = ''
        content = remove_tags(html) or response.meta['title']
        item['content'] = content
        try:
            author = data['source']
        except (KeyError, TypeError):
            author = ''
        item['author'] = author
        item['formats'] = "app"
        item['dataSource'] = response.meta['dataSource']
        item['serchEnType'] = "新浪新闻"
        # `html` is always bound here, so no try/except is needed.
        item['html'] = html
        item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['collectProcess'] = 'crawl_news'
        item['serverIp'] = "113.128.12.74"
        yield item
