# -*- coding: utf-8 -*-
# @Time    : 2020/1/4 17:14
# @Author  : Damn7Kx
# @Software: PyCharm
import datetime
import json
import random
import time
from urllib.parse import quote_plus
from w3lib.html import remove_tags
import scrapy
from NewsSpider.items import NewsItem
from NewsSpider.tools.utils import Utils
from NewsSpider.tools.filter_time import Times
from NewsSpider.tools.redis_db import Redis_DB
from NewsSpider.tools.parse_html import extract_html
from Crypto.Cipher import AES
import base64
from Crypto.Util.Padding import pad


class JuZiKuaiBaoNews(scrapy.Spider):
    """Spider for the Juzi Kuaibao (橘子快报) news app.

    The list API takes a JSON payload encrypted with AES-CBC
    (key ``kl32547682143657``, IV ``1234567887654321``, PKCS#5/PKCS#7
    padding) passed as a URL-encoded ``json`` query parameter of a fixed
    GET endpoint.  Pagination is driven by the payload's ``page`` field
    (advanced by 2 per hop via ``sortNum`` semantics); article bodies are
    extracted from each detail page in :meth:`parse`.
    """

    name = 'Juzi'
    t = Times()
    redis = Redis_DB()
    # Channel ids "1".."32" are crawled independently.
    types = [str(i) for i in range(1, 33)]

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.ProxyIPMiddleware': 544,
        },
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }
    Ua = [
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/72.0.3626.101 Mobile/15E148 Safari/605.1",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) FxiOS/15.0b13894 Mobile/16D57 Safari/605.1.15",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) FxiOS/8.1.1 Mobile/16D57 Safari/605.1.15",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) OPiOS/16.0.14.122053 Mobile/16D57 Safari/9537.53",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) OPT/2 Mobile/16D57",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) OPiOS/12.0.5.3 Version/7.0 Mobile/16D57 Safari/9537.53",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 EdgiOS/42.10.3 Mobile/16D57 Safari/605.1.15",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/16D57 unknown BingWeb/6.9.8.1",
    ]
    # Kept for backward compatibility (external code may reference it), but
    # note it is built ONCE at class-definition time, so it always carries
    # the same UA.  Per-request rotation is done by _pick_headers().
    headers = {
        "User-Agent": random.choice(Ua),
    }

    key = 'kl32547682143657'
    iv = '1234567887654321'

    def _pick_headers(self) -> dict:
        """Return request headers with a freshly rotated User-Agent."""
        return {"User-Agent": random.choice(self.Ua)}

    def aes_encrypt(self, plain_text: str, key: str, iv: str) -> str:
        """AES-CBC encrypt *plain_text* and return the base64 ciphertext.

        ``pad`` applies PKCS#7 padding (equivalent to the PKCS#5 the API
        docstring mentions, for a 16-byte block).
        """
        aes = AES.new(key=key.encode(), mode=AES.MODE_CBC, iv=iv.encode())
        encrypted_data = aes.encrypt(pad(plain_text.encode(), AES.block_size))
        return base64.b64encode(encrypted_data).decode()

    def _build_list_url(self, channel: str, page: int, action: int) -> str:
        """Build the encrypted list-API URL for one channel/page/action.

        The device fingerprint fields are fixed values the app was
        observed sending; only ``channel``, ``page`` and ``action`` vary.
        """
        params = {"client_type": 1, "channel": channel, "channel_name": "market",
                  "v1_sign": "693aa325cc6565d1515ed380bfda4a28",
                  "v1_ver": 262, "api_ver": 1, "mobileOperator": "46000", "mobileOperatorName": "中国移动",
                  "deviceid": "c2a5f2b10fd596e8", "mac": "A2:13:1D:C2:3B:9D", "imsi": "460022412271245",
                  "imei": "869342479554350", "net_type": 1, "os_version": "7.1.2", "model": "G011A",
                  "brand": "google",
                  "package_name": "com.quyu.youliao", "app_name": "橙子快报", "page": page, "size": 8,
                  "action": action, "id": 0}
        # The server expects compact JSON: double quotes, no whitespace,
        # and raw (non-\u-escaped) CJK characters — json.dumps with these
        # options emits exactly that, instead of string-mangling str(dict).
        plain_text = json.dumps(params, separators=(',', ':'), ensure_ascii=False)
        cipher_text = self.aes_encrypt(plain_text, self.key, self.iv)
        # quote_plus also percent-encodes '/', which base64 output can contain.
        return f'http://api.chengzi8.com/api/getNews?json={quote_plus(cipher_text)}'

    def start_requests(self):
        """Seed one page-0 list request per channel (action=2 = refresh)."""
        for channel in self.types:
            url = self._build_list_url(channel, page=0, action=2)
            yield scrapy.Request(url, callback=self.parse_text, headers=self._pick_headers(),
                                 dont_filter=True, meta={"type": channel, "page": 0})

    def parse_text(self, response):
        """Parse one list-API page: schedule the next page and detail requests."""
        print("正在访问列表页:", response.url)
        channel = response.meta['type']
        page = response.meta['page']
        articles = json.loads(response.text)['list']

        # Paginate while the counter is within bounds; each hop advances
        # the page field by 2 (action=1 = load-more).
        if page <= 7:
            next_page = page + 2
            url = self._build_list_url(channel, page=next_page, action=1)
            yield scrapy.Request(url, callback=self.parse_text, headers=self._pick_headers(),
                                 dont_filter=True, meta={"type": channel, "page": next_page})

        for article in articles:
            pubdate = str(self.t.datetimes(article['publish_time']))
            if not self.t.time_is_Recent(pubdate):
                print("该篇文章不在范围时间内:", pubdate)
                continue
            # Skip entries with no title or no detail URL.
            if 'title' not in article:
                continue
            title = article['title']
            url = article.get('url')
            if url is None:
                continue
            author = article.get('source', '')
            article_id = Utils.url_hash(url)
            # NOTE(review): check_exist_2 apparently returns 0 when the id
            # was already recorded (the log message implies so) — confirm.
            if self.redis.check_exist_2("wenzhangquchong", article_id, '') == 0:
                print('该id:%s已存在' % article_id)
                continue
            meta = {
                'id': article_id,
                'author': author,
                'url': url,
                'title': title,
                'pubdate': pubdate,
            }
            yield scrapy.Request(url, callback=self.parse, headers=self._pick_headers(),
                                 dont_filter=True, meta=meta)

    def parse(self, response):
        """Extract the article body from a detail page and emit a NewsItem."""
        # Skip empty bodies.  (The original checked ``is not None``, which
        # is always true: Scrapy's response.text is a str, possibly "".)
        if not response.text:
            return
        try:
            html = extract_html(response.text)
        except Exception:
            # Best-effort extraction: fall back to an empty body rather
            # than dropping the item.
            html = ''
        item = NewsItem()
        item['id'] = response.meta['id']
        item['url'] = response.meta['url']
        item['title'] = response.meta['title']
        item['pubdate'] = response.meta['pubdate']
        item['content'] = remove_tags(html)
        item['author'] = response.meta['author']
        item['formats'] = "app"
        item['dataSource'] = "桔子快报"
        item['serchEnType'] = "桔子快报"
        item['html'] = html
        item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['collectProcess'] = 'crawl_news'
        item['serverIp'] = "113.128.12.74"
        yield item
