# -*- coding: utf-8 -*-
# @Time    : 2020/1/7 13:36
# @Author  : Damn7Kx
# @Software: PyCharm
import datetime
import json
import random
import time
import uuid
from urllib.parse import quote, urlencode
from w3lib.html import remove_tags
import scrapy
from NewsSpider.items import NewsItem
from NewsSpider.tools.utils import Utils
from NewsSpider.tools.filter_time import Times
from NewsSpider.tools.redis_db import Redis_DB
from NewsSpider.tools.parse_html import extract_html
from Crypto.Cipher import AES


class LianShangTouTiaoNews(scrapy.Spider):
    """Lianshang Toutiao (连尚头条) news spider.

    The feed API (POST https://cdstt.lsttapp.com/feeds.sec) takes an
    AES-encrypted JSON payload:

      * AES-CBC with key ``q67n6CSkuoOF$w6L`` and IV ``AdWhfrhjFUhTM&J2``,
        "NoPadding" mode — the plaintext is space-padded to a 16-byte
        boundary by hand before encryption.
      * The ciphertext is hex-encoded (``ed``) and signed with an MD5 over
        ``NEWS01 + ed + pid + salt`` (``sign``).
      * Pagination via ``pageNo``; ``cts`` is a millisecond timestamp and
        ``ts`` is that same timestamp + 4.
    """

    name = 'Lianshang'
    t = Times()
    redis = Redis_DB()
    # Channel ids to crawl, one start request each.
    # NOTE(review): '10016' appears twice — looks like a copy/paste typo;
    # confirm whether a different channel id was intended before removing.
    types = ['10033', '10002', '10018', '10016', '10016', '10020', '10017',
             '10001', '10006', '10012', '10010', '10005', '10013', '10009']

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.ProxyIPMiddleware': 544,
        },
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }
    headers = {
        "Content-Type": "application/x-www-form-urlencoded",
        "User-Agent": "Mozilla/5.0 (Linux; Android 7.1.2; G011A Build/N2G48H; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/52.0.2743.100 Safari/537.36 wkbrowser lsttbrw 2.3.8 191213 js 5.1.2 newfocus",
        "Connection": "Keep-Alive",
        "Accept-Encoding": "gzip",
    }

    # AES key/IV reverse-engineered from the app, plus the uppercase hex
    # alphabet used to serialise the ciphertext.
    key = 'q67n6CSkuoOF$w6L'
    iv = 'AdWhfrhjFUhTM&J2'
    new_dicts = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F']

    def change_16(self, text: str) -> str:
        """Space-pad ``text`` to a 16-byte boundary for "NoPadding" AES.

        Note: when the length is already a multiple of 16 a full extra
        block of 16 spaces is appended — this mirrors the app client, and
        the server-side ``sign`` check depends on it, so do not "fix" it.
        """
        # Equivalent to the original loop; the inner `if` there was always true.
        return text + ' ' * (16 - len(text) % 16)

    def aes_encrypt(self, plain_text: str, key: str, iv: str) -> bytes:
        """AES-CBC encrypt ``plain_text`` (must already be padded to 16 bytes).

        Returns the raw ciphertext bytes (the original annotation said
        ``str``, but ``AES.encrypt`` returns ``bytes``).
        """
        aes = AES.new(key=key.encode(), mode=AES.MODE_CBC, iv=iv.encode())
        return aes.encrypt(plain_text.encode())

    def process_array(self, bArr) -> str:
        """Hex-encode a bytes-like object; returns '' for ``None``."""
        if bArr is None:
            return ""
        return self.process_ed(bArr, 0, len(bArr))

    def process_ed(self, bArr, i: int, i2: int) -> str:
        """Hex-encode ``i2`` bytes of ``bArr`` starting at offset ``i``.

        Fixed: the original loop shadowed the ``i`` offset parameter with
        the loop variable and only behaved correctly for ``i == 0`` (the
        only way it was called); it now honours a non-zero start offset.
        """
        chars = []
        for idx in range(i, i + i2):
            b = bArr[idx]
            chars.append(self.new_dicts[(b >> 4) & 15])   # high nibble
            chars.append(self.new_dicts[b & 15])          # low nibble
        return ''.join(chars)

    def _build_feed_request(self, channel_id, page_no):
        """Build the signed POST request for one page of a channel's feed.

        Extracted from the previously duplicated bodies of
        ``start_requests`` and ``parse_text``.  Key order of ``params``
        matters: ``json.dumps`` preserves insertion order and the MD5
        ``sign`` covers the serialised form.
        """
        timestamp = int(time.time() * 1000)
        serial_id = str(uuid.uuid4()).replace('-', '')
        params = {"appInfo": {"lang": "cn", "appId": "NEWS01", "chanId": "tencent", "origChanId": "tencent",
                              "verCode": "191213", "verName": "2.3.8", "dhid": "7355CA51E47EF65FE75AF24C63BD269A",
                              "imei": "869342479554350", "androidId": "c2a5f2b10fd596e8", "feedVer": 1031,
                              "cts": timestamp, "mac": "a2:13:1d:c2:3b:9d", "cityCode": "", "mapSP": "t",
                              "longi": "121.48789833333333", "lati": "31.24916", "uhid": "", "openId": "",
                              "netModel": "w", "capBssid": "be:4d:f9:b1:d1:69", "capSsid": "bcvjl36647"},
                  "extInfo": {"os": "android", "osApiLevel": "25", "osVersion": "7.1.2", "deviceType": 1,
                              "screenWidth": "720", "screenHeight": "1280", "deviceVendor": "google",
                              "deviceVersion": "G011A", "androidId": "c2a5f2b10fd596e8", "screenDensity": "1.2",
                              "appPkgName": "com.linksure.tt", "androidAdId": "", "isOpenScreen": "0",
                              "isp": "46000", "screenOrientation": "1"},
                  "serialId": serial_id, "channelId": channel_id, "bTabId": "1",
                  "pageNo": str(page_no), "ts": str(timestamp + 4), "loadType": "1"}

        # URL-quote the compact JSON, pad to 16 bytes, encrypt, hex-encode.
        plain_text = json.dumps(params).replace(" ", '')
        padded = self.change_16(quote(plain_text))
        ed_ = self.process_array(self.aes_encrypt(padded, self.key, self.iv))
        sign_ = f"NEWS01{ed_}acds001001,cds001004mys#npM#S0jSU!XG#uHA!54eK&WeSwf&o"
        sign = Utils.md5_encrypt(sign_).upper()
        body = urlencode({'st': 'm', 'appId': 'NEWS01', 'sign': sign,
                          'et': 'a', 'pid': 'cds001001,cds001004', 'ed': ed_})
        return scrapy.Request('https://cdstt.lsttapp.com/feeds.sec', method='POST',
                              callback=self.parse_text, headers=self.headers,
                              body=body, dont_filter=True,
                              meta={"type": channel_id, "pageNo": page_no})

    def start_requests(self):
        """Kick off page 1 for every configured channel."""
        for channel_id in self.types:
            yield self._build_feed_request(channel_id, 1)

    def parse_text(self, response):
        """Parse one feed-list page: queue the next page and article requests."""
        print("正在访问列表页:", response.url)
        channel_id = response.meta['type']
        page_no = response.meta['pageNo']
        try:
            articles = json.loads(response.text)['cds001001']['result']
        except (ValueError, KeyError, TypeError):
            # Invalid JSON or unexpected payload shape — treat as empty page.
            articles = []
        # Crawl at most 7 further pages per channel (pageNo 1..8).
        if page_no <= 7:
            yield self._build_feed_request(channel_id, page_no + 1)
        for d in articles:
            try:
                entry = d['item'][0]
                title = entry['title']
                url = entry['url']
            except (KeyError, IndexError, TypeError):
                continue
            news_id = Utils.url_hash(url)
            # Skip articles already seen (redis dedup set).
            if self.redis.check_exist_2("wenzhangquchong", news_id, '') == 0:
                print('该id:%s已存在' % news_id)
                continue
            try:
                pubdate = str(self.t.datetimes(entry['pubTime']))
            except Exception:
                # Missing/unparseable publish time — skip the article.
                continue
            if not self.t.time_is_Recent(pubdate):
                continue
            yield scrapy.Request(url, headers=self.headers, dont_filter=True,
                                 callback=self.parse,
                                 meta={'id': news_id, 'url': url,
                                       'pubdate': pubdate, 'title': title})

    def parse(self, response):
        """Parse one article page into a NewsItem for the Kafka pipeline."""
        if response.text is None:
            return
        item = NewsItem()
        item['id'] = response.meta['id']
        item['url'] = response.meta['url']
        item['title'] = response.meta['title']
        item['pubdate'] = response.meta['pubdate']
        try:
            html = extract_html(response.text)
        except Exception:
            # Best-effort extraction: fall back to an empty body.
            html = ''
        item['content'] = remove_tags(html)
        item['html'] = html
        try:
            item['author'] = response.css("#subtitle_text > span.source::text").extract_first()
        except Exception:
            item['author'] = ''
        item['formats'] = "app"
        item['dataSource'] = "连尚头条"
        item['serchEnType'] = "连尚头条"
        item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['collectProcess'] = 'crawl_news'
        item['serverIp'] = "113.128.12.74"
        yield item
