# -*- coding: utf-8 -*-
# @Time    : 2019/11/4 9:31
# @Author  : Damn7Kx
# @Software: PyCharm
import scrapy
import json
from NewsSpider.tools.utils import Utils,Get_weiboID
import datetime
import time
from w3lib.html import remove_tags
from NewsSpider.items import CommandItem

class XingGuangSpider(scrapy.Spider):
    """Spider for the sq.istarshine.com (星光) opinion-monitoring API.

    Request flow:
        start_requests -> parse_num    (fetch the total hit count)
                       -> parse_detail (one request per result page)
                       -> parse        (full detail per record -> CommandItem)

    Items are pushed to Kafka via ``KafkaPipeline`` (see ``custom_settings``).
    """

    name = 'XingGuang'

    # Reverse lookup: Chinese source label (as returned by the API) ->
    # internal format tag.  NOTE(review): attribute shadows the builtin
    # ``type``; name kept unchanged for backward compatibility.
    type = {'新浪微博': 'weibo', '微信': 'weixin', }

    # The session cookie embeds the current time twice: as epoch seconds
    # (``stime``) and as a compact %Y%m%d%H%M%S string (``s``).
    stime = int(time.time())
    ps = datetime.datetime.fromtimestamp(stime)
    s = datetime.datetime.strftime(ps, "%Y%m%d%H%M%S")
    cookie = f"Hm_lvt_299d80c4546d392bcee665ee15e01d83=1572260831,1572260856,1572477211; zg_did=%7B%22did%22%3A%20%2216e20bd5c3e330-0089b97529382e-7711439-1fa400-16e20bd5c3f340%22%7D; zg_f9d4df58191741e68d22e66a0cc50949=%7B%22sid%22%3A%201572507311173%2C%22updated%22%3A%201572507311190%2C%22info%22%3A%201572507311183%2C%22superProperty%22%3A%20%22%7B%7D%22%2C%22platform%22%3A%20%22%7B%7D%22%2C%22utm%22%3A%20%22%7B%7D%22%2C%22referrerDomain%22%3A%20%22%22%2C%22cuid%22%3A%20%2273534%22%7D; aliyungf_tc=AQAAAOI09zdGEgMASgyAcXIpI6E95nD2; Hm_lvt_0f453481018b2cffd2d3028c49240af3=1572398275,1572477244,1572926805; login=true; pushWarningBoxFlag=0; Hm_lpvt_0f453481018b2cffd2d3028c49240af3={stime}; pushWarningBTime={s}"
    s2 = datetime.datetime.strftime(ps, "%Y-%m-%d+%H:%M:%S")  # kept: may be referenced externally
    token = '11d14c70007e11ea0f1728a419c2d6e1'

    # Query window and page size shared by the count and the page requests.
    # Hoisted from the previously duplicated hard-coded query strings.
    PAGE_SIZE = 10
    BEGIN_TIME = '2019-09-25+00:00:00'
    END_TIME = '2019-11-04+00:00:00'

    # Extra browser-like headers sent on the paged/detail requests only
    # (the initial count request omits them, matching the original behavior).
    _BROWSER_HEADERS = {
        'Accept': "application/json, text/plain, */*",
        'Accept-Encoding': "gzip, deflate, br",
        'Accept-Language': "zh-CN,zh;q=0.9,en;q=0.8",
        'Connection': "keep-alive",
        'login': "true",
    }

    custom_settings = {
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }

    def _api_headers(self, **extra):
        """Return the common authenticated header set for sq.istarshine.com.

        ``extra`` merges request-specific headers on top of the shared ones.
        Replaces three near-identical hand-written header dicts.
        """
        headers = {
            'Cookie': self.cookie,
            'customerId': "126264",
            'Host': "sq.istarshine.com",
            'Referer': "https://sq.istarshine.com/",
            'token': self.token,
            'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36",
            'userId': "6097",
        }
        headers.update(extra)
        return headers

    def start_requests(self):
        """Entry point: request the total number of matching records."""
        params = (
            "userId=6097&customerId=126264&affections="
            f"&pageSize={self.PAGE_SIZE}&pageNum=1&screenByTime=2&isRepeat=2"
            "&topId=69735&monitorId=70185&haveChildren=false&sourceTypes="
            "&isRead=&orderBy=&industry=&industryLevel=&noiseType=2"
            f"&beginTime={self.BEGIN_TIME}&endTime={self.END_TIME}"
        )
        url = "https://sq.istarshine.com/api/broadData/getBroadDataTotal.do?" + params
        yield scrapy.Request(url=url, callback=self.parse_num,
                             headers=self._api_headers())

    def parse_num(self, response):
        """Read the total hit count and schedule one request per result page."""
        # NOTE(review): the total count lives under the key 'status' — looks
        # odd but matches the original code; confirm against a live response.
        total = json.loads(response.text)['params']['status']
        # Ceiling division: the previous ``total // 10`` silently dropped the
        # last partial page whenever total wasn't a multiple of the page size.
        page_count = -(-total // self.PAGE_SIZE)
        headers = self._api_headers(**self._BROWSER_HEADERS)
        base_url = "https://sq.istarshine.com/api/broadData/getBroadData.do?"
        for page in range(1, page_count + 1):
            params = (
                f"customerId=126264&pageSize={self.PAGE_SIZE}&pageNum={page}"
                "&screenByTime=2&isRepeat=2&monitorId=70185&haveChildren=false"
                f"&noiseType=2&beginTime={self.BEGIN_TIME}&endTime={self.END_TIME}"
            )
            yield scrapy.Request(base_url + params, headers=headers,
                                 callback=self.parse_detail)

    def parse_detail(self, response):
        """For each summary row on a result page, request its full detail."""
        headers = self._api_headers(**self._BROWSER_HEADERS,
                                    **{'X-Requested-With': "XMLHttpRequest"})
        base_url = "https://sq.istarshine.com/api/broadData/getInfoDetail.do?"
        for row in json.loads(response.text)['params']:
            relation_id = row['id']  # renamed from ``id`` (shadowed builtin)
            params = f"userId=6097&customerId=126264&relationId={relation_id}&isMarked=1"
            yield scrapy.Request(base_url + params, headers=headers,
                                 callback=self.parse)

    # Backward-compatible alias for the original (misspelled) callback name.
    parse_detatil = parse_detail

    def parse(self, response):
        """Convert one detail payload into ``CommandItem``s and yield them."""
        for par in json.loads(response.text)['params']:
            # Fresh item per record: the original reused a single mutable
            # item across yields, risking later mutation of queued items.
            item = CommandItem()
            item['title'] = par['title']
            html = par['context']
            item['html'] = html
            item['content'] = remove_tags(html)
            item['author'] = par['author']
            item['pubdate'] = par['sourceTime']
            url = par['url']
            item['url'] = url
            data_source = par['website']
            item['dataSource'] = data_source
            formats = self.type.get(data_source, '')
            item['formats'] = formats
            if formats == "weibo":
                # Weibo records are keyed by an ID extracted from the URL;
                # everything else falls back to a hash of the URL.
                item['id'] = Get_weiboID().run(url)
            else:
                item['id'] = Utils.url_hash(url)
            item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            item['collectProcess'] = 'xingguangpython'
            item['serverIp'] = '113.128.12.74'
            yield item

