# -*- coding: utf-8 -*-
# @Time    : 2019/12/7 19:19 (revised; originally created 2019/12/6 15:36)
# @Author  : Damn7Kx
# @Software: PyCharm
import hashlib

import requests
from w3lib.html import remove_tags
import scrapy
import datetime
from NewsSpider.items import CommandItem
from NewsSpider.tools.utils import Get_weiboID,Utils
import json
import base64
from urllib.parse import quote


class QingboSpider(scrapy.Spider):
    """Spider for the yuqing.bigcloudsys.cn sentiment-monitoring platform.

    Flow:
        1. ``start_requests`` logs in with a plain ``requests`` POST to obtain
           fresh session cookies, then requests the item list for the last
           60 minutes.
        2. ``parse_text`` walks the JSON list and issues one snapshot POST per
           item, carrying the item's metadata in ``Request.meta``.
        3. ``parse`` merges the snapshot content with the metadata and yields
           a ``CommandItem`` into the Kafka pipeline.
    """

    name = 'yqt'
    allowed_domains = ['yuqing.bigcloudsys.cn']

    # Maps the platform's source labels to internal `formats` values.
    # NOTE(review): this attribute shadows the builtin `type`; renaming it
    # would change the class interface, so it is kept as-is.
    type = {'web': 'website', 'bbs': 'bbs', '新浪微博': 'weibo', "微信": "weixin"}

    custom_settings = {
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }

    # Default request headers. The hard-coded Cookie below is only a
    # placeholder: start_requests() replaces it with a freshly obtained
    # session cookie before any request is scheduled.
    headers = {
        'Accept': "application/json, text/plain, */*",
        'Accept-Encoding': "gzip, deflate, br",
        'Accept-Language': "zh-CN,zh;q=0.9,en;q=0.8",
        'Connection': "keep-alive",
        'Cookie': "sentiment_session=165f55113444b3c2ae9f7b19c063260fc9ac0eef; yq_access_token=eyJ0eXAiOiJqd3QiLCJhbGciOiJIUzI1NiJ9.eyJrZXkiOiJjbXNzIiwiaWQiOiJ2UTRrY0RpUHd2IiwiaXAiOiIxMTMuMTI4LjEyLjc0IiwiaWF0IjoxNTc1NzE3OTQ2LCJhcHAiOiJZUSJ9.StYyiB9NA56yGC0lj1vSQJSNLQWQkt5ifTnZgKVyp8A",
        'Host': "yuqing.bigcloudsys.cn",
        "Origin": "https://yuqing.bigcloudsys.cn",
        'Sec-Fetch-Mode': "cors",
        'Sec-Fetch-Site': "same-origin",
        'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36",
        'Cache-Control': "no-cache",
    }

    def start_requests(self):
        """Log in synchronously, then yield the time-windowed list request.

        The login is done with ``requests`` (outside Scrapy's async engine)
        because the session cookie must be known before the first crawl
        request is built.
        """
        username = 'xjsfy'
        password = 'cmss-yq'
        # The site expects sha256(md5(password)) as the submitted password.
        md5_pass = hashlib.md5(password.encode()).hexdigest()
        password_ = hashlib.sha256(md5_pass.encode()).hexdigest()
        headers = {
            'Host': "yuqing.bigcloudsys.cn",
            "Origin": "https://yuqing.bigcloudsys.cn",
            'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36",
        }
        params = {
            'username': username,
            'password': password_,
            'pwd_strength': '1'
        }
        post_login = 'https://yuqing.bigcloudsys.cn/apps/analysis/manage/validate_user'
        response = requests.post(url=post_login, data=params, headers=headers)
        # Serialize the session cookies into a Cookie header and install it
        # on the shared header dict used by every subsequent request.
        cookies = requests.utils.dict_from_cookiejar(response.cookies)
        cookie = ''
        for k in cookies:
            cookie += k + "=" + cookies[k] + "; "
        self.headers['Cookie'] = cookie
        # Query window: the last 60 minutes, URL-encoded for the query string.
        now = datetime.datetime.now()
        start_time = quote((now - datetime.timedelta(minutes=60)).strftime('%Y-%m-%d %H:%M:%S'))
        end_time = quote(now.strftime('%Y-%m-%d %H:%M:%S'))
        url = f"https://yuqing.bigcloudsys.cn/yq-api/themes/getYqList?comment=0&startDate={start_time}&endDate={end_time}&sortType=1&page=1&perPage=100&read=&showSubmit=1&client=web"
        yield scrapy.Request(url=url, method='GET', headers=self.headers, callback=self.parse_text,
                             dont_filter=True)

    def parse_text(self, response):
        """Parse the item list and issue one snapshot POST per item.

        Each item's metadata travels to ``parse`` via ``Request.meta``.
        """
        datas_ = json.loads(response.text)
        datas = datas_['data']
        for data in datas:
            # BUG FIX: the meta dict used to be created once outside this
            # loop, so every yielded request shared (and kept mutating) the
            # same dict — all callbacks saw the last item's metadata. Build
            # a fresh dict per item instead.
            dicts = {}
            url = data['url']
            dicts['url'] = url
            source_id = data['id']
            themeId = data['themeId']
            themeType = data['themeType']
            dicts['author'] = data['medianame']
            test_formats = data['source']
            formats = self.type.get(test_formats, test_formats)
            if formats == 'weibo':
                # Weibo posts get their native post ID extracted from the URL.
                weiboid = Get_weiboID().run(url)
                dicts['id'] = weiboid
            else:
                dicts['id'] = Utils.url_hash(url)
            dicts['formats'] = formats
            dicts['pubdate'] = data['pubtime']
            dicts['title'] = data['title']

            # The snapshot endpoint wants the article URL and id base64-encoded.
            # str() guards against the API returning `id` as an int, which
            # would make .encode() raise AttributeError.
            post_url = 'https://yuqing.bigcloudsys.cn/apps/analysis/topics/getSnapshotInfoData'
            base_url = base64.b64encode(url.encode()).decode()
            base_id = base64.b64encode(str(source_id).encode()).decode()
            params = {
                "params[arturl]": base_url,
                "params[component_index]": "4", "params[from]": "3",
                "params[log_key]": "themebrowse", "params[type]": str(themeType),
                "params[themeid]": str(themeId), "params[artid]": base_id}

            yield scrapy.FormRequest(post_url, method='POST', callback=self.parse,
                                     formdata=params, dont_filter=True,
                                     headers=self.headers, meta=dicts)

    def parse(self, response):
        """Combine the snapshot HTML with the carried metadata into an item."""
        item = CommandItem()
        data = json.loads(response.text)
        content = data['data']['snapshot']['content']
        item['url'] = response.meta['url']
        item['author'] = response.meta['author']
        item['id'] = response.meta['id']
        item['formats'] = response.meta['formats']
        item['dataSource'] = response.meta['formats']
        item['pubdate'] = response.meta['pubdate']
        item['title'] = response.meta['title']
        # Store both the stripped text and the raw HTML snapshot.
        item['content'] = remove_tags(content)
        item['html'] = content
        item['updateTime'] = str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        item['collectProcess'] = 'yuqingtongpython'
        item['serverIp'] = '113.128.12.74'
        yield item
