# -*- coding: utf-8 -*-
# @Time    : 2019/10/29 8:43
# @Author  : Damn7Kx
# @Software: PyCharm
from w3lib.html import remove_tags
import scrapy
from hashlib import md5
import time
import datetime
from NewsSpider.items import CommandItem
from NewsSpider.tools.utils import Get_weiboID,Utils
import json

class QingboSpider(scrapy.Spider):
    """Spider for the gsdata.cn ("清博" / Qingbo) public-opinion monitor API.

    Request flow:
        start_requests -> parse_num    per (date window, province): per-platform counts
        parse_num      -> parse_detail one request per platform result page
        parse_detail   -> parse        one request per article in a page listing
        parse          -> CommandItem  one item per article detail

    Every API call is signed: md5 over the alphabetically ordered form fields
    plus a shared secret and the current unix timestamp (sent as ``stime``).
    """

    name = 'qingbo'
    allowed_domains = ['yq.gsdata.cn']
    start_urls = ['http://yq.gsdata.cn/api/monitor/info/list']

    # Provinces / municipalities used to shard every query by region.
    citys = ['山东省', '北京市', '辽宁省', '天津市', '上海市', '重庆市', '河北省', '河南省',
             '云南省', '黑龙江省', '湖南省', '安徽省', '新疆自治区', '江苏省', '浙江省',
             '陕西省', '吉林省', '福建省', '贵州省', '广东省', '甘肃省', '青海省',
             '西藏自治区', '内蒙古自治区', '江西省', '湖北省', '广西省',
             '四川省', '宁夏自治区', '海南省', '台湾省', '香港特别行政区', '澳门特别行政区']

    # Maps the API's platform code to the item's "formats" value (the
    # "reversed" type table from the original author's comment).
    type = {'web': 'website', 'bbs': 'bbs', 'weibo': 'weibo', "wx": "weixin"}

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.ProxyIPMiddleware': 544,
        },
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }

    # Shared request-signing secret and browser identity, previously
    # duplicated verbatim in three places.
    _SECRET = 'g1YoVCkanMOH6vrU'
    _UA = ("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
           "(KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36")

    @staticmethod
    def _cookie(stime):
        """Return the session cookie captured from a logged-in browser.

        Only the trailing ``Hm_lpvt`` timestamp varies per request; the rest
        is a fixed session blob and will expire when the account session does.
        """
        return (
            f"53gid2=10935109385006; 53revisit=1572264385655; _gsdataCL=WzAsIjE1MjYzNDM1MzY3IiwiMjAxOTEwMjgyMjM0MTgiLCJhOTAwM2JhOGM1YTcwOWI4NTI4YTliOTk5NmYyYzg5MSIsMjM4MjA4XQ%3D%3D; _gsdataOL=238208%3B15263435367%3B%7B%220%22%3A%22%22%2C%221%22%3A%22%22%2C%222%22%3A%22%22%2C%223%22%3A%22%22%2C%224%22%3A%22%22%2C%225%22%3A%22%22%2C%2299%22%3A%2220191028%22%7D%3Bcd19dcc2aff0fae2d57288e71f00da8d; Hm_lvt_293b2731d4897253b117bb45d9bb7023=1572358998,1572364908,1572397722,1572568518; visitor_type=old; 53gid0=10935109385006; 53gid1=10935109385006; 53kf_72213613_from_host=yq.gsdata.cn; 53kf_72213613_keyword=; 53kf_72213613_land_page=http%253A%252F%252Fyq.gsdata.cn%252F; kf_72213613_land_page_ok=1; 53uvid=1; onliner_zdfq72213613=0; Hm_lpvt_293b2731d4897253b117bb45d9bb7023={stime}"
        )

    def yield_time(self):
        """Yield consecutive one-day ``(start, end)`` ISO date-string pairs.

        NOTE(review): the first pair starts one day *before* ``begin``
        (``days=i-1``), so the crawl window is 2019-08-15 .. 2019-08-20.
        This looks intentional (include the day preceding the configured
        start) but should be confirmed against the crawl requirements.
        """
        begin = datetime.date(2019, 8, 16)
        end = datetime.date(2019, 8, 20)
        for i in range((end - begin).days + 1):
            day = begin + datetime.timedelta(days=i - 1)
            day_ = begin + datetime.timedelta(days=i)
            yield str(day), str(day_)

    def start_requests(self):
        """POST one per-platform count query per (date window, province).

        ``data[sid]`` (53711) identifies the monitoring topic and may need
        updating when a different topic is configured (original author note).
        """
        for start_day, end_day in self.yield_time():
            startDate = f"{start_day} 00:00:00"
            endDate = f"{end_day} 00:00:00"
            for city in self.citys:
                stime = int(time.time())
                data = {
                    "data[sid]": '53711',
                    "data[searchMode]": '1',
                    "data[mediaLevel]": "",
                    "data[startDate]": startDate,
                    "data[endDate]": endDate,
                    "data[province]": city,
                    "data[city]": "",
                    "data[emotion]": "",
                }

                # Signature: sorted field names, secret, and timestamp.
                sign = md5(
                    f"city=&emotion=&endDate={endDate}&mediaLevel=&province={city}"
                    f"&searchMode=1&secret={self._SECRET}&sid=53711"
                    f"&startDate={startDate}&stime={stime}".encode()
                ).hexdigest()

                headers = {
                    "User-Agent": self._UA,
                    "Host": "yq.gsdata.cn",
                    "Origin": "http://yq.gsdata.cn",
                    "Cookie": self._cookie(stime),
                    "sign": sign,
                    "stime": str(stime),
                }

                yield scrapy.FormRequest(
                    url='http://yq.gsdata.cn/api/monitor/info/media-platform',
                    formdata=data, headers=headers, callback=self.parse_num,
                    method="POST",
                    meta={'startDate': startDate, "endDate": endDate, "province": city})

    def parse_num(self, response):
        """Read per-platform article counts and request every result page.

        The response's ``data`` maps platform codes (``web``/``wx``/``weibo``...)
        to ``{'num': <count>}``, plus a ``total`` entry that is skipped.
        """
        datas = json.loads(response.text)['data']
        for platform in datas:
            if platform == 'total':
                continue
            num = datas[platform]['num']
            stime = int(time.time())
            # 20 results per page; the platform only serves the first 500
            # pages (10 000 items).  Ceil division fixes the original
            # ``range(1, num // 20)`` off-by-one, which dropped the last
            # partial page and requested nothing at all when num < 40.
            last_page = min(-(-num // 20), 500)
            for page in range(1, last_page + 1):
                data = {
                    "data[sid]": '53711',
                    "data[page]": str(page),
                    "data[searchMode]": '1',
                    "data[mediaLevel]": "",
                    "data[platformType][0]": platform,
                    "data[startDate]": response.meta['startDate'],
                    "data[perpage]": '20',
                    "data[endDate]": response.meta['endDate'],
                    "data[province]": response.meta['province'],
                    "data[city]": "",
                    "data[emotion]": "",
                    "data[match]": "1",
                    "data[sort]": "desc",
                    "data[searchType]": '3',
                    "data[keywords]": ""
                }

                startDate = response.meta['startDate']
                endDate = response.meta['endDate']
                province = response.meta['province']
                sign = md5(
                    f"city=&emotion=&endDate={endDate}&keywords=&match=1"
                    f"&mediaLevel=&page={page}&perpage=20&province={province}"
                    f"&searchMode=1&searchType=3&secret={self._SECRET}&sid=53711"
                    f"&sort=desc&startDate={startDate}&stime={stime}".encode()
                ).hexdigest()

                headers = {
                    "User-Agent": self._UA,
                    "Host": "yq.gsdata.cn",
                    "Origin": "http://yq.gsdata.cn",
                    "Cookie": self._cookie(stime),
                    "sign": sign,
                    "stime": str(stime),
                }

                yield scrapy.FormRequest(
                    url='http://yq.gsdata.cn/api/monitor/info/list',
                    headers=headers, formdata=data,
                    callback=self.parse_detail, method="POST")

    def parse_detail(self, response):
        """For every article in a page listing, request its full detail."""
        stime = int(time.time())
        entries = json.loads(response.text)['data']['list']
        for entry in entries:
            news_id = entry['newsId']
            sign = md5(
                f"newsId={news_id}&secret={self._SECRET}&stime={stime}".encode()
            ).hexdigest()

            headers = {
                "Accept": "application/json, text/plain, */*",
                "Accept-Encoding": "gzip, deflate",
                "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
                "Cookie": self._cookie(stime),
                "Host": "yq.gsdata.cn",
                "Proxy-Connection": "keep-alive",
                "Referer": "http://yq.gsdata.cn/detail?id={0}".format(news_id),
                "sign": sign,
                "stime": str(stime),
                "User-Agent": self._UA,
            }

            url = "http://yq.gsdata.cn/api/monitor/info/detail?newsId=%s" % news_id
            yield scrapy.Request(url=url, callback=self.parse, headers=headers)

    def parse(self, response):
        """Build a CommandItem from one article-detail JSON response."""
        # json.loads replaces the original eval(): never eval remote data
        # (arbitrary code execution), and eval also breaks on JSON
        # true/false/null tokens.
        data = json.loads(response.text)['data']
        item = CommandItem()
        url = data['newsUrl']
        item['url'] = url
        item['author'] = data['mediaName']
        # Unknown platform codes fall back to 'other'.
        formats = self.type.get(data['platform'], 'other')
        if formats == 'weibo':
            # Weibo URLs carry the post id; extract it instead of hashing.
            item['id'] = Get_weiboID().run(url)
        else:
            item['id'] = Utils.url_hash(url)
        item['formats'] = formats
        item['dataSource'] = '清博'
        item['pubdate'] = data['newsPosttime']
        item['title'] = data['newsTitle']
        item['content'] = remove_tags(data['newsContent'])
        item['html'] = data['newsContent']
        item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['collectProcess'] = 'qingbopython'
        item['serverIp'] = '113.128.12.74'
        yield item





