import os
import random
import time
import uuid
from urllib.parse import urlencode
from hashlib import md5
import scrapy
import json

from NewsSpider.tools.utils import Utils


class xxxspider(scrapy.Spider):
    """Token-refresh spider.

    Reads a previously saved token from ``tools/zntj.json``, builds a signed
    form payload (MD5 over the sorted parameter values — see ``Utils.md5_encrypt``,
    presumably; verify against its implementation), and POSTs it to the
    refresh-token endpoint as ``application/x-www-form-urlencoded`` data.

    Original author notes (translated): for testing with a standalone proxy;
    when POSTing JSON payloads, the data sometimes has to be serialized with
    ``json.dumps`` and passed as the request ``body``.
    """
    name = 'xxx'

    custom_settings = {
        'LOG_LEVEL': 'DEBUG'
    }

    def start_requests(self):
        """Yield a single signed POST request to the token-refresh API."""
        headers = {
            'User-Agent': 'Mozilla/5.0 (Linux; Android 7.1.2; G011A Build/N2G48H; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/52.0.2743.100 Safari/537.36',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Accept-Encoding': ' gzip, deflate',
            'Connection': 'keep-alive'
        }
        # Token file lives in the sibling "tools" directory of the CWD's parent.
        # NOTE(review): this assumes the spider is launched from a specific
        # working directory — confirm against the project layout.
        filepath = os.path.join(os.path.dirname(os.getcwd()), "tools", "zntj.json")
        with open(filepath, 'r', encoding='utf-8') as f:
            token = json.load(f)['data']['token']
        params = {'appid': 'dKSXxi1LN9jcfbxJ',
                  'appsecret': 'xTzwemnZIRebS1Ny5E08qQgfjv6Jvjl4',
                  'expire_token': token}
        # Signature scheme: concatenate the parameter VALUES in sorted order,
        # then MD5-digest the result (via the project's Utils helper).
        md5_params = ''.join(sorted(params.values()))
        signature = Utils.md5_encrypt(md5_params)
        # Was `parmas2 = params` — a misspelled alias of the same dict, not a
        # copy. The signature can simply be added to `params` directly.
        params['signature'] = signature
        payload = urlencode(params)
        posturl = 'https://api.zounai.com/v2/refresh_token'
        yield scrapy.Request(posturl, method='POST', headers=headers, body=payload,
                             dont_filter=True)

    def parse(self, response):
        """Default callback: dump the raw response body for inspection."""
        print(response.text)


if __name__ == '__main__':
    # Convenience entry point: run this spider directly without the
    # `scrapy crawl` CLI.
    from scrapy.cmdline import execute

    execute(['scrapy', 'crawl', 'xxx'])
