# -*- coding: utf-8 -*-

import time
import json
import random
import scrapy


from .dy_config import collections

class DySpider(scrapy.Spider):
    """Spider that replays view/tracking requests for dayu.com articles.

    It first fetches the author's latest article list from the dayu API
    (via blocking ``requests``, before the crawl starts), then for each of
    ``num`` iterations picks a random article and yields the batch of nine
    tracking requests whose URL templates live in ``dy_config.collections``.
    """

    name = 'dy'
    allowed_domains = ['dayu.com', 'uc.cn']

    def __init__(self, num, author_id, origin_ids=None):
        """
        :param num: number of random-article iterations to run (coerced to int).
        :param author_id: dayu author id whose articles are fetched.
        :param origin_ids: optional comma-separated origin ids; when given,
            only articles with a matching ``origin_id`` are used.
        """
        super(DySpider, self).__init__()
        self.num = int(num)
        self.author_id = author_id
        # Drop empty segments so origin_ids='' means "no filter" instead of
        # filtering everything out (''.split(',') == [''] is truthy, which
        # used to make the start_requests filter match nothing and crash
        # random.choice with an IndexError).
        self.origin_ids = [s for s in origin_ids.split(',') if s] if origin_ids else []

    def get_args(self):
        """Fetch the author's latest articles from the dayu API.

        :returns: the raw JSON response body as text.
        """
        # Local import keeps the blocking `requests` dependency out of the
        # module import path; only this bootstrap call uses it.
        import requests

        url = 'https://ff.dayu.com/contents/author/' + str(self.author_id)

        querystring = {"biz_id": "1002", "_size": "15", "_page": "1", "_order_type": "published_at", "status": "1",
                       "_fetch": "1", "uc_param_str": "frdnsnpfvecpntnwprdssskt"}
        self.log(url)
        headers = {
            'Accept': "application/json",
            'DNT': "1",
            'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36",
            'Sec-Fetch-Mode': "cors",
            'Cache-Control': "no-cache",
            'Host': "ff.dayu.com",
            'Accept-Encoding': "gzip, deflate",
            'Connection': "keep-alive",
            'cache-control': "no-cache"
        }

        # NOTE(review): verify=False disables TLS certificate validation —
        # confirm the endpoint really needs it before keeping this.
        # timeout added so a stalled API call cannot hang the crawl forever.
        response = requests.get(url, headers=headers, params=querystring,
                                verify=False, timeout=30)

        return response.text

    def start_requests(self):
        """Yield the stream of tracking requests for randomly chosen articles.

        :raises ValueError: when the API returns no articles, or the
            ``origin_ids`` filter matches none — previously this surfaced as
            an opaque IndexError from ``random.choice([])``.
        """
        data = self.get_args()
        self.log(data)
        payload = json.loads(data)
        kwargs_list = payload.get('data') or []
        if self.origin_ids:
            kwargs_list = [item for item in kwargs_list
                           if item.get('origin_id') in self.origin_ids]
        if not kwargs_list:
            raise ValueError('no articles available for author %s (origin_ids=%s)'
                             % (self.author_id, self.origin_ids))
        # Nine request templates: c1/c3 hit ff.dayu.com, the rest hit the
        # wmedia tracker. Order is significant and preserved.
        c1, c2, c3, c4, c5, c6, c7, c8, c9 = collections
        for _ in range(self.num):
            item = random.choice(kwargs_list)
            kwargs = {
                'content_id': item.get('content_id'),
                'origin_id': item.get('origin_id'),
                'author_id': item.get('author_id'),
                'appid': '070b5f1f4053',
                'uc_param_str': 'frdnsnpfvecpntnwprdssskt',
                'uuid': '2d795587-3a06-4bce-9473-66e5eacb3aa3'
            }
            yield self.dy_request(*c1, **kwargs)
            yield self.wmedia_request(*c2, **kwargs)
            yield self.dy_request(*c3, **kwargs)
            for conf in (c4, c5, c6, c7, c8, c9):
                yield self.wmedia_request(*conf, **kwargs)

    def format_url(self, url, **kwargs):
        """Fill a URL template, injecting a current millisecond timestamp.

        :param url: template with ``{placeholder}`` fields, including
            ``{timestampms}``.
        :returns: the formatted URL.
        """
        kwargs["timestampms"] = int(time.time()*1000)
        return url.format(**kwargs)

    def dy_request(self, *args, **kwargs):
        """Build a Scrapy request against ff.dayu.com.

        :param args: ``(url_template, method, body, priority)`` tuple from
            ``dy_config.collections``.
        :param kwargs: template substitution values (content_id, etc.).
        """
        url, method, body, priority = args
        url = self.format_url(url, **kwargs)
        headers = {
            'Host': "ff.dayu.com",
            'accept': "application/json",
            'dnt': "1",
            'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36",
            'sec-fetch-mode': "cors",
            'content-type': "application/x-www-form-urlencoded",
            'origin': "https://mparticle.uc.cn",
            'sec-fetch-site': "cross-site",
            'accept-language': "zh-CN,zh;q=0.9,en;q=0.8",
            'cache-control': "no-cache",
        }
        return scrapy.Request(url, callback=self.watch, method=method, body=body, headers=headers, priority=priority)

    def wmedia_request(self, *args, **kwargs):
        """Build a Scrapy request against the wmedia-track.uc.cn tracker.

        :param args: ``(url_template, method, body, priority)`` tuple from
            ``dy_config.collections``.
        :param kwargs: template substitution values (content_id, etc.).
        """
        url, method, body, priority = args
        url = self.format_url(url, **kwargs)
        headers = {
            'Host': "wmedia-track.uc.cn",
            'Sec-Fetch-Mode': "no-cors",
            'DNT': "1",
            'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36",
            'Accept': "image/webp,image/apng,image/*,*/*;q=0.8",
            'Sec-Fetch-Site': "same-site",
            'Accept-Language': "zh-CN,zh;q=0.9,en;q=0.8",
            'cache-control': "no-cache",
        }
        return scrapy.Request(url, callback=self.watch, method=method, body=body, headers=headers, priority=priority)

    def watch(self, response):
        """Log the raw response body; no items are produced."""
        self.log(response.text)



