# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
from urllib.parse import quote
import json

from qqmusic.items import SingerItem

"""
Fetch the names of all singers/groups from QQ Music.
"""
class ArtistSpider(scrapy.Spider):
    """Crawl the complete singer/group list from QQ Music's singer-list API.

    One GET request is issued per (sex, page) combination against the
    ``musicu.fcg`` endpoint; each response page yields ``SingerItem``s.
    """

    name = 'artist'
    allowed_domains = ['y.qq.com']
    start_urls = ['https://u.y.qq.com/cgi-bin/musicu.fcg']  # ['http://y.qq.com/portal/singer_list.html/']
    headers = {
        'Content-Type': 'application/json',
        "accept": "application/json",
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36"
    }
    # sex: 0 = male, 1 = female, 2 = group
    # page_range[i] is the number of result pages for sex[i]
    arguments = {
        "sex": [0, 1, 2],
        "page_range": [160, 60, 76]
    }

    def parse(self, response):
        """Parse one singer-list JSON page and yield one SingerItem per singer.

        :param response: scrapy Response whose body is the API's JSON payload.
        :raises ValueError: when either the outer status ``code`` or the
            nested ``singerList.code`` is non-zero (e.g. -500001 on failure).
        """
        data = json.loads(response.text)
        outer_code = data['code']
        # Only touch the nested payload when the outer call succeeded; the
        # 'singerList' key may be absent on an outer failure.
        inner_code = data['singerList']['code'] if outer_code == 0 else None
        if outer_code != 0 or inner_code != 0:
            # Bug fix: the original message always printed the OUTER code,
            # which is 0 when only the nested singerList call failed.
            raise ValueError(
                "bad response code (outer=%s, singerList=%s)"
                % (outer_code, inner_code))

        payload = data['singerList']['data']
        for singer in payload['singerlist']:
            item = SingerItem()
            item['name'] = singer['singer_name']
            item['pic'] = singer['singer_pic']
            # -100 means "all sexes" in the API; normalize it to -1 locally.
            item['sex'] = payload['sex'] if payload['sex'] != -100 else -1
            item['singer_id'] = singer['singer_id']
            item['singer_mid'] = singer['singer_mid']
            yield item

    def start_requests(self):
        """Generate one request per (sex, page) combination.

        ``dont_filter=True`` is required because every request shares the same
        base URL and differs only in the query string.
        """
        for url in self.start_urls:
            for sex, page_count in zip(self.arguments["sex"],
                                       self.arguments["page_range"]):
                for page in range(1, page_count + 1):
                    cur_url = ArtistSpider.build_url(
                        url, {"cur_page": page, "sex": sex})
                    yield Request(cur_url,
                                  callback=self.parse,
                                  method="GET",
                                  dont_filter=True,
                                  headers=self.headers)

    @staticmethod
    def my_urlencode(text):
        """Percent-encode *text* by rewriting repr()'s ``\\x..`` escapes as ``%..``.

        NOTE(review): apparently unused and unreliable (only covers bytes that
        repr() renders as ``\\x`` escapes); prefer ``urllib.parse.quote``.
        """
        return repr(text).replace(r'\x', '%')[1:-1]

    @staticmethod
    def build_url(base_url, args):
        """Build the singer-list request URL for one page.

        :param base_url: API endpoint, e.g. ``https://u.y.qq.com/cgi-bin/musicu.fcg``.
        :param args: dict with ``'cur_page'`` (1-based page number) and
            ``'sex'`` (0 male / 1 female / 2 group).
        :return: full GET URL with the compact JSON payload percent-encoded
            into the ``data`` query parameter.

        Sample parameters:
        -: getUCGI6422837924402012
        g_tk: 5381
        loginUin: 0
        hostUin: 0
        format: json
        inCharset: utf8
        outCharset: utf-8
        notice: 0
        platform: yqq.json
        needNewCode: 0
        data: {"comm":{"ct":24,"cv":0},"singerList":{"module":"Music.SingerListServer","method":"get_singer_list","param":{"area":-100,"sex":-100,"genre":-100,"index":-100,"sin":560,"cur_page":8}}}
        """
        data_params = {
            "comm": {
                "ct": 24,
                "cv": 0
            },
            "singerList": {
                "module": "Music.SingerListServer",
                "method": "get_singer_list",
                "param": {
                    "area": -100,   # -100 all; 200 mainland; 2 HK/TW; 5 West; 4 JP; 3 KR; 6 other
                    "genre": -100,  # -100 all; 1 pop; 6 hip-hop; 2 rock; 4 electronic; 3 folk;
                                    # 8 R&B; 10 folk song; 9 easy listening; 5 jazz; 14 classical;
                                    # 25 country; 20 blues
                    "index": -100,  # first letter of the singer's name; -100 = all
                    "sin": (args['cur_page'] - 1) * 80,  # result offset; pages hold 80 singers
                    "sex": args['sex'],  # -100 all; 0 male; 1 female; 2 group
                    "cur_page": args["cur_page"],  # current page, 1-based
                }
            }
        }

        fixed_options = {
            "-": "getUCGI7925720615259697",
            "g_tk": 5381,
            "loginUin": 0,
            "hostUin": 0,
            "format": "json",
            "inCharset": "utf8",
            "outCharset": "utf-8",
            "notice": 0,
            "platform": "yqq.json",
            "needNewCode": 0,
            # Compact JSON (no whitespace), percent-encoded once here.
            "data": quote(json.dumps(data_params, separators=(',', ':'))),
        }

        # Join manually rather than via urlencode() so the already-quoted
        # 'data' value is not percent-encoded a second time. This also drops
        # the trailing '&' the previous implementation left behind.
        query = "&".join("%s=%s" % (k, v) for k, v in fixed_options.items())
        return base_url + "?" + query