# -*- coding: utf-8 -*-
import scrapy
from scrapy import signals
from scrapy.http import Request
import mysql.connector
from mysql.connector import errorcode
from mysql.connector.errors import Error
from urllib.parse import quote
import json

from qqmusic.items import SongItem, PerformanceItem


class SongSpider(scrapy.Spider):
    """Fetch per-album song lists from the QQ Music JSON API.

    Album mids/ids are read page-by-page from the local MySQL table
    ``tb_album``; one GET request is issued per album and every song in the
    response is yielded as a ``SongItem`` (with nested ``PerformanceItem``
    entries, one per singer).
    """

    name = 'song'
    allowed_domains = ['y.qq.com']
    start_urls = ['https://u.y.qq.com/cgi-bin/musicu.fcg']
    headers = {
        'Content-Type': 'application/json',
        "accept": "application/json",
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36"
    }

    # Class-level defaults so engine_stoped()/start_requests() are safe even
    # when engine_started() failed to establish the DB connection (previously
    # self.connection did not exist at all in that case -> AttributeError).
    connection = None
    cursor = None

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Build the spider and hook DB setup/teardown to engine signals."""
        spider = cls(*args, **kwargs)
        spider._set_crawler(crawler)
        crawler.signals.connect(spider.engine_started, signal=signals.engine_started)
        # NOTE: the handler keeps its historical misspelled name ("stoped")
        # so any external references to the method keep working.
        crawler.signals.connect(spider.engine_stoped, signal=signals.engine_stopped)
        return spider

    def engine_started(self):
        """Open the MySQL connection and cursor when the crawl engine starts.

        On failure the error is logged and ``self.connection``/``self.cursor``
        stay ``None``; start_requests() checks for that.
        """
        try:
            self.connection = mysql.connector.connect(user="root", password="", host="localhost", database="db_music")
            self.cursor = self.connection.cursor()
        except Error as err:
            if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
                self.logger.error("Something is wrong with your user name or password")
            elif err.errno == errorcode.ER_BAD_DB_ERROR:
                self.logger.error("Database does not exist")
            else:
                self.logger.error(err)

    def engine_stoped(self):
        """Close the cursor and connection when the crawl engine stops."""
        if self.connection is not None:
            try:
                if self.cursor is not None:
                    self.cursor.close()
                self.connection.close()
            except Exception as e:
                self.logger.warning(e)

    def parse(self, response):
        """Parse one album-song-list response into ``SongItem`` objects.

        Raises:
            ValueError: if either response status code is non-zero.
        """
        data = json.loads(response.text)
        # Explicit check instead of ``assert`` so validation survives `python -O`.
        if data['code'] != 0 or data['albumSonglist']['code'] != 0:
            raise ValueError("Code is not 0 (%s)" % (data['code']))

        data = data['albumSonglist']['data']
        for song in data['songList']:
            info = song['songInfo']
            item = SongItem()
            item['name'] = info['name']
            item['title'] = info['title']
            item['subtitle'] = info['subtitle']
            item['isonly'] = info['isonly']
            item['language'] = info['language']
            item['genre'] = info['genre']
            item['interval'] = info['interval']
            # API spells the key "time_public"; our item field is "time_publish".
            item['time_publish'] = info['time_public']
            item['qqmusic_song_mid'] = info['mid']
            item['qqmusic_song_id'] = info['id']
            item['qqmusic_album_mid'] = info['album']['mid']
            item['qqmusic_album_id'] = info['album']['id']
            item['singers'] = []
            for singer in info['singer']:
                perform = PerformanceItem()
                perform['singer_mid'] = singer['mid']
                perform['singer_id'] = singer['id']
                perform['singer_name'] = singer['name']
                perform['song_mid'] = info['mid']
                perform['song_id'] = info['id']
                perform['song_name'] = info['name']
                item['singers'].append(perform)
            yield item

    def start_requests(self):
        """Yield one album-song-list request per row of ``tb_album``.

        Pages through the table 10 rows at a time until a page comes back
        empty; each row yields one GET request handled by parse().
        """
        if self.cursor is None:
            # DB connection failed in engine_started(); nothing to crawl.
            self.logger.error("No database cursor available; cannot build requests")
            return
        # Fetch albums from the DB, then pull each album's songs from QQ Music.
        query_sql = """SELECT `qqmusic_album_mid`, `qqmusic_album_id` FROM tb_album LIMIT {}, 10;"""
        for url in self.start_urls:
            offset_tb = 0
            while True:
                res_count = 0
                self.cursor.execute(query_sql.format(offset_tb))
                for album_mid, album_id in self.cursor:
                    res_count += 1
                    args = {"album_mid": album_mid, "album_id": album_id}
                    cur_url = SongSpider.build_url(url, args=args)
                    yield Request(cur_url, callback=self.parse, method="GET", headers=self.headers, dont_filter=True)

                if res_count == 0:
                    break
                offset_tb += res_count

    @staticmethod
    def build_url(base_url, args):
        """Return the full musicu.fcg GET URL for one album.

        Args:
            base_url: endpoint URL without a query string.
            args: dict with ``album_mid`` and ``album_id`` keys.

        Returns:
            ``base_url`` plus the fixed query parameters and a URL-encoded
            JSON ``data`` payload describing the GetAlbumSongList call.
        """
        fixed_options = {
            "-": "albumSonglist9256435990691609",
            "g_tk": 5381,
            "loginUin": 0,
            "hostUin": 0,
            "format": "json",
            "inCharset": "utf8",
            "outCharset": "utf-8",
            "notice": 0,
            "platform": "yqq.json",
            "needNewCode": 0,
        }

        # Request payload; num=-1 asks for every song on the album.
        data_params = {
            "comm": {
                "ct": 24,
                "cv": 10000
            },
            "albumSonglist": {
                "method": "GetAlbumSongList",
                "param": {
                    "albumMid": args['album_mid'],
                    "albumID": args['album_id'],
                    "begin": 0,
                    "num": -1,
                    "order": 2
                },
                "module": "music.musichallAlbum.AlbumSongList"
            }
        }
        fixed_options['data'] = quote(json.dumps(data_params, separators=(",", ":")))

        # join() avoids the dangling trailing "&" the old concat loop produced.
        query = "&".join("%s=%s" % (k, v) for k, v in fixed_options.items())
        return base_url + "?" + query


"""
重启抓取进程：
```shell
sudo kill -STOP 28581 # 暂停进程
sudo ps -aux | grep scrapy # 查看暂停的进程
sudo kill -CONT 28581  # 继续进程
```
"""
