# -*- coding: utf-8 -*-
import datetime
import os
import platform
import re
import socket
from urllib.parse import urljoin

import requests
import scrapy
from scrapy import Request
from scrapy.loader import ItemLoader
from scrapy.loader.processors import MapCompose
from fake_useragent import UserAgent

from ..items import PropertiesItem


class WyySpider(scrapy.Spider):
    """Crawl NetEase Cloud Music hot playlists and save every track as MP3.

    ``parse`` walks the paginated playlist index and yields one request per
    playlist; ``parse_item`` downloads each song of that playlist into a
    per-playlist directory created by ``mkdir``.
    """

    name = 'wyy'
    allowed_domains = ['music.163.com']
    start_urls = ['https://music.163.com/discover/playlist/?order=hot&cat=全部&limit=35&offset=0']

    # Random desktop user agent, picked once at class-definition time and
    # reused for every direct `requests` download.
    HEADERS = {
        'user-agent': UserAgent().random
    }

    def mkdir(self, uri):
        """Create (if missing) and return the download directory for *uri*.

        The base path depends on the host OS: a local music folder on macOS
        ('Darwin'), otherwise a Synology NAS share.
        """
        if platform.system() == 'Darwin':
            # Mac
            uri = os.path.join(r'/Users/hoipeng/Music/网易云音乐/', uri)
        else:
            # Synology NAS
            uri = os.path.join(r'/volume1/music/网易云热门歌单/', uri)

        if not os.path.exists(uri):
            os.makedirs(uri)
        return uri

    def parse_item(self, response):
        """Download every song of one playlist page to the local disk."""
        # Playlist title from <title>; guard against a missing node so a
        # malformed page cannot crash the callback with AttributeError.
        song_sheet_name = (response.xpath('/html/head/title/text()').extract_first() or '').replace('\xa0', '')
        # Playlist description (extracted for parity with the original; unused).
        song_sheet_desc = response.xpath('//*[contains(@class, "intr f-brk")]//text()').extract()

        # '/' in the playlist title would be read as a path separator.
        uri = self.mkdir(song_sheet_name.replace('/', '\\'))

        # Selector.re() with two groups returns a flat [id, name, id, name, ...]
        # list; re-pair it with the zip of the even/odd slices below.
        song_lists = response.selector.re(r'<a href="/song\?id=(\d+)">(.*?)</a>')

        for song_id, song_name in zip(song_lists[::2], song_lists[1::2]):
            download_url = 'http://music.163.com/song/media/outer/url?id={}'.format(song_id)
            # Sanitize the track title too: a '/' in it would otherwise point
            # the file into a non-existent sub-directory and fail the open().
            file_path = r'{}/{}.mp3'.format(uri, song_name.replace('/', '\\'))
            try:
                # timeout keeps one dead download from hanging the whole crawl.
                resp = requests.get(download_url, headers=self.HEADERS, timeout=30)
                with open(file_path, 'wb') as f:
                    f.write(resp.content)
                print('单曲：{} 下载完成'.format(song_name))
            except (OSError, requests.RequestException) as exc:
                # FileNotFoundError is an OSError subclass, so one handler
                # covers both; report instead of silently swallowing.
                print('单曲：{} 下载失败 {}'.format(song_name, exc))
        print('歌单：{} 下载完成'.format(song_sheet_name))

    def parse(self, response):
        """Parse one page of the hot-playlist index.

            @url http://music.163.com/discover/playlist/?order=hot
            @returns items 1
            @scrapes song_sheet_name song_sheet_urls
            @scrapes url project spider server date
        """
        song_sheet_names = response.xpath('//*[@id="m-pl-container"]/li[*]/p[1]/a/text()').extract()
        song_sheet_urls = response.xpath('//*[@id="m-pl-container"]/li[*]/p[1]/a/@href').extract()
        for song_sheet_url, song_sheet_name in zip(song_sheet_urls, song_sheet_names):
            print('歌单：{} 开始下载'.format(song_sheet_name))
            yield Request(urljoin(response.url, song_sheet_url), callback=self.parse_item)

        # Follow pagination; the last page marks "next" with a dummy
        # javascript:void(0) href instead of omitting the link.
        next_selector = response.xpath('//*[contains(@class,"zbtn znxt")]//@href').extract_first()
        if next_selector is not None and next_selector != 'javascript:void(0)':
            yield Request(urljoin(response.url, next_selector), callback=self.parse)
        else:
            print('全部歌单下载完成')
