# -*- coding: utf-8 -*-
import json
import logging
import re
from urllib import request

import scrapy
from scrapy_redis.spiders import RedisSpider
from Netease import gl
from Netease.items import UserItem, RecordItem, SongItem
from Netease.util.AESUtil import PrpCrypt
from Netease.util.utils import utils


class NeteaseUserSpider(scrapy.Spider):
    """Crawl Netease Cloud Music users: profiles, play-count records and songs.

    The spider walks the follower graph through the encrypted ``weapi``
    endpoints (parameters are AES-encrypted by :class:`PrpCrypt`) and parses
    user home / song detail HTML pages.  It yields three item types:
    ``UserItem``, ``RecordItem`` and ``SongItem``.
    """

    name = 'UserSpider'

    # User home page (HTML profile).
    user_home = 'https://music.163.com/user/home?id={id}'
    # Followers of a user (encrypted weapi POST).
    user_followeds_url = 'https://music.163.com/weapi/user/getfolloweds?'
    user_followeds_base = '{{"userId":"{userid}","offset":"{offset}","total":"true","limit":"20"}}'

    # A user's play-count ranking (all-time + weekly, up to 1000 songs).
    user_record_url = 'https://music.163.com/weapi/v1/play/record?'
    user_record_base = '{{"uid":"{uid}","type":"-1","limit":"1000","offset":"0","total":"true"}}'

    # Song detail page (HTML).
    song_url = 'https://music.163.com/song?id={id}'

    # Page size encoded in ``user_followeds_base`` ("limit":"20") -- the
    # pagination step in parse_followeds must stay in sync with it.
    FOLLOWEDS_PAGE_SIZE = 20

    def __init__(self, name=None, **kwargs):
        super().__init__(name, **kwargs)

    @staticmethod
    def _to_int(text, default=-1):
        """Best-effort int() of a scraped string.

        Scraped nodes are frequently missing, so ``extract_first()`` may
        return None; return *default* instead of raising in that case.
        """
        try:
            return int(text)
        except (TypeError, ValueError):
            return default

    def start_requests(self):
        # Seed the crawl from a single known song page.  (An alternative seed
        # is to POST user_followeds_url for an initial user id -- see
        # parse_followeds, which builds exactly that request for pagination.)
        yield scrapy.Request(url=self.song_url.format(id='30352891'),
                             callback=self.parse_song,
                             meta={'download_timeout': 10, 'id': 30352891},
                             dont_filter=False)

    def parse_userinfo(self, response):
        """Parse a user home page; yield a play-record request and a UserItem.

        ``response.meta`` carries the follower dict from ``parse_followeds``,
        so any UserItem fields already present there are copied through.
        """
        user_id = response.meta['userId']
        # User level; -1 when the node is missing (the page layout varies for
        # some account types).
        level = self._to_int(
            response.xpath('//*[@id="j-name-wrap"]/span[3]/text()').extract_first())
        # Cumulative listen count, digits extracted from a label like
        # "累计听歌612首".
        count_string = response.xpath('//*[@id="rHeader"]/h4/text()').extract_first()
        count = -1
        if count_string:
            count = int(re.sub(r"\D", "", count_string))
        # Region, with the "所在地区：" label stripped; empty when absent.
        area_ = response.xpath(
            '//*[@id="head-box"]/dd/div[contains(@class, "inf s-fc3")]/span[1]/text()'
        ).extract_first()
        area = re.sub("所在地区：", "", area_) if area_ else ''
        # Follower / following counts gate further graph expansion.
        fans = self._to_int(
            response.xpath('//*[@id="fan_count"]/text()').extract_first(), default=0)
        follows = self._to_int(
            response.xpath('//*[@id="follow_count"]/text()').extract_first(), default=0)

        # Fetch this user's play-count ranking.
        p = self.user_record_base.format(uid=user_id)
        params = PrpCrypt.get_encrypt_params(p)
        yield scrapy.FormRequest(
            url=self.user_record_url,
            formdata={"params": params, "encSecKey": PrpCrypt.encSecKey},
            callback=self.parse_record,
            meta={'download_timeout': 10, 'userId': user_id, "params": params},
            dont_filter=False
        )

        # Crawling this user's followers (fans > 0) is currently disabled;
        # re-enable by building a user_followeds_base request here, as
        # parse_followeds does for pagination.
        # Crawling the people this user follows is not implemented yet.
        if follows > 0:
            pass

        # Persist the user, copying any item fields already present in meta.
        item = UserItem()
        for field in item.fields:
            if field in response.meta:
                item[field] = response.meta.get(field)
        item['level'] = level
        item['totalCount'] = count
        item['area'] = area
        yield item

    def parse_song(self, response):
        """Parse a song detail page into a SongItem.

        Regex matches on scraped anchors are guarded: a non-matching or
        missing node yields a default value instead of an AttributeError.
        """
        song_id = int(response.meta['id'])
        songname = response.xpath(
            '//body[1]/div[3]/div[1]/div/div/div[1]/div[1]/div[2]/div[1]/div/em/text()'
        ).extract_first()
        # Artists -- a song may credit several.
        artists = []
        anchors = response.xpath(
            '//body[1]/div[3]/div[1]/div/div/div[1]/div[1]/div[2]/p[1]/span/a').extract()
        for anchor in anchors:
            match = re.search(r'.*id=(\d*)">(.*)<', anchor, re.M | re.I)
            if match:
                artists.append({'artistId': match.group(1), 'artist': match.group(2)})
        # Album the song belongs to.
        album_id = -1
        album_name = None
        alb = response.xpath(
            '//body[1]/div[3]/div[1]/div/div/div[1]/div[1]/div[2]/p[2]/a').extract_first()
        if alb:
            match = re.search(r'..*id=(\d*)".*>(.*)<', alb, re.M | re.I)
            if match:
                album_id = int(match.group(1))
                album_name = match.group(2)
        comments = self._to_int(
            response.xpath('//body[1]/div[1]/span/span/text()').extract_first(), default=0)
        # MV id, 0 when the song has no MV link.
        mv_id = 0
        mv = response.xpath(
            '//body[1]/div[3]/div[1]/div/div/div[1]/div[1]/div[2]/div[1]/div/a/@href'
        ).extract_first()
        if mv:
            match = re.search(r'.*id=(\d*)', mv, re.M | re.I)
            if match:
                mv_id = int(match.group(1))
        item = SongItem()
        item['songId'] = song_id
        item['songname'] = songname
        item['artists'] = artists
        item['albumId'] = album_id
        item['albumname'] = album_name
        item['comments'] = comments
        item['mvId'] = mv_id
        yield item

    @staticmethod
    def _flatten_play_data(entries):
        """Flatten one weapi record list ([{score, song}, ...]) to plain dicts.

        Shared by the all-time and weekly charts in parse_record; key order is
        kept stable so the serialized JSON matches the previous layout.
        """
        flattened = []
        for data in entries or []:
            song = data.get('song') or {}
            album = song.get('al') or {}
            # A song may credit several artists.
            artists = [{'artistId': art.get('id'), 'artist': art.get('name')}
                       for art in song.get('ar') or []]
            flattened.append({
                'songId': song.get('id'),
                'songname': song.get('name'),
                'score': data.get('score'),
                'artists': artists,
                'albumId': album.get('id'),
                'albumname': album.get('name'),
            })
        return flattened

    def parse_record(self, response):
        """Parse the play-record JSON and yield a RecordItem for the user.

        The response carries two charts: 'allData' (all time) and 'weekData'
        (last week); both are flattened and stored as one JSON string.
        """
        result = json.loads(response.text)
        if result.get('code') != 200:
            return
        top = {
            'allData': self._flatten_play_data(result.get('allData')),
            'weekData': self._flatten_play_data(result.get('weekData')),
        }
        item = RecordItem()
        item['userId'] = response.meta['userId']
        item['top'] = json.dumps(top)
        yield item

    def parse_followeds(self, response):
        """Parse one follower page; paginate, then fan out to user home pages."""
        # The endpoint occasionally returns non-JSON (anti-crawl page).
        if not utils.is_json(response.text):
            return
        result = json.loads(response.text)

        # "more" means additional follower pages remain.
        if result.get("more"):
            user_id = response.meta['userid']
            # BUGFIX: the request asks for limit=20 (user_followeds_base) but
            # the offset used to advance by 100, silently skipping 80
            # followers per page.  Step by the actual page size instead.
            offset = response.meta['offset'] + self.FOLLOWEDS_PAGE_SIZE
            p = self.user_followeds_base.format(userid=user_id, offset=offset)
            # Encrypt the form payload as the web client does.
            params = PrpCrypt.get_encrypt_params(p)
            yield scrapy.FormRequest(
                url=self.user_followeds_url,
                formdata={"params": params, "encSecKey": PrpCrypt.encSecKey},
                callback=self.parse_followeds,
                meta={'download_timeout': 10, "params": params,
                      'userid': user_id, 'offset': offset},
                dont_filter=False
            )
        else:
            print('用户', response.meta['userid'], "的粉丝抓取完毕 more:", result.get("more"))

        # Each follower dict is passed along as meta so parse_userinfo can
        # copy its fields into the UserItem.
        for follower in result.get("followeds") or []:
            yield scrapy.Request(url=self.user_home.format(id=follower.get('userId')),
                                 callback=self.parse_userinfo,
                                 meta=follower,
                                 dont_filter=False)
