# encoding=utf-8
import re
from pprint import pprint
from openpyxl import Workbook
from openpyxl import load_workbook
from openpyxl.styles import PatternFill
from selenium import webdriver
from lxml import etree
import json
import time
import random

class Netease_spider:
    """Crawl NetEase Cloud Music (music.163.com) playlist categories with
    Selenium, collect per-song details, then export the results to
    ``netease.json`` and a styled Excel workbook ``网易云歌单.xlsx``.

    Note: the site renders playlist content inside an iframe named
    ``g_iframe``; every page load must switch into that frame before the
    page source is useful.
    """

    def __init__(self):
        # To run headless, configure Options and pass them to Chrome, e.g.:
        #   opt = webdriver.chrome.options.Options()
        #   opt.add_argument('--headless')
        #   self.browser = webdriver.Chrome(options=opt)
        self.browser = webdriver.Chrome()
        self.originURL = 'https://music.163.com/#/discover/playlist'
        self.data = list()  # accumulated song records across all playlists

    def get_page(self, url):
        """Load *url* and return the HTML of the ``g_iframe`` frame.

        :param url: full playlist-listing URL
        :returns: page source (str) of the content iframe
        """
        self.browser.get(url)
        self.browser.switch_to.frame('g_iframe')
        return self.browser.page_source

    def parse4data(self, html, mid_class):
        """Parse one playlist-listing page and scrape the first playlist.

        :param html: page source of a playlist-listing page
        :param mid_class: dict with keys 中间分类/大分类 merged into each record
        :returns: list of song dicts with keys 歌曲链接/歌名/歌手/专辑 plus the
                  playlist-level 播放量/歌单名称 and category info
        """
        html_elem = etree.HTML(html)
        # Play count and title of the FIRST playlist only (li[1]) — this
        # deliberately matches the song_link[:1] slice below.
        play_num = html_elem.xpath('//ul[@id="m-pl-container"]/li[1]/div/div/span[@class="nb"]/text()')
        song_title = html_elem.xpath('//ul[@id="m-pl-container"]/li[1]/p[1]/a/@title')
        # Links to every playlist on the page (only the first is visited).
        song_href = html_elem.xpath('//ul[@id="m-pl-container"]/li/p[1]/a/@href')
        song_link = ['https://music.163.com/#' + item for item in song_href]

        data1 = []  # FIX: initialize so an empty page can't raise UnboundLocalError
        for li in song_link[:1]:
            self.browser.get(li)
            # FIX: switch_to_frame() was removed in Selenium 4.
            self.browser.switch_to.frame('g_iframe')
            html_gedan_li = etree.HTML(self.browser.page_source)
            # Song links, names, artists, and albums from the playlist table.
            song = html_gedan_li.xpath('//div[@class="j-flag"]/table/tbody/tr//span[@class="txt"]/a/@href')
            song_link_list = ['https://music.163.com' + i for i in song]
            song_name = html_gedan_li.xpath('//div[@class="j-flag"]/table/tbody/tr//span[@class="txt"]/a/b/@title')
            songer = html_gedan_li.xpath('//div[@class="j-flag"]/table/tbody/tr/td[4]/div/@title')
            album = html_gedan_li.xpath('//div[@class="j-flag"]/table/tbody/tr/td[5]/div/a/@title')
            data1 = list(map(lambda a, b, c, d: {'歌曲链接': a, '歌名': b, '歌手': c, '专辑': d},
                             song_link_list, song_name, songer, album))

        # Merge playlist-level info and category labels into every song record.
        data = list(map(lambda a, b: {'播放量': a, '歌单名称': b}, play_num, song_title))
        for i in data1:
            i.update(mid_class)
            for j in data:
                i.update(j)

        return data1

    def parse4link(self, html):
        """Return the next-page URL, or None when pagination should stop.

        Stops when there is no "next" link, or when the next link points at
        offset=35 (i.e. only the first page of each category is crawled).
        """
        html_elem = etree.HTML(html)
        href = html_elem.xpath('//div[@id="m-pl-pager"]/div[@class="u-page"]/a[@class="zbtn znxt"]/@href')
        # FIX: guard the empty-result case before indexing href[0].
        if not href:
            return None
        if re.search(r'/discover/playlist/\?order=hot&cat=.*&limit=35&offset=35', str(href)):
            return None
        return 'https://music.163.com/#' + href[0]

    @staticmethod
    def _play_count(text):
        """Convert a display play count such as '4万' or '3.5万' to an int.

        FIX: the original ``replace('万', '0000')`` breaks on fractional
        counts ('3.5万' → '3.50000' → ValueError); multiply by 10000 instead.
        """
        if '万' in text:
            return int(float(text.replace('万', '')) * 10000)
        return int(text)

    def crawl(self):
        """Walk every category, scrape playlists, sort by play count, export."""
        print('爬取数据')
        self.browser.get(self.originURL)
        self.browser.switch_to.frame("g_iframe")  # FIX: switch_to_frame removed in Selenium 4
        html_elem = etree.HTML(self.browser.page_source)
        # Top-level category groups (语种, 风格, 场景, 情感, 主题).
        for li in html_elem.xpath('//div[@class="bd"]/dl'):
            big_class = {"大分类": li.xpath("./dt/text()")}
            # Each sub-category link inside the group.
            for i in li.xpath('./dd/a'):
                mid_class_li = {"中间分类": i.xpath("./text()")}
                mid_class_li.update(big_class)
                for j in i.xpath("./@href[1]"):
                    html = self.get_page("https://music.163.com" + j)
                    self.data.extend(self.parse4data(html, mid_class_li))
                    next_link = self.parse4link(html)  # renamed: no longer shadows the outer iterable
                    while next_link:
                        html = self.get_page(next_link)
                        # FIX: original omitted the required mid_class argument here.
                        self.data.extend(self.parse4data(html, mid_class_li))
                        next_link = self.parse4link(html)
                        time.sleep(random.random())  # polite jittered delay between pages
        # Sort all records by playlist play count, highest first.
        print('处理数据')
        data_after_sort = sorted(self.data,
                                 key=lambda item: self._play_count(item['播放量']),
                                 reverse=True)
        self.excel(data_after_sort)
        print('写入文件')
        # FIX: dump one JSON array — the original wrote concatenated objects,
        # which is not parseable JSON.
        with open('netease.json', 'w', encoding='utf-8') as f:
            json.dump(data_after_sort, f, ensure_ascii=False)

    def excel(self, data):
        """Write *data* (pre-sorted by play count) to 网易云歌单.xlsx.

        Sheet 1 holds one row per song; sheet 2 holds summary statistics.
        """
        print("写入excel")
        wb = Workbook()
        ws = wb.active
        ws.title = "网易云歌单详情"
        ws1 = wb.create_sheet("歌单数据分析")
        fill = PatternFill("solid", fgColor="DDDDDD")
        ws.freeze_panes = 'A2'  # keep the header row visible while scrolling
        # Column letter, header text, column width.
        headers = [
            ('A', "歌曲链接", 41),
            ('B', "歌名", 50),
            ('C', "歌手", 9),
            ('D', "专辑", 31),
            ('E', "歌单播放量", 11),
            ('F', "歌单名称", 35),
            ('G', "中间分类", 12),
            ('H', "大分类", 9),
        ]
        for col, title, width in headers:
            cell = ws[col + '1']
            cell.value = title
            cell.fill = fill
            ws.column_dimensions[col].width = width

        ws1.column_dimensions['A'].width = 75
        for row_idx in (1, 2, 3):
            ws1.row_dimensions[row_idx].height = 30
        ws1["A1"] = "总共爬取网易云歌曲%d首" % len(data)

        playlist_names = []
        for li in data:
            ws.append([
                str(li["歌曲链接"]),
                str(li["歌名"]),
                str(li["歌手"]),
                str(li["专辑"]),
                str(li["播放量"]),
                str(li["歌单名称"]),
                str(li["中间分类"]),
                str(li["大分类"]),
            ])
            playlist_names.append(str(li["歌单名称"]))

        # FIX: guard the empty-data case before reading row 2; *data* is
        # pre-sorted, so the first data row is the most-played playlist.
        if data:
            top = ws[2]
            ws1["A3"] = "播放量最高的歌单为:%s  播放量：%s" % (top[5].value, top[4].value)
        ws1["A2"] = "总共爬取了网易云%s个歌单" % len(set(playlist_names))
        wb.save("网易云歌单.xlsx")


if __name__ == '__main__':
    spider = Netease_spider()
    try:
        spider.crawl()
    finally:
        # FIX: always release the ChromeDriver/browser process, even when
        # crawl() raises — the original leaked it on any failure.
        spider.browser.quit()
    print('Finished')