'''
陈超依   19377189
现代程序设计 第十二次作业
爬取网易云歌单
'''
import csv
import random
import threading
import time
import urllib.parse
import urllib.request
from queue import Queue
from threading import Thread

from bs4 import BeautifulSoup
from selenium import webdriver

#--------------------------第一部分 获取所有歌单的id-----------------------------


def chinese_to_utf8(chinese : str):
    """Percent-encode a string as UTF-8 for use in a URL query value.

    E.g. '说唱' -> '%E8%AF%B4%E5%94%B1'.

    The original implementation rebuilt the encoding by parsing the
    repr() of a bytes object, which produced garbage for any input
    containing ASCII characters (they don't appear as '\\x..' escapes).
    urllib.parse.quote yields the identical uppercase %XX sequence for
    CJK text and handles every other input correctly; safe='' also
    escapes '/' so the value cannot break the query string.
    """
    return urllib.parse.quote(chinese, safe='')


def get_id_from_url(url : str, page_number : int, queue_of_id : Queue):
    """Scrape one listing page of NetEase playlists and queue their ids.

    url          : listing-page URL ('.../discover/playlist/?...&offset=...').
    page_number  : 0-based index of the page, used to compute the running
                   order number (35 playlists per page).
    queue_of_id  : output queue; receives one (order, title, playlist_id)
                   tuple per playlist found on the page.
    """
    # The listing is rendered by JavaScript inside an iframe, so we need a
    # real browser rather than a plain HTTP fetch.
    driver = webdriver.Chrome()
    try:
        driver.get(url)
        driver.switch_to.frame('g_iframe')
        html = driver.page_source
    finally:
        # Fix: the original never quit the driver, leaking one Chrome
        # process per listing page scraped.
        driver.quit()

    # Navigate: #m-disc-pl-c > div > ul > li* holds the playlist grid.
    bs = BeautifulSoup(html, 'html.parser')
    container = bs.body.find('div', {'id' : 'm-disc-pl-c'})
    playlist_items = container.find('div').find('ul').find_all('li')

    order = page_number*35  # 35 playlists per listing page
    for li in playlist_items:
        anchor = li.find_all('p')[0].find('a')
        title = anchor.get('title')
        # The playlist id is the value of the 'id=' query parameter.
        songlist_id = anchor.get('href').split('id=')[-1]
        order += 1
        queue_of_id.put( (order, title, songlist_id) )


def id_producer(chinese_classification : str, start_page_number : int,
queue_of_id : Queue, end_page_number = 38):
    """Produce playlist ids for every listing page of one category.

    chinese_classification : category name in Chinese (e.g. '说唱').
    start_page_number      : first page to scrape (0-based).
    end_page_number        : stop before this page (site has at most 38).
    queue_of_id            : queue that receives (order, title, id) tuples.

    Bug fix: NetEase's 'offset' query parameter counts playlists, not
    pages (35 playlists per page), so page i lives at offset i*35.  The
    original passed the raw page index, which made successive "pages"
    overlap and re-scrape mostly the same playlists.
    """
    base_url = 'https://music.163.com/#/discover/playlist/?order=hot&cat='\
    + chinese_to_utf8(chinese_classification)+'&limit=35&offset='
    for page in range(start_page_number, end_page_number):
        get_id_from_url(base_url + str(page*35), page, queue_of_id)
        # Random pause between pages to avoid being rate-limited/banned.
        time.sleep(random.uniform(1,20))
    

#-------------------------第二部分 依据id获取歌单信息----------------------------


def get_cover_from_bs(bs : BeautifulSoup, out_path : str,\
order : str, title : str):
    """Download a playlist's cover image into *out_path*.

    The image is saved as '<order>_<title>.jpg'; the title is cut at the
    first '/' or '|' because those characters are illegal in the path.
    """
    cover_div = (bs.body
                 .find('div', {'id' : 'm-playlist'})
                 .find('div', {'class' : 'g-mn4'})
                 .find('div', {'class' : 'g-mn4c'})
                 .find('div', {'class' : 'g-wrap6'})
                 .find('div', {'class' : 'm-info f-cb'})
                 .find('div', {'class' : 'cover u-cover u-cover-dj'}))
    image_url = cover_div.find('img').get('src')
    safe_title = title.split('/')[0].split('|')[0]
    urllib.request.urlretrieve(
        image_url,
        filename=out_path + '/{}_{}.jpg'.format(order, safe_title))


def get_data_from_bs(bs : BeautifulSoup, order : str, out_csv_path : str):
    """Scrape a playlist page's metadata and append it as one CSV row.

    bs           : BeautifulSoup of the playlist page (the 'g_iframe' content).
    order        : running number of the playlist, written as the first column.
    out_csv_path : CSV file to append to.  Columns, in order:
                   [order, title, author id, author name, creation date,
                    favourite count, share count, comment count,
                    song count, play count, description].
    """

    # Walk down to the 'm-info f-cb' container that holds most fields.
    Lv1_body = bs.body
    Lv2_div = Lv1_body.find('div', {'id' : 'm-playlist'})
    Lv3_div = Lv2_div.find('div', {'class' : 'g-mn4'})
    Lv4_div = Lv3_div.find('div', {'class' : 'g-mn4c'})
    Lv5_div = Lv4_div.find('div', {'class' : 'g-wrap6'})
    Lv6_div = Lv5_div.find('div', {'class' : 'm-info f-cb'})

    # Accumulate the scraped fields in output-column order.
    result = [order]

    # Playlist title (the <h2> inside div.tit).
    title_Lv1_div = Lv6_div.find('div', {'class' : 'cnt'})
    title_Lv2_div = title_Lv1_div.find('div', {'class' : 'cntc'})
    title_Lv3_div = title_Lv2_div.find('div', {'class' : 'hd f-cb'})
    title_Lv4_div = title_Lv3_div.find('div', {'class' : 'tit'})
    title_Lv5_h2 = title_Lv4_div.find('h2')
    result.append(title_Lv5_h2.string)

    # Author id (parsed from the href's 'id=' query parameter) and name.
    author_Lv1_div = title_Lv2_div
    author_Lv2_div = author_Lv1_div.find('div', {'class' : 'user f-cb'})
    author_Lv3_span = author_Lv2_div.find('span', {'class' : 'name'})
    author_Lv4_a = author_Lv3_span.find('a')
    result.append(author_Lv4_a.get('href').split('id=')[-1])
    result.append(author_Lv4_a.string)

    # Creation date.  '\xa0' is the &nbsp; that separates the date from
    # the trailing label text in the span — presumably; verify on a live page.
    time_Lv1_div = author_Lv2_div
    time_Lv2_span = time_Lv1_div.find('span', {'class' : 'time s-fc4'})
    result.append(time_Lv2_span.string.split('\xa0')[0])

    # Favourite count (times added to a playlist), from the button's
    # 'data-count' attribute.
    addnum_Lv1_div = title_Lv2_div
    addnum_Lv2_div = addnum_Lv1_div.find('div', {'class' : 'btns f-cb'})
    addnum_Lv3_a = addnum_Lv2_div.find('a', {'class':'u-btni u-btni-fav'})
    result.append(addnum_Lv3_a.get('data-count'))

    # Share count, same 'data-count' pattern.
    sharenum_Lv1_div = addnum_Lv2_div
    sharenum_Lv2_a = sharenum_Lv1_div.find('a',{'class':'u-btni u-btni-share'})
    result.append(sharenum_Lv2_a.get('data-count'))

    # Comment count (nested <i><span> inside the comment button).
    cmmt_Lv1_div = addnum_Lv2_div
    cmmt_Lv2_a = cmmt_Lv1_div.find('a', {'class' : 'u-btni u-btni-cmmt'})
    cmmt_Lv3_i = cmmt_Lv2_a.find('i')
    cmmt_Lv4_span = cmmt_Lv3_i.find('span')
    result.append(cmmt_Lv4_span.string)

    # Number of songs (from the track-table title bar).
    songnum_Lv1_div = Lv5_div
    songnum_Lv2_div = songnum_Lv1_div.find('div', {'class' : 'n-songtb'})
    songnum_Lv3_div = songnum_Lv2_div.find\
    ('div', {'class' : 'u-title u-title-1 f-cb'})
    songnum_Lv4_span = songnum_Lv3_div.find('span', {'class' : 'sub s-fc3'})
    songnum_Lv5_span = songnum_Lv4_span.find('span')
    result.append(songnum_Lv5_span.string)

    # Play count (the <strong> in the same title bar).
    playnum_Lv1_div = songnum_Lv3_div
    playnum_Lv2_div = playnum_Lv1_div.find('div', {'class' : 'more s-fc3'})
    playnum_Lv3_strong = playnum_Lv2_div.find('strong')
    result.append(playnum_Lv3_strong.string)

    # Description: slice the raw HTML between '</b>' and '</p>' and drop
    # the '<br/>' tags, leaving plain text.
    intro_Lv1_div = title_Lv2_div
    intro_Lv2_p = intro_Lv1_div.find('p', {'id' : 'album-desc-more'})
    intro = (str(intro_Lv2_p).split('</b>')[-1]).split('</p>')[0]
    intro = intro.replace('<br/>', '')
    result.append(intro)

    # Append the row to the CSV.
    # NOTE(review): no explicit encoding, and errors='ignore' silently
    # drops characters the locale encoding can't represent — confirm this
    # data loss is acceptable before relying on the output.
    with open(out_csv_path, 'a', newline='',errors='ignore') as f:
        writer = csv.writer(f)
        writer.writerow(result)


def id_customer(queue_of_id : Queue, first_out_path : str, out_csv_path : str):
    """Consume one (order, title, id) tuple from the queue and scrape it.

    Downloads the playlist's cover into *first_out_path* and appends its
    metadata to *out_csv_path*, then marks the queue item done.
    Raises TimeoutError if a None sentinel is received twice in a row.
    """
    # NOTE(review): Queue.get() blocks until an item is available, so the
    # None branch below only fires if a producer explicitly enqueues None
    # as an end-of-work sentinel.
    data = queue_of_id.get()
    if data is None:
        time.sleep(20)
        data = queue_of_id.get()
        if data is None:
            raise TimeoutError
    songlist_order, songlist_title, songlist_id = data

    # Render the playlist page; the content lives inside the 'g_iframe'
    # frame and is populated by JavaScript, hence the real browser.
    link = 'https://music.163.com/#/playlist?id=' + songlist_id
    driver = webdriver.Chrome()
    try:
        driver.get(link)
        driver.switch_to.frame('g_iframe')
        html = driver.page_source
    finally:
        # Fix: the original never quit the driver, leaking one Chrome
        # process per playlist scraped.
        driver.quit()

    bs = BeautifulSoup(html, 'html.parser')
    get_cover_from_bs(bs, first_out_path, songlist_order, songlist_title)
    get_data_from_bs(bs, songlist_order, out_csv_path)

    # Progress report ("playlist #N finished").
    print("第%d个歌单已爬取完成\n"%songlist_order)
    queue_of_id.task_done()


#------------------------------------主函数-------------------------------------


if __name__=='__main__':
    # --- Configuration ---
    start_page_num = 15 # first listing page to scrape (0-based)
    end_page_num = 38  # stop before this page (site has at most 38)
    chinese_classification = '说唱'  # playlist category (Chinese name)
    out_path = 'E:/Py_Programs/week12/output'  # cover-image folder (must exist)
    out_csv_path='E:/Py_Programs/week12/data.csv' # metadata CSV output file
    max_stop_time = 6  # max random pause (seconds) between thread launches;
    # a random 1..max_stop_time delay roughly caps concurrency and helps
    # avoid being blocked by the site
    max_threading_num = 15  # hard cap on live threads
    first_or_continue = 0  # 0 = first run (write CSV header), 1 = resume run

    # --- Execution ---
    # On a first run, (over)write the CSV and emit the Chinese column header.
    if (first_or_continue == 0):
        with open(out_csv_path, 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(['序号','标题','作者id','作者名称','创建时间',\
    '添加到播放列表次数','分享次数','评论数','歌曲数','播放量','介绍'])
    # NOTE(review): 'global' at module level is a no-op — id_q is already
    # a module-level (global) name here.
    global id_q
    id_q = Queue()
    # One producer thread fills the queue with (order, title, id) tuples.
    producer = Thread(target=id_producer, args=(chinese_classification,\
    start_page_num,id_q, end_page_num) )
    producer.start()
    list_of_thread = []
    # One consumer thread per expected playlist (35 playlists per page).
    for i in range( (end_page_num-start_page_num)*35 ):
        customer=Thread(target=id_customer,args=(id_q, out_path, out_csv_path))
        list_of_thread.append(customer)
        time.sleep(random.uniform(1,max_stop_time))
        # Crude concurrency cap: wait until the live-thread count drops
        # below the limit before starting the next consumer.
        while(len(threading.enumerate()) > max_threading_num):
            time.sleep(max_stop_time/2)
        customer.start()
    # Wait for the producer and every consumer to finish.
    producer.join()
    for t in list_of_thread:
        t.join()