'''
陈超依   19377189
现代程序设计 第十四次作业
协程改写爬取网易云歌单
'''
import asyncio
import csv
from os import read
from bs4 import BeautifulSoup
from bs4.element import ResultSet
from selenium import webdriver

#第一部分 改写爬虫程序，使用协程实现--------------------------------------------
#输入中文歌单名，将歌单内信息爬取到指定文件中

async def get_data_from_id(order : int, id : str, out_path : str):
    """Scrape one NetEase playlist page and append its fields as a CSV row.

    Parameters:
        order: running index, written as the first CSV column.
        id: playlist id (the value of the 'id=' query parameter).
        out_path: path of the CSV file the result row is appended to.

    NOTE(review): Selenium calls are blocking, so although declared async
    this coroutine never actually yields control while scraping.
    """
    link = 'https://music.163.com/#/playlist?id=' + id
    driver = webdriver.Chrome()
    try:
        driver.get(link)
        # The playlist content is rendered inside the 'g_iframe' frame.
        driver.switch_to.frame('g_iframe')
        html = driver.page_source
    finally:
        # Always release the browser process, even if the page load fails
        # (the original leaked one Chrome instance per call).
        driver.quit()
    bs = BeautifulSoup(html, 'html.parser')

    # Drill down to the playlist info container.
    Lv1_body = bs.body
    Lv2_div = Lv1_body.find('div', {'id' : 'm-playlist'})
    Lv3_div = Lv2_div.find('div', {'class' : 'g-mn4'})
    Lv4_div = Lv3_div.find('div', {'class' : 'g-mn4c'})
    Lv5_div = Lv4_div.find('div', {'class' : 'g-wrap6'})
    Lv6_div = Lv5_div.find('div', {'class' : 'm-info f-cb'})

    # Accumulate the scraped fields in CSV column order.
    result = [order, id]

    # Playlist title
    title_Lv1_div = Lv6_div.find('div', {'class' : 'cnt'})
    title_Lv2_div = title_Lv1_div.find('div', {'class' : 'cntc'})
    title_Lv3_div = title_Lv2_div.find('div', {'class' : 'hd f-cb'})
    title_Lv4_div = title_Lv3_div.find('div', {'class' : 'tit'})
    title_Lv5_h2 = title_Lv4_div.find('h2')
    result.append(title_Lv5_h2.string)

    # Author id (taken from the profile link's query string) and name
    author_Lv1_div = title_Lv2_div
    author_Lv2_div = author_Lv1_div.find('div', {'class' : 'user f-cb'})
    author_Lv3_span = author_Lv2_div.find('span', {'class' : 'name'})
    author_Lv4_a = author_Lv3_span.find('a')
    result.append(author_Lv4_a.get('href').split('id=')[-1])
    result.append(author_Lv4_a.string)

    # Creation date (text before the non-breaking space)
    time_Lv1_div = author_Lv2_div
    time_Lv2_span = time_Lv1_div.find('span', {'class' : 'time s-fc4'})
    result.append(time_Lv2_span.string.split('\xa0')[0])

    # Times added to a play queue (favourite button counter)
    addnum_Lv1_div = title_Lv2_div
    addnum_Lv2_div = addnum_Lv1_div.find('div', {'class' : 'btns f-cb'})
    addnum_Lv3_a = addnum_Lv2_div.find('a', {'class':'u-btni u-btni-fav'})
    result.append(addnum_Lv3_a.get('data-count'))

    # Share count
    sharenum_Lv1_div = addnum_Lv2_div
    sharenum_Lv2_a = sharenum_Lv1_div.find('a',{'class':'u-btni u-btni-share'})
    result.append(sharenum_Lv2_a.get('data-count'))

    # Comment count
    cmmt_Lv1_div = addnum_Lv2_div
    cmmt_Lv2_a = cmmt_Lv1_div.find('a', {'class' : 'u-btni u-btni-cmmt'})
    cmmt_Lv3_i = cmmt_Lv2_a.find('i')
    cmmt_Lv4_span = cmmt_Lv3_i.find('span')
    result.append(cmmt_Lv4_span.string)

    # Number of songs
    songnum_Lv1_div = Lv5_div
    songnum_Lv2_div = songnum_Lv1_div.find('div', {'class' : 'n-songtb'})
    songnum_Lv3_div = songnum_Lv2_div.find(
        'div', {'class' : 'u-title u-title-1 f-cb'})
    songnum_Lv4_span = songnum_Lv3_div.find('span', {'class' : 'sub s-fc3'})
    songnum_Lv5_span = songnum_Lv4_span.find('span')
    result.append(songnum_Lv5_span.string)

    # Play count
    playnum_Lv1_div = songnum_Lv3_div
    playnum_Lv2_div = playnum_Lv1_div.find('div', {'class' : 'more s-fc3'})
    playnum_Lv3_strong = playnum_Lv2_div.find('strong')
    result.append(playnum_Lv3_strong.string)

    # Description: take the HTML between </b> and </p>, strip <br/> tags.
    intro_Lv1_div = title_Lv2_div
    intro_Lv2_p = intro_Lv1_div.find('p', {'id' : 'album-desc-more'})
    intro = (str(intro_Lv2_p).split('</b>')[-1]).split('</p>')[0]
    intro = intro.replace('<br/>', '')
    result.append(intro)

    # Append the row; platform-default encoding with errors='ignore'
    # matches the header the caller writes with the same default.
    with open(out_path, 'a', newline='', errors='ignore') as f:
        writer = csv.writer(f)
        writer.writerow(result)


async def data_producer(classification : str, out_path : str, start_page : int,
    end_page : int):
    """Scrape all playlists of one category over a page range into out_path.

    Parameters:
        classification: category name (e.g. '说唱'), percent-encoded into
            the search URL.
        out_path: CSV file each playlist row is appended to.
        start_page: first result page, 0-based (35 playlists per page).
        end_page: exclusive upper bound of the page range.
    """
    from urllib.parse import quote
    # Percent-encode the UTF-8 category name for the query string. This
    # replaces the original repr()-of-bytes mangling, which produced the
    # same output for CJK text but broke on any ASCII character.
    cat = quote(classification)
    # NOTE(review): the original passes the page index as 'offset'; the
    # site may expect an item offset (page * 35) — confirm before relying
    # on multi-page results. Behavior kept unchanged here.
    first_link = ('https://music.163.com/#/discover/playlist/?order=hot&cat='
                  + cat + '&limit=35&offset=')
    for page in range(start_page, end_page):
        # Render one result page in a real browser (content is JS-built).
        url = first_link + str(page)
        driver = webdriver.Chrome()
        try:
            driver.get(url)
            driver.switch_to.frame('g_iframe')
            html = driver.page_source
        finally:
            # Release the browser even on failure (original leaked one
            # Chrome instance per page).
            driver.quit()

        # Pull every playlist id out of the result list.
        bs = BeautifulSoup(html, 'html.parser')
        Lv1_body = bs.body
        Lv2_div = Lv1_body.find('div', {'id' : 'm-disc-pl-c'})
        Lv3_div = Lv2_div.find('div')
        Lv4_ul = Lv3_div.find('ul')
        Lv5_li = Lv4_ul.find_all('li')
        order = page * 35    # 35 playlists per page
        list_of_task = []
        for li in Lv5_li:
            Lv6_p = li.find_all('p')[0]
            Lv7_a = Lv6_p.find('a')
            Lv8_href = Lv7_a.get('href')
            songlist_id = Lv8_href.split('id=')[-1]
            order += 1
            print(f'\n{order} {songlist_id} {out_path}\n')
            list_of_task.append(asyncio.create_task(
                get_data_from_id(order, songlist_id, out_path)))
        # Await the page's tasks before moving on to the next page.
        for task in list_of_task:
            await task


#第二部分 对获取信息进行简单分析------------------------------------------------
def analyse_data(path : str, pass_line : int):
    """Yield the playlist id of every row whose column 7 count reaches
    pass_line.

    Parameters:
        path: CSV file produced by the scraper; the first row is a header.
        pass_line: inclusive threshold compared against int(row[7]).

    Yields:
        The playlist id (column 1) of each qualifying data row.
    """
    with open(path, 'r') as f:
        reader = csv.reader(f)
        next(reader)  # skip the header row
        # Iterate the reader directly: the original drove it with next()
        # in a while-loop, so exhausting the file raised StopIteration
        # inside the generator body (RuntimeError under PEP 479), and a
        # blank line silently terminated iteration.
        for row in reader:
            if not row:  # tolerate blank lines in the file
                continue
            if int(row[7]) >= pass_line:
                yield row[1]


#主函数-------------------------------------------------------------------------
if __name__=='__main__':


    # Part 1: scrape a category (kept disabled, as in the original).
    '''
    #参数设置区
    start_page = 0 #起始页码，从0开始
    end_page = 1  #终止页码，最多为38
    chinese_classification = '说唱'  #歌单分类
    out_csv_path='E:/Py_Programs/week14/data.csv' #歌单信息储存路径，需为csv文件

    #程序执行
    with open(out_csv_path, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['序号','歌单id','标题','作者id','作者名称',\
        '创建时间','添加到播放列表次数','分享次数','评论数','歌曲数',\
        '播放量','介绍'])
    asyncio.run(data_producer(chinese_classification, out_csv_path, start_page, end_page))
    '''


    # Part 2: filter playlist ids whose count reaches the pass line.
    file = 'E:/Py_Programs/week14/data.csv'
    a = analyse_data(file, 100)
    result = []
    try:
        while True:
            result.append(a.send(None))
    except (StopIteration, RuntimeError):
        # StopIteration marks normal generator exhaustion; RuntimeError is
        # what a StopIteration leaking out of the generator body becomes
        # under PEP 479. The original bare `except:` also hid real errors
        # such as a missing input file — those now propagate.
        pass
    print(result)