import requests
from lxml import etree
from pprint import pprint
import json
class WangYi_Spider:
    """Crawler for NetEase Cloud Music (music.163.com) playlists.

    Walks the playlist index page by page, follows every playlist's
    detail page, and saves each playlist (metadata + song list) to its
    own JSON file in the current directory.
    """

    def __init__(self):
        # First index page of the "discover playlists" listing.
        self.start_url = "http://music.163.com/discover/playlist/"
        # Site root, prepended to the relative hrefs found in the pages.
        self.part_url = "http://music.163.com"
        self.headers = {
            "User-Agent":
                "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36",
        }

    def parse_url(self, url):
        """GET `url` with the spider's headers and return the decoded body.

        A timeout is set explicitly because `requests` has none by
        default — without it a stalled connection hangs the crawl forever.
        """
        response = requests.get(url, headers=self.headers, timeout=10)
        return response.content.decode()

    def get_detail_content_dict(self, html_str):
        """Parse one playlist detail page.

        Returns a dict with keys "author" (str or None), "tag" (list of
        str), "desc" (list of str) and "song_list" (list of dicts with
        "title" and "url", either of which may be None when the element
        is missing).
        """
        html_element = etree.HTML(html_str)
        content_dict = {}

        # Hoist each xpath so it is evaluated once, then guard with
        # truthiness instead of len(...) > 0.
        authors = html_element.xpath('//div[@class="user f-cb"]/span[@class="name"]/a/text()')
        content_dict["author"] = authors[0] if authors else None
        content_dict["tag"] = html_element.xpath("//div[@class='tags f-cb']/a[@class='u-tag']/i/text()")
        content_dict["desc"] = html_element.xpath("//p[@id='album-desc-more']/text()")

        # The server-rendered song list lives in this pre-cache div.
        song_list = []
        for li in html_element.xpath("//div[@id='song-list-pre-cache']/ul/li"):
            titles = li.xpath("./a/text()")
            hrefs = li.xpath("./a/@href")
            song_list.append({
                "title": titles[0] if titles else None,
                "url": self.part_url + hrefs[0] if hrefs else None,
            })
        content_dict["song_list"] = song_list

        return content_dict

    def get_content_list(self, html_str):
        """Parse one playlist index page.

        For every playlist on the page, collect its cover image, title
        and absolute href, then fetch and merge the detail page's data.
        Returns (content_list, next_url) where next_url is the absolute
        URL of the next index page, or None on the last page (the
        disabled "next" anchor has a different class attribute and is
        not matched by the xpath).
        """
        html_element = etree.HTML(html_str)
        content_list = []
        for li in html_element.xpath("//ul[@id='m-pl-container']/li"):
            item = {}

            srcs = li.xpath("./div/img[@class='j-flag']/@src")
            item["img_src"] = srcs[0] if srcs else None
            if item["img_src"]:
                # Strip the resize parameter, e.g.
                # .../18509178744178182.jpg?param=140y140 -> .../18509178744178182.jpg
                item["img_src"] = item["img_src"].split("?")[0]

            titles = li.xpath("./p[@class='dec']/a/@title")
            item["title"] = titles[0] if titles else None

            hrefs = li.xpath("./p[@class='dec']/a/@href")
            item["href"] = (self.part_url + hrefs[0]) if hrefs else None

            if item["href"]:
                # Fetch the playlist's detail page and merge its fields
                # into the item.
                detail_content_str = self.parse_url(item["href"])
                print(item["href"])
                detail_dict = self.get_detail_content_dict(detail_content_str)
                item.update(detail_dict)

            content_list.append(item)

        next_hrefs = html_element.xpath("//a[@class='zbtn znxt']/@href")
        next_url = self.part_url + next_hrefs[0] if next_hrefs else None
        pprint(content_list)
        return content_list, next_url

    def save_content(self, content_list):
        """Write each playlist dict to gedan<playlist-id>.json (UTF-8)."""
        for gedan in content_list:
            # The playlist id is the value after "id=" in the href.
            filename = "gedan" + gedan["href"].split("id=")[-1] + ".json"
            with open(filename, "w", encoding="utf-8") as f:
                f.write(json.dumps(gedan, ensure_ascii=False, indent=2))
        print("保存成功")

    def run(self):
        """Crawl every index page until there is no next-page link."""
        next_url = self.start_url
        while next_url:
            # Fetch the current index page.
            html_str = self.parse_url(next_url)
            # Extract the playlists (follows detail pages) and the next page.
            content_list, next_url = self.get_content_list(html_str)
            # Persist each playlist as JSON.
            self.save_content(content_list)




#<a href="/discover/playlist/?order=hot&cat=%E5%85%A8%E9%83%A8&limit=35&offset=35" class="zbtn znxt">下一页</a>
#<a href="javascript:void(0)" class="zbtn znxt js-disabled">下一页</a>

if __name__ == '__main__':
    # Script entry point: build the spider and start the crawl.
    WangYi_Spider().run()