# from urllib.request import urlopen
# from http.client import HTTPResponse
#
# x: HTTPResponse = urlopen('https://www.baidu.com')
# with x:
#     print(type(x))
#     print(x.read())  # 以二进制读
#     print(x.headers)
#     print(x.geturl())
#
#
#
# print(x.isclosed())
# print(x.closed)


import requests
import json
import time
import random
import pandas as pd
from bs4 import BeautifulSoup
from fake_useragent import UserAgent


class KugouMusicCrawler:
    """Crawler for the Kugou Music TOP100 chart.

    Scrapes the paginated ranking pages, fetches per-song detail through
    Kugou's JSONP API, accumulates results in ``self.songs`` and can dump
    them to CSV via :meth:`save_to_csv`.
    """

    def __init__(self):
        # Randomized User-Agent per run to reduce the chance of being blocked.
        self.ua = UserAgent()
        self.headers = {
            'User-Agent': self.ua.random,
            'Accept': 'application/json, text/plain, */*',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Referer': 'https://www.kugou.com/yy/rank/home/1-8888.html',
            'Origin': 'https://www.kugou.com',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-site',
            'Connection': 'keep-alive',
        }
        # '{}' is replaced by the 1-based page number.
        self.top100_url = 'https://www.kugou.com/yy/rank/home/{}-8888.html'
        self.song_info_url = 'https://wwwapi.kugou.com/yy/index.php'
        self.songs = []  # accumulated song dicts, in crawl order

    def get_rank_pages(self):
        """Return the URLs of all TOP100 ranking pages (5 pages, 20 songs each)."""
        return [self.top100_url.format(page) for page in range(1, 6)]

    def parse_rank_page(self, url):
        """Parse one ranking page and fetch detail for every song found on it."""
        try:
            # timeout guards against a hung connection blocking the crawl forever
            response = requests.get(url, headers=self.headers, timeout=10)
            response.raise_for_status()
            soup = BeautifulSoup(response.text, 'html.parser')

            # Each chart entry is one <li> in the song list.
            song_list = soup.select('.pc_temp_songlist > ul > li')
            for song in song_list:
                song_info = song.select_one('.pc_temp_songname')
                if not song_info:
                    continue
                # Entry text is formatted "singer - song name".
                text = song_info.get_text(strip=True)
                song_name = text.split('-')[-1].strip()
                singer = text.split('-')[0].strip()
                # Last path component of the link (minus extension) is used as
                # the song hash for the detail API.
                song_id = song_info['href'].split('/')[-1].split('.')[0]

                # Duration cell may be missing on a malformed row; don't let
                # one bad row abort the whole page via AttributeError.
                time_node = song.select_one('.pc_temp_time')
                duration = time_node.get_text(strip=True) if time_node else ''

                # Fetch play URL / lyrics / album for this entry.
                self.get_song_detail(song_id, song_name, singer, duration)

                # Random delay between detail requests to avoid anti-crawler bans.
                time.sleep(random.uniform(1, 3))

        except Exception as e:
            print(f"解析排行榜页面出错: {e}")

    def get_song_detail(self, song_id, song_name, singer, duration):
        """Fetch song detail (play URL, lyrics, album) and append it to ``self.songs``.

        Silently logs and returns on any failure so one bad song does not
        abort the crawl.
        """
        try:
            params = {
                'r': 'play/getdata',
                'callback': 'jQuery1910447343381000752_1623456789012',  # arbitrary JSONP callback name
                'hash': song_id,
                'dfid': '2mRfl53jlyVu3wtekx0oAAxM',  # fixed value captured from a browser session
                'appid': '1014',
                'mid': 'dc370b50f7e664570b2fb6428ff92e59',  # fixed value captured from a browser session
                'platid': '4',
                '_': int(time.time() * 1000),  # cache-busting timestamp (ms)
            }

            response = requests.get(self.song_info_url, headers=self.headers,
                                    params=params, timeout=10)
            response.raise_for_status()

            # Strip the JSONP wrapper: keep everything from the first '{' to
            # the last '}'. More robust than slicing off a fixed-width ");"
            # suffix, which breaks if the trailing semicolon is absent.
            json_str = response.text.strip()
            data = json.loads(json_str[json_str.index('{'):json_str.rindex('}') + 1])

            if data.get('status') == 1:
                song_data = data.get('data', {})
                song = {
                    'rank': len(self.songs) + 1,
                    'song_name': song_name,
                    'singer': singer,
                    'duration': duration,
                    'album': song_data.get('album_name', ''),
                    'play_url': song_data.get('play_url', ''),
                    'lyrics': song_data.get('lyrics', ''),
                    'hot': random.randint(500000, 1000000)  # simulated popularity value
                }
                self.songs.append(song)
                print(f"成功获取歌曲: {song_name} - {singer}")

        except Exception as e:
            print(f"获取歌曲详情出错: {e}")

    def run(self):
        """Crawl every ranking page and return the collected song list."""
        print("开始爬取酷狗音乐TOP100...")
        pages = self.get_rank_pages()

        for page in pages:
            print(f"正在爬取页面: {page}")
            self.parse_rank_page(page)
            time.sleep(random.uniform(3, 5))  # delay between pages

        print(f"爬取完成，共获取 {len(self.songs)} 首歌曲")
        return self.songs

    def save_to_csv(self, filename='kugou_top100.csv'):
        """Save the collected songs to *filename* as UTF-8-sig CSV (Excel friendly)."""
        if self.songs:
            df = pd.DataFrame(self.songs)
            df.to_csv(filename, index=False, encoding='utf-8-sig')
            # Bug fix: previously printed a literal placeholder instead of
            # the actual output filename.
            print(f"数据已保存到 {filename}")
        else:
            print("没有数据可保存")


if __name__ == "__main__":
    # Entry point: crawl the TOP100 chart, then dump the results to CSV.
    kugou_crawler = KugouMusicCrawler()
    collected = kugou_crawler.run()
    kugou_crawler.save_to_csv()