import os
import requests
from bs4 import BeautifulSoup
import json
from urllib.parse import unquote

def create_directory(title):
    """Create a per-title folder under ./img and return its path.

    The title is sanitized (filesystem-unsafe characters replaced with
    underscores) and truncated to 50 characters before use.

    Args:
        title: Raw playlist title, possibly containing characters that
            are illegal in file names.

    Returns:
        The (possibly pre-existing) directory path ``img/<sanitized title>``.
    """
    # Replace characters that are illegal on Windows/most filesystems
    # in a single C-level pass instead of chained .replace() calls.
    invalid_chars = '<>:"/\\|?*'
    title = title.translate(str.maketrans(dict.fromkeys(invalid_chars, '_')))

    # Keep folder names reasonably short.
    title = title[:50]

    # makedirs creates the 'img' parent as needed; exist_ok=True avoids
    # the check-then-create race of separate os.path.exists() tests.
    dir_path = os.path.join('img', title)
    os.makedirs(dir_path, exist_ok=True)

    return dir_path

def download_image(img_url, save_path):
    """Download an image and save it into *save_path*.

    Args:
        img_url: Direct URL of the image to fetch.
        save_path: Existing directory to write the file into.

    Returns:
        The full local path of the saved file on success, or ``None`` on
        any network/filesystem failure (best-effort, error printed).
    """
    try:
        # Context manager releases the connection even on early errors;
        # timeout prevents the call from hanging forever on a stalled server.
        with requests.get(img_url, stream=True, timeout=15) as response:
            response.raise_for_status()

            # Derive a filename from the URL path (query string stripped,
            # percent-encoding decoded).
            filename = os.path.basename(unquote(img_url.split('?')[0]))
            if not filename:
                # URL ended with '/': fall back to a generic name so we
                # don't try to open a directory path for writing.
                filename = 'image'

            full_path = os.path.join(save_path, filename)

            # Stream to disk in 1 KiB chunks to bound memory use.
            with open(full_path, 'wb') as f:
                for chunk in response.iter_content(1024):
                    f.write(chunk)

        return full_path
    except (requests.RequestException, OSError) as e:
        print(f"下载图片失败: {e}")
        return None

def crawl_netease_music():
    """Scrape the NetEase Cloud Music playlist discovery page.

    For each playlist card found, downloads its cover image into a
    per-title folder under ./img.

    Returns:
        A list of dicts with keys ``title``, ``img_src``,
        ``img_local_path`` and ``play_count``; an empty list on failure.

    NOTE(review): the playlist grid on music.163.com is typically served
    inside an iframe, so this static URL may not contain the target
    ``<ul>`` — verify the response actually includes the list markup.
    """
    url = "https://music.163.com/discover/playlist"

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Referer': 'https://music.163.com/'
    }

    try:
        # Timeout keeps an unresponsive server from hanging the crawl.
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()

        soup = BeautifulSoup(response.text, 'html.parser')
        ul = soup.find('ul', class_='m-cvrlst f-cb')

        if not ul:
            print("未找到目标ul元素")
            return []

        results = []
        for li in ul.find_all('li'):
            # Cover image URL.
            img = li.find('img')
            img_src = img['src'] if img and 'src' in img.attrs else None

            # Playlist title lives on the masking <a class="msk"> overlay.
            a_tag = li.find('a', class_='msk')
            title = a_tag['title'] if a_tag and 'title' in a_tag.attrs else None

            # Play-count badge.
            nb = li.find('span', class_='nb')
            play_count = nb.text if nb else None

            if img_src and title:
                # Create the per-title folder, then fetch the cover into it.
                dir_path = create_directory(title)
                img_path = download_image(img_src, dir_path)

                results.append({
                    'title': title,
                    'img_src': img_src,
                    'img_local_path': img_path,
                    'play_count': play_count
                })

        return results

    except Exception as e:
        # Top-level boundary for the whole crawl: log and return empty
        # so the caller can distinguish "no data" without crashing.
        print(f"爬取过程中发生错误: {e}")
        return []

def save_to_json(data, filename='netease_music.json'):
    """Write *data* to *filename* as pretty-printed UTF-8 JSON.

    Args:
        data: Any JSON-serializable object.
        filename: Output path; defaults to ``netease_music.json``.
    """
    with open(filename, 'w', encoding='utf-8') as f:
        # ensure_ascii=False keeps Chinese titles human-readable on disk.
        json.dump(data, f, ensure_ascii=False, indent=2)
    # Bug fix: the message previously printed the literal "(unknown)"
    # instead of the actual output filename.
    print(f"数据已保存到 {filename}")

if __name__ == '__main__':
    # Entry point: crawl the playlist page, preview a few records,
    # then persist everything to JSON.
    print("开始爬取网易云音乐数据并下载图片...")
    music_data = crawl_netease_music()

    if not music_data:
        print("未能获取数据")
    else:
        print(f"成功爬取 {len(music_data)} 条数据")

        # Show at most the first three records as a sanity check.
        for i, item in enumerate(music_data[:3], 1):
            print(f"\n示例数据 {i}:")
            print(f"标题: {item['title']}")
            print(f"图片URL: {item['img_src']}")
            print(f"本地图片路径: {item['img_local_path']}")
            print(f"播放量: {item['play_count']}")

        save_to_json(music_data)