import os
import re
import time
from datetime import datetime
from typing import Dict, List, Optional

import requests
import xlwt
from bs4 import BeautifulSoup


class Music:
    """Container for a single entry of the Douban music chart."""

    def __init__(self, rank: int = 0):
        self.rank = rank                     # chart position (1-based)
        self.title: str = ""                 # album name
        self.url: str = ""                   # Douban page link
        self.img_url: str = ""               # cover image URL
        self.rating: float = 0.0             # average score
        self.rating_count: str = ""          # rating-count caption text
        self.performers: str = ""            # performing artists
        self.genre: str = ""                 # music genre
        self.release_date: str = ""          # release date text
        self.img_local_path: str = ""        # path of the downloaded cover
        self.download_success: bool = False  # whether the cover was fetched

    def to_dict(self) -> Dict:
        """Map the entry onto the column names used by the Excel export."""
        status = '成功' if self.download_success else '失败'
        keys = ('排名', '专辑名', '音乐链接', '封面URL', '评分', '评价人数',
                '表演者', '流派', '发行时间', '本地封面路径', '封面下载状态')
        values = (self.rank, self.title, self.url, self.img_url, self.rating,
                  self.rating_count, self.performers, self.genre,
                  self.release_date, self.img_local_path, status)
        return dict(zip(keys, values))

    def __str__(self) -> str:
        """Multi-line, emoji-decorated summary of the entry."""
        cover_mark = '✅' if self.download_success else '❌'
        cover_file = os.path.basename(self.img_local_path) if self.img_local_path else '无文件'
        parts = [
            "",
            f"🎵 音乐 #{self.rank}",
            f"💿 专辑: {self.title}",
            f"📎 链接: {self.url}",
            f"🎤 表演者: {self.performers or '未知'}",
            f"🎼 流派: {self.genre or '未知'}",
            f"📅 发行时间: {self.release_date or '未知'}",
            f"⭐ 评分: {self.rating} ({self.rating_count})",
            f"🖼️ 封面: {cover_mark} {cover_file}",
            "        ",
        ]
        return "\n".join(parts)


class DoubanMusicSpider:
    """Scraper for a Douban music doulist built on requests + BeautifulSoup.

    Workflow: ``scrape_music()`` collects ``Music`` entries page by page,
    ``download_covers()`` optionally fetches cover art into a local folder,
    and ``export_to_excel()`` writes an ``.xls`` report via xlwt.
    """

    def __init__(self):
        # Douban doulist that holds the chart entries.
        self.base_url = 'https://www.douban.com/doulist/158870218/'
        # Desktop-browser UA string so the request is not rejected as a bot.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                          'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0 Safari/537.36'
        }
        self.page_size = 25  # entries per doulist page
        self.img_folder = os.path.join(os.getcwd(), 'music_covers')  # cover download dir
        self.musics: List["Music"] = []  # accumulated scrape results

    def scrape_music(self, max_pages: int = 5) -> List["Music"]:
        """Scrape the chart and return the collected entries.

        Stops early on a request failure or when a page yields no entries,
        keeping whatever was already parsed.

        :param max_pages: maximum number of pages to fetch (25 entries each)
        :return: list of parsed Music objects (also stored on ``self.musics``)
        """
        print("🚀 开始使用requests+BeautifulSoup抓取豆瓣华语音乐Top250...")
        print(f"📄 计划爬取页数: {max_pages}")

        current_page = 1
        current_rank = 1

        while current_page <= max_pages:
            page_url = self._build_page_url(current_page)
            print(f"📄 爬取第{current_page}页 -> {page_url}")

            try:
                response = requests.get(page_url, headers=self.headers, timeout=15)
                response.raise_for_status()
            except Exception as exc:
                # Network/HTTP failure: abort but keep earlier pages' data.
                print(f"⚠️ 请求第{current_page}页失败: {exc}")
                break

            soup = BeautifulSoup(response.text, 'html.parser')
            music_blocks = soup.select('div.doulist-item')
            print(f"📊 第{current_page}页抓取到 {len(music_blocks)} 条记录")

            if not music_blocks:
                print("ℹ️ 未找到更多音乐条目，提前结束")
                break

            for block in music_blocks:
                music = self._extract_music_info(block, current_rank)
                if music:
                    self.musics.append(music)
                    current_rank += 1

            current_page += 1
            time.sleep(2)  # be polite: throttle between page requests

        print(f"🎉 爬虫结束，共获取 {len(self.musics)} 条音乐数据")
        return self.musics

    def _build_page_url(self, page: int) -> str:
        """Build the URL of the given 1-based page of the doulist."""
        if page <= 1:
            return self.base_url
        # Douban paginates with a 0-based 'start' offset.
        offset = (page - 1) * self.page_size
        return f"{self.base_url}?start={offset}&sort=seq&sub_type="

    def _extract_music_info(self, block, rank: int) -> Optional["Music"]:
        """Parse one doulist item DOM node into a ``Music`` object.

        :param block: bs4 Tag for one ``div.doulist-item``
        :param rank: 1-based chart position to assign
        :return: populated Music, or None if parsing raised
        """
        try:
            music = Music(rank)

            title_link = block.select_one('div.title a')
            if title_link:
                music.title = title_link.get_text(strip=True)
                # .get() guards against an anchor without an href attribute;
                # subscripting would raise KeyError and lose the whole entry.
                music.url = title_link.get('href', '').strip()

            img_element = block.select_one('div.post img')
            if img_element:
                music.img_url = img_element.get('src', '').strip()

            rating_element = block.select_one('div.rating span.rating_nums')
            if rating_element:
                try:
                    music.rating = float(rating_element.get_text(strip=True))
                except ValueError:
                    # Non-numeric rating text (e.g. missing score) -> 0.0
                    music.rating = 0.0

            music.rating_count = self._extract_rating_count(block.select_one('div.rating'))

            abstract_element = block.select_one('div.abstract')
            details = self._parse_abstract(abstract_element)
            music.performers = details.get('表演者', '')
            music.genre = details.get('流派', '')
            music.release_date = details.get('发行时间', '')

            return music

        except Exception as exc:
            # Skip this entry but keep scraping the rest of the page.
            print(f"⚠️ 解析排名#{rank} 音乐失败: {exc}")
            return None

    def _extract_rating_count(self, rating_block) -> str:
        """Extract the rating-count caption ("N人评价") from the rating node.

        :param rating_block: bs4 Tag for ``div.rating`` or None
        :return: caption string, or "" when absent
        """
        if not rating_block:
            return ""
        text = rating_block.get_text(" ", strip=True)
        match = re.search(r'(\d+人评价)', text)
        return match.group(1) if match else ""

    def _parse_abstract(self, abstract_element) -> Dict[str, str]:
        """Split the abstract block into ``label -> value`` pairs.

        Lines look like "表演者: X" (full- or half-width colon accepted);
        lines without a colon are ignored.
        """
        details: Dict[str, str] = {}
        if not abstract_element:
            return details

        for raw_line in abstract_element.stripped_strings:
            # Normalize the full-width colon so one split handles both forms.
            normalized = raw_line.replace('：', ':')
            if ':' not in normalized:
                continue
            label, value = normalized.split(':', 1)
            details[label.strip()] = value.strip()
        return details

    def download_covers(self, musics: Optional[List["Music"]] = None) -> None:
        """Download cover images for the given (default: all scraped) entries.

        Sets ``download_success`` on each entry; entries without an image
        URL are skipped and keep their default False status.
        """
        if musics is None:
            musics = self.musics

        if not musics:
            print("ℹ️ 没有音乐数据，不执行封面下载")
            return

        # exist_ok makes a separate existence check unnecessary.
        os.makedirs(self.img_folder, exist_ok=True)

        print(f"🖼️ 开始下载封面 -> {self.img_folder}")
        for music in musics:
            if not music.img_url:
                continue
            music.download_success = self._download_cover(music)

    def _download_cover(self, music: "Music") -> bool:
        """Download one cover image; returns True on success.

        On success also records the local file path on the entry.
        """
        try:
            response = requests.get(music.img_url, headers=self.headers, timeout=15)
            if response.status_code != 200:
                return False

            filename = self._clean_filename(f"{music.rank:03d}_{music.title}.jpg")
            local_path = os.path.join(self.img_folder, filename)
            with open(local_path, 'wb') as file:
                file.write(response.content)

            music.img_local_path = local_path
            return True

        except Exception as exc:
            print(f"⚠️ 下载封面失败 ({music.title}): {exc}")
            return False

    def _clean_filename(self, filename: str) -> str:
        """Strip characters illegal on Windows and cap the length at 80.

        The extension is preserved when truncating, so very long album
        titles no longer produce files with the ``.jpg`` suffix cut off.
        """
        cleaned = re.sub(r'[<>:"/\\|?*]', '', filename)
        if len(cleaned) <= 80:
            return cleaned
        stem, ext = os.path.splitext(cleaned)
        return stem[:80 - len(ext)] + ext

    def export_to_excel(self, musics: Optional[List["Music"]] = None, filename: Optional[str] = None) -> str:
        """Write the entries to an ``.xls`` workbook in the current directory.

        :param musics: entries to export (default: all scraped entries)
        :param filename: target file name (default: timestamped name)
        :return: absolute path of the written file, or "" if nothing to export
        """
        if musics is None:
            musics = self.musics

        if not musics:
            print("ℹ️ 没有音乐数据可导出")
            return ""

        if filename is None:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"豆瓣音乐Top250_{timestamp}.xls"

        workbook = xlwt.Workbook(encoding='utf-8')
        worksheet = workbook.add_sheet('豆瓣音乐Top250')
        headers = ['排名', '专辑名', '音乐链接', '封面URL', '评分', '评价人数',
                   '表演者', '流派', '发行时间', '本地封面路径', '封面下载状态']

        for col, header in enumerate(headers):
            worksheet.write(0, col, header)

        # Row 0 is the header, so data rows start at 1.
        for row, music in enumerate(musics, start=1):
            music_data = music.to_dict()
            for col, header in enumerate(headers):
                worksheet.write(row, col, str(music_data.get(header, '')))

        excel_path = os.path.join(os.getcwd(), filename)
        workbook.save(excel_path)
        print(f"📄 Excel已生成: {excel_path}")
        return excel_path

    def print_music_info(self, musics: Optional[List["Music"]] = None) -> None:
        """Pretty-print the entries (default: all scraped entries) to stdout."""
        if musics is None:
            musics = self.musics

        if not musics:
            print("ℹ️ 暂无音乐数据")
            return

        print("\n" + "=" * 80)
        print("🎵 豆瓣华语音乐Top250 信息汇总")
        print("=" * 80)
        for music in musics:
            print(music)
            print("-" * 80)


def main():
    """Script entry point: prompt for a page count, scrape, and export."""
    print("🎵 豆瓣音乐Top250爬虫 (requests + BeautifulSoup)")
    print("=" * 60)

    # Any failure while prompting/parsing (bad number, EOF) falls back to 3.
    try:
        raw_pages = input("请输入要爬取的页数(默认3): ").strip()
        max_pages = int(raw_pages or "3")
    except Exception:
        max_pages = 3

    spider = DoubanMusicSpider()
    musics = spider.scrape_music(max_pages=max_pages)

    if not musics:
        print("⚠️ 未获取到音乐数据，程序结束")
        return

    # spider.download_covers()
    spider.print_music_info()
    excel_file = spider.export_to_excel()

    # bool sums as 0/1, so this counts the successfully downloaded covers.
    success_count = sum(music.download_success for music in musics)
    print("\n📊 运行统计")
    print(f"🎧 总音乐数: {len(musics)}")
    print(f"🖼️ 封面下载成功: {success_count}/{len(musics)}")
    print(f"📄 Excel文件: {excel_file or '未生成'}")
    print("\n🎉 程序执行完毕")


if __name__ == "__main__":
    main()
