import scrapy
import json
import time
import random
from urllib.parse import urljoin
from ximalaya_scraper.items import AlbumItem, TrackItem

class XimalayaSpider(scrapy.Spider):
    """Crawl Ximalaya: category listings -> album pages -> track metadata.

    Flow:
        1. ``start_requests`` seeds popular category index pages.
        2. ``parse_category`` follows album links and pagination.
        3. ``parse_album`` emits an :class:`AlbumItem` and kicks off the
           paginated track-list JSON API.
        4. ``parse_track_list`` emits one detail request per track.
        5. ``parse_track_detail`` attaches the mp3 play URL and emits the
           :class:`TrackItem`.
    """

    name = "ximalaya"
    allowed_domains = ["ximalaya.com", "xmcdn.com"]

    def start_requests(self):
        """Seed the crawl with Ximalaya's popular category index pages."""
        categories = [
            {"name": "有声书", "url": "https://www.ximalaya.com/youshengshu/"},
            {"name": "音乐", "url": "https://www.ximalaya.com/yinyue/"},
            {"name": "相声", "url": "https://www.ximalaya.com/xiangsheng/"},
            {"name": "儿童", "url": "https://www.ximalaya.com/ertong/"},
            {"name": "知识", "url": "https://www.ximalaya.com/zhishi/"},
        ]

        for category in categories:
            # Carry the human-readable category name through the crawl chain.
            yield scrapy.Request(
                url=category['url'],
                callback=self.parse_category,
                meta={'category': category['name']},
            )

    def parse_category(self, response):
        """Parse a category listing page: follow album links and pagination.

        :param response: the category index page; ``meta['category']`` holds
            the category name set in :meth:`start_requests`.
        """
        category = response.meta['category']

        # Album cover anchors — selector depends on the live page markup
        # and may need adjusting if the site layout changes.
        album_links = response.css('a.album-item__cover::attr(href)').getall()

        for link in album_links:
            if '/album/' in link:
                album_url = urljoin('https://www.ximalaya.com', link)
                yield scrapy.Request(
                    url=album_url,
                    callback=self.parse_album,
                    meta={'category': category},
                )

        # Follow the "next page" link, if present, staying in this callback.
        next_page = response.css('a.page-next::attr(href)').get()
        if next_page:
            next_url = urljoin(response.url, next_page)
            yield scrapy.Request(
                url=next_url,
                callback=self.parse_category,
                meta={'category': category},
            )

    def parse_album(self, response):
        """Parse an album detail page into an AlbumItem, then queue its tracks.

        Yields the :class:`AlbumItem` followed by the first track-list API
        request for the album.
        """
        album = AlbumItem()

        # rstrip('/') so URLs with a trailing slash don't produce an empty id
        # (split('/')[-1] on ".../album/123/" would return '').
        album['album_id'] = response.url.rstrip('/').split('/')[-1]
        album['album_title'] = response.css('h1.album-title::text').get() or response.css('title::text').get()
        album['album_cover'] = response.css('img.album-cover::attr(src)').get()
        # Join all text nodes of the intro block; cap length to keep items small.
        album['album_description'] = ''.join(response.css('.album-intro ::text').getall())[:500]
        album['album_category'] = response.meta['category']
        album['album_author'] = response.css('.username::text').get()
        album['album_url'] = response.url
        album['created_time'] = int(time.time())

        # Statistics scraped as raw display strings (e.g. "1.2万") — left
        # unparsed here; normalize downstream in a pipeline if needed.
        album['album_play_count'] = response.css('.play-count::text').get()
        album['album_track_count'] = response.css('.track-count::text').get()
        album['album_subscribe_count'] = response.css('.subscribe-count::text').get()

        yield album

        # Fetch page 1 of the album's track list via the JSON API.
        yield from self.get_track_list(album['album_id'], 1)

    def get_track_list(self, album_id, page_num):
        """Yield a request for one page of the track-list JSON API.

        :param album_id: Ximalaya album identifier (string or int).
        :param page_num: 1-based page number of the paginated track list.
        """
        api_url = f"https://www.ximalaya.com/revision/album/v1/getTracksList?albumId={album_id}&pageNum={page_num}"

        yield scrapy.Request(
            url=api_url,
            callback=self.parse_track_list,
            meta={'album_id': album_id, 'page_num': page_num},
        )

    def parse_track_list(self, response):
        """Parse one page of the track-list API response.

        Emits a detail request per track (or the bare item when the track has
        no id) and recurses to the next page while one exists.
        """
        album_id = response.meta['album_id']
        page_num = response.meta['page_num']

        try:
            data = json.loads(response.text)
        except json.JSONDecodeError:
            self.logger.error(f"JSON解析错误: {response.url}")
            return

        if data.get('ret') != 200 or not data.get('data', {}).get('tracks'):
            # Surface API-level failures instead of silently dropping the page.
            self.logger.warning("Track-list API returned no tracks: %s", response.url)
            return

        for track_data in data['data']['tracks']:
            track = TrackItem()
            track['track_id'] = track_data.get('trackId')
            track['track_title'] = track_data.get('title')
            track['track_duration'] = track_data.get('duration')
            track['track_play_count'] = track_data.get('playCount')
            track['track_like_count'] = track_data.get('likesCount')
            track['track_comment_count'] = track_data.get('commentsCount')
            track['track_cover'] = track_data.get('cover')
            track['album_id'] = album_id
            track['created_time'] = int(time.time())

            if track['track_id']:
                # Fetch the playable URL; the item is emitted by the callback.
                yield scrapy.Request(
                    url=f"https://www.ximalaya.com/revision/track/v1/getTrackInfo?trackId={track['track_id']}",
                    callback=self.parse_track_detail,
                    meta={'track_item': track},
                )
            else:
                # No trackId -> cannot query the detail API; emit what we have
                # rather than losing the item entirely.
                yield track

        # Paginate: the API reports the total number of pages.
        total_page = data['data'].get('totalPage', 1)
        if page_num < total_page:
            yield from self.get_track_list(album_id, page_num + 1)

    def parse_track_detail(self, response):
        """Attach the mp3 play URL to the pending TrackItem and emit it.

        The item is always emitted exactly once, with ``track_mp3_url`` left
        unset when the detail API response is missing or unparsable.
        """
        track = response.meta['track_item']

        try:
            data = json.loads(response.text)
        except json.JSONDecodeError:
            self.logger.error(f"音频详情JSON解析错误: {response.url}")
        else:
            if data.get('ret') == 200 and data.get('data'):
                track['track_mp3_url'] = data['data'].get('playPath')

        yield track