import json

import scrapy
from scrapy import Request

# If there are multiple spiders, define multiple classes here; mind the spider names.


from spider.item_rank_hot_video import SpiderItem, ChannelItem, subChannelItem, CommentItem, hot_video_lItem


class MarketIndustryoverviewSpiderSubChannel(scrapy.Spider):
    """Crawl Bilibili channel categories and yield sub-channel items.

    Flow: ``start_requests`` fetches the category list -> ``parse`` picks the
    target category and requests its sub-channel list -> ``parse2`` follows
    the paginated sub-channel list, accumulating entries in ``meta``, and
    finally yields one ``subChannelItem`` per sub-channel to the pipeline.
    """

    name = 'rank_hot_subvideo3'

    # Category id to expand into sub-channels. Change this (or remove the
    # filter in parse) to crawl a different / every category.
    target_channel_id = 12

    def start_requests(self):
        """Yield the initial request for the category list.

        BUG FIX: the original code called ``Request.get(url)`` — scrapy's
        ``Request`` class has no ``get`` method (it was confused with
        ``requests.get``), so this raised ``AttributeError`` before any
        request was scheduled. Scrapy downloads the yielded Request itself
        and routes the response to ``self.parse`` by default.
        """
        yield Request('https://api.bilibili.com/x/web-interface/web/channel/category/list', meta={})

    def parse(self, response):
        """Parse the category list and request sub-channels of the target category."""
        content = json.loads(response.text)
        # Use .get with defaults so a response missing 'data' or 'categories'
        # yields nothing instead of raising KeyError.
        categories = content.get('data', {}).get('categories', [])
        for category in categories:
            channel_id = category.get('id', -1)
            # Only expand the target category; drop this check to crawl all.
            if channel_id == self.target_channel_id:
                url = 'https://api.bilibili.com/x/web-interface/web/channel/category/channel_arc/list?id={}&offset='.format(channel_id)
                # meta carries the category id and an (initially empty)
                # accumulator for the paginated sub-channel entries.
                yield Request(url, meta={'channel_id': channel_id, 'archive_channels': []}, callback=self.parse2)

    def parse2(self, response):
        """Accumulate paginated sub-channel entries, then emit items.

        Follows the next page (recursing into this callback) while the API
        reports ``has_more``; on the last page yields one ``subChannelItem``
        per collected sub-channel for the pipeline to consume.
        """
        channel_id = response.meta['channel_id']
        archive_channels = response.meta['archive_channels']
        content = json.loads(response.text)
        data = content.get('data', {})
        has_more = data.get('has_more', False)
        offset = data.get('offset', -1)
        # Extend the accumulator with this page's entries.
        archive_channels += data.get('archive_channels', [])
        # Sample payload:
        #   "has_more": true, "offset": "6", "total": 16,
        #   "archive_channels": [{...}]

        if has_more:
            # More pages: follow the next one, carrying the accumulator along.
            url = 'https://api.bilibili.com/x/web-interface/web/channel/category/channel_arc/list?id={}&offset={}'.format(channel_id, offset)
            yield Request(url, meta={'channel_id': channel_id, 'archive_channels': archive_channels}, callback=self.parse2)
        else:
            # Last page: one item per sub-channel, picked up by the pipeline.
            for entry in archive_channels:
                item = subChannelItem()
                item['pid'] = channel_id
                item['id'] = entry.get('id', -1)
                yield item
