import json
import time

import scrapy


class InformationSpider(scrapy.Spider):
    """Crawl Bilibili's ``newlist_rank`` API for hot-ranked video metadata.

    For each category id, page 1 is fetched first to discover the total
    page count (``numPages``); every page of that category is then
    requested and each video record is yielded as a flat dict item.
    """

    name = "information"
    allowed_domains = ["bilibili.com"]

    # Crawl parameters: category ids and the (fixed) ranking time window.
    # Adjust these instead of editing the URL string.
    cate_ids = [258, 227, 247, 245, 246, 240, 248, 176]
    page_size = 30
    time_from = "20241101"
    time_to = "20241108"

    def _api_url(self, cate_id, page):
        """Build the newlist_rank API URL for one (category, page) pair.

        Single source of truth for the query string — previously the URL
        was duplicated in ``start_requests`` and ``parse1``.
        """
        return (
            "https://api.bilibili.com/x/web-interface/newlist_rank"
            "?main_ver=v3&search_type=video&view_type=hot_rank&copy_right=-1"
            f"&new_web_tag=1&order=click&cate_id={cate_id}&page={page}"
            f"&pagesize={self.page_size}&time_from={self.time_from}&time_to={self.time_to}"
        )

    def start_requests(self):
        """Seed one page-1 request per category to learn its page count."""
        for cate_id in self.cate_ids:
            yield scrapy.Request(
                url=self._api_url(cate_id, 1),
                callback=self.parse1,
                meta={"cate_id": cate_id},
            )

    def parse1(self, response):
        """Read ``numPages`` from the page-1 response and fan out over all pages."""
        result = response.json()
        cate_id = response.meta["cate_id"]
        try:
            num_pages = result["data"]["numPages"]
        except (KeyError, TypeError):
            # Malformed or empty payload: log and skip this category
            # instead of silently swallowing every possible error.
            self.logger.warning("no numPages for cate_id=%s: %r", cate_id, result)
            return
        # The API's pages are 1-based: range(1, num_pages + 1).
        # The previous range(num_pages) requested the invalid page=0 and
        # missed the last page.
        for page in range(1, num_pages + 1):
            yield scrapy.Request(url=self._api_url(cate_id, page), callback=self.parse2)

    def parse2(self, response):
        """Yield one item per video record on a result page."""
        result = response.json()
        for information in result["data"]["result"]:
            yield {
                # NOTE(review): 'title' is filled from the API's
                # 'description' field — looks intentional upstream, but
                # confirm against the API payload.
                "title": information["description"],
                "author": information["author"],
                "pubdate": information["pubdate"],
                "play": information["play"],
            }
