import json

import scrapy


class A360movieSpider(scrapy.Spider):
    """Scrape hot comedy movie titles from the 360kan JSONP filter API.

    Requests pages 1-9 of the filter listing and yields one item per movie:
    ``{"title": <movie title>}``.
    """

    name = "360movie"

    # Headers captured from a browser session. NOTE(review): the cookie is a
    # hard-coded session value and will likely expire — verify whether the API
    # actually requires it, or refresh it periodically.
    _HEADERS = {
        "cookie": "__guid=121874957.1794855149851560400.1762222375785.957; __huid=11RHWV8TUMYV2iJHeP8dSF2JD1xHt9BXRq2LgxkWmj2OQ%3D; ___sid=121874957.3733684913432335000.1762860154783.4749; refer_scene=47005; monitor_count=3; __DC_gid=121874957.196826631.1762222375839.1762861595844.43",
        "referer": "https://www.360kan.com/dianying/list?rank=rankhot&cat=%E5%96%9C%E5%89%A7&year=&area=&act=&pageno=20",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/142.0.0.0 Safari/537.36",
    }

    async def start(self):
        """Yield one API request per result page (pages 1 through 9)."""
        for page in range(1, 10):
            url = (
                "https://api.web.360kan.com/v1/filter/list"
                "?catid=1&rank=rankhot&cat=%E5%96%9C%E5%89%A7&year=&area=&act="
                f"&size=35&pageno={page}&callback=__jp{page}"
            )
            yield scrapy.Request(url, headers=self._HEADERS, callback=self.parse)

    def parse(self, response):
        """Strip the JSONP wrapper, parse the payload, yield one item per movie.

        Raises:
            ValueError: if the response contains no parenthesized JSONP body.
            KeyError: if the payload lacks the expected data/movies keys.
        """
        # The body looks like ``__jpN({...});``. Locate the outermost parens
        # instead of slicing ``[6:-2]``, which silently breaks whenever the
        # callback name length changes (e.g. pageno >= 10 -> ``__jp10``) or
        # trailing whitespace is present.
        body = response.text
        opening = body.index("(") + 1
        closing = body.rindex(")")
        payload = json.loads(body[opening:closing])

        movies = payload["data"]["movies"]
        # Use the framework logger rather than print(), and yield items so
        # they flow through Scrapy's pipelines/feed exporters.
        self.logger.info("parsed %s: %d movies", response.url, len(movies))
        for movie in movies:
            yield {"title": movie["title"]}