import os

import scrapy


class A漫画Spider(scrapy.Spider):
    """Crawl zh-hant romance webtoons from webtoons.com and save panel images.

    Flow: genre listing -> comic detail page -> episode viewer -> image files
    written under ``./static/``.
    """

    name = "漫画"

    async def start(self):
        """Entry point: request the romance genre listing page."""
        yield scrapy.Request('https://www.webtoons.com/zh-hant/genres/romance?sortOrder=MANA')

    async def parse(self, response):
        """Parse the genre listing and follow each comic's detail page.

        The comic's display name and numeric title id (``data-title-no``)
        are forwarded via request meta for use when building viewer URLs.
        """
        items = response.xpath('//ul[@class="webtoon_list"]/li/a')

        for item in items:
            href = item.attrib['href']  # fixed typo: was `herf`
            link_id = item.attrib['data-title-no']
            name = item.xpath('./div[@class="info_text"]/strong/text()').get()

            yield scrapy.Request(
                href,
                callback=self.parse2,
                meta={'name': name, 'link_id': link_id},
            )
            # NOTE(review): deliberate debug limit — only the first comic in
            # the listing is crawled. Remove this break to crawl everything.
            break

    async def parse2(self, response):
        """Parse a comic's episode list and request each episode's viewer page."""
        episodes = response.xpath('//li[@class="_episodeItem"]/a')
        if not episodes:
            # Layout changed or list is empty — nothing to crawl here.
            self.logger.warning('No episodes found on %s', response.url)
            return
        first = episodes[0]

        # Comic slug (e.g. "childhood-friend-complex") taken from the episode
        # URL shape: .../romance-m/<slug>/<episode>/viewer?...
        slug = first.attrib['href'].split('/')[-3]
        # BUG FIX: title_no was hard-coded to 6405; use the id collected in
        # parse() so the viewer URL matches the comic actually being crawled.
        title_no = response.meta['link_id']

        # NOTE(review): debug limit — only episode 1 is fetched; widen the
        # range to fetch more episodes.
        for episode_no in range(1, 2):
            # %E7%AC%AC / %E8%A9%B1 are the URL-encoded "第" / "話" that bracket
            # the episode number in the localized viewer path.
            viewer_url = (
                f"https://www.webtoons.com/zh-hant/romance-m/{slug}/"
                f"%E7%AC%AC{episode_no}%E8%A9%B1/viewer"
                f"?title_no={title_no}&episode_no={episode_no}"
            )
            self.logger.debug('Viewer URL: %s', viewer_url)
            episode_title = first.xpath('./span[@class="subj"]/span/text()').get()

            # The viewer endpoint rejects requests without a browser-like UA
            # and a same-site referer.
            # BUG FIX: referer was hard-coded to one comic's list page; use
            # the list page we are actually on.
            headers = {
                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/142.0.0.0 Safari/537.36",
                "referer": response.url,
            }

            yield scrapy.Request(
                viewer_url,
                callback=self.parse3,
                meta={'name': response.meta['name'], 'p': episode_title},
                headers=headers,
            )

    async def parse3(self, response):
        """Parse the episode viewer page and request every panel image."""
        # NOTE(review): trailing space inside the class value is present in
        # the site's markup — do not "fix" it or the XPath stops matching.
        imgs = response.xpath('//div[@class="viewer_img _img_viewer_area "]/img')
        self.logger.info("%s开始爬取%s", response.meta['name'], response.meta['p'])
        for img in imgs:
            # Panels are lazy-loaded; the real URL lives in `data-url`.
            img_url = img.attrib['data-url']
            # The image CDN also requires a browser-like user agent.
            headers = {
                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/142.0.0.0 Safari/537.36",
            }

            yield scrapy.Request(
                img_url,
                callback=self.parse4,
                meta={'name': response.meta['name']},
                headers=headers,
            )

    async def parse4(self, response):
        """Persist one downloaded panel image under ``./static/``."""
        # File stem = last URL path component, minus query string and extension.
        stem = response.url.split('/')[-1].split('?')[0].split('.')[0]
        out_dir = './static'
        # BUG FIX: the original open() raised FileNotFoundError when ./static
        # did not exist; create it on demand.
        os.makedirs(out_dir, exist_ok=True)
        with open(f'{out_dir}/{stem}.png', 'wb') as f:
            f.write(response.body)
# NOTE: alternative output path when running from a nested working directory: ../../static/{name}.png
