import pandas as pd
import scrapy
from scrapy.http import HtmlResponse
from acgnScrapy.items import ComicItem
from .. import utils

# NG: images fail to download (known issue — likely CDN anti-hotlinking)
# Spider definition; run with: scrapy crawl dm5Spider -o dm5-log.csv
class Dm5spiderSpider(scrapy.Spider):
    """Spider for comics hosted on www.dm5.com.

    Run with: ``scrapy crawl dm5Spider -o dm5-log.csv``.
    Seeds are read from a local ``dm5.csv`` control file; each enabled row
    starts a crawl at ``{root_url}{topic}/`` and follows "下一章"
    (next-chapter) links until none remain.

    NOTE(review): the original author flagged that images fail to download
    ("NG") — presumably the CDN rejects requests without a Referer header;
    verify the image pipeline's request headers.
    """

    name = "dm5Spider"
    allowed_domains = ["www.dm5.com", "manhua1035zjcdn123.cdndm5.com"]
    debug_mode = False
    root_url = 'https://www.dm5.com/'

    def start_requests(self):
        """Yield one initial request per enabled row of ``dm5.csv``.

        The CSV must provide ``enable``, ``title`` and ``topic`` columns;
        rows with ``enable == 0`` are skipped. Each request carries an
        ``args`` dict (title, topic, chapter id) via ``cb_kwargs``.
        """
        # Plain string literal — the original used an f-string with no
        # placeholders here.
        df = pd.read_csv('dm5.csv')
        df = df.drop_duplicates()
        df = df.loc[:, ['enable', 'title', 'topic']]
        for row in df.itertuples():
            if int(row.enable) == 0:
                continue  # row disabled in the control file
            title = str(row.title)
            topic = str(row.topic)
            start_url = f'{self.root_url}{topic}/'
            print(f'start_requests url={start_url}')
            args = {
                "title": title,
                "topic": topic,
                "id": 1,  # chapter counter, bumped on each follow-up request
            }
            yield scrapy.Request(
                url=start_url,
                callback=self.parse,
                cb_kwargs={'args': args},
            )

    def parse(self, response: HtmlResponse, **kwargs):
        """Extract one chapter's image URLs; follow the next-chapter link.

        Yields a ``ComicItem`` for the current page and, when the last
        pager link reads "下一章", schedules a request for it with a
        chapter id incremented by one.
        """
        args = kwargs["args"]
        # A Response supports .css() directly — no scrapy.Selector wrapper
        # needed; .get()/.getall() are the modern .extract_first()/.extract().
        title = response.css('div.title > span:nth-last-child(1)::text').get() or ''
        item = ComicItem()
        item['site'] = self.name.replace("Spider", "")
        item['id'] = args["id"]
        item['title'] = utils.trim(title)
        item['topic'] = args['title']
        # Images are lazy-loaded: the real URL is in data-src, not src.
        item["image_list"] = [
            url
            for url in response.css('div#barChapter > img::attr(data-src)').getall()
            if url
        ]
        next_node = response.css('div.view-paging > div.container > a:nth-last-child(1)')
        next_text = next_node.css('::text').get() or ''
        if next_text == "下一章":
            next_url = next_node.css('::attr(href)').get() or ''
            if next_url:
                # urljoin handles root-relative and absolute hrefs correctly;
                # the previous root_url + href concatenation produced a
                # double slash for hrefs that already start with '/'.
                next_url = response.urljoin(next_url)
                # Fresh dict instead of mutating the shared args in place —
                # avoids aliasing between this item and the queued request.
                next_args = {**args, "id": args["id"] + 1}
                yield scrapy.Request(
                    url=next_url,
                    callback=self.parse,
                    cb_kwargs={'args': next_args},
                )
        yield item


