import pandas as pd
import scrapy
from scrapy.http import HtmlResponse
from acgnScrapy.items import ComicItem
from .. import utils

# NG: "next page" (pagination) handling is still missing in parse_detail.
# Spider for the idmzj (动漫之家) comic site. Run: scrapy crawl idmzjSpider -o idmzj-log.csv
class IdmzjspiderSpider(scrapy.Spider):
    """Crawl comic chapter lists and chapter images from idmzj.com.

    Driven by rows of ``idmzj.csv`` (columns: enable, title, topic); one
    ComicItem is emitted per chapter found on each enabled topic page.
    """

    name = "idmzjSpider"
    # Use the bare registrable domain so that both comic.idmzj.com and
    # www.idmzj.com pass Scrapy's offsite filter. The previous value
    # ("comic.idmzj.com") caused the follow-up requests built from
    # root_url (www.idmzj.com) to be silently dropped by
    # OffsiteMiddleware.
    allowed_domains = ["idmzj.com"]
    debug_mode = False  # when True, parse() prints each chapter it finds
    root_url = 'https://www.idmzj.com/'

    def start_requests(self):
        """Read idmzj.csv and yield one request per enabled topic row.

        Rows with ``enable == 0`` are skipped; duplicates are dropped.
        """
        df = pd.read_csv('idmzj.csv')
        df = df.drop_duplicates()
        df = df.loc[:, ['enable', 'title', 'topic']]
        for row in df.itertuples():
            if int(row.enable) == 0:
                continue
            title = str(row.title)
            # Strip any trailing .html/.htm so a clean URL can be rebuilt.
            topic_path = str(row.topic).replace(".html", "").replace(".htm", "")
            topic = topic_path.split('/')[-1]
            start_url = f'{self.root_url}{topic_path}.html'
            print(f'start_requests url={start_url}')
            yield scrapy.Request(
                url = start_url,
                callback = self.parse,
                cb_kwargs = {'args': {'title': title, 'topic': topic}},
            )

    def parse(self, response: HtmlResponse, **kwargs):
        """Parse a topic page: emit one request per chapter list entry.

        ``kwargs['args']`` carries the title/topic captured in
        start_requests; each chapter link is followed to parse_detail.
        """
        args = kwargs["args"]
        for list_item in response.css('div.tab-content > ul > li'):
            detail_url = list_item.css('a::attr(href)').get() or ''
            raw_id = list_item.css('a > span.list_con_zj::text').get() or ''
            item = ComicItem()
            item['site'] = self.name.replace("Spider", "")
            item['title'] = args['title']
            item['topic'] = args['topic']
            item['detail_url'] = detail_url
            # Extract the chapter number from labels like "第12话".
            # If the "话" marker is absent (or at position 0 after "第"
            # removal), fall back to the raw label unchanged.
            chapter_id = raw_id.replace("第", "")
            sep_pos = chapter_id.find("话")
            item["id"] = chapter_id[:sep_pos] if sep_pos > 0 else raw_id
            if self.debug_mode:
                print(f'parse id={item["id"]}, detail_url={detail_url}')

            # Follow the chapter link to collect its page images.
            yield scrapy.Request(
                url = f'{self.root_url}{item["detail_url"]}',
                callback = self.parse_detail,
                cb_kwargs = {'item': item}
            )

    def parse_detail(self, response: HtmlResponse, **kwargs):
        """Collect the chapter's image URL(s) and emit the finished item.

        TODO(review): only the first image of the first page is captured —
        "next page" pagination is not followed yet.
        """
        item = kwargs["item"]
        image_list = []
        image_url = response.css('div.scrollbar-demo-item > img::attr(src)').get() or ''
        if image_url:
            image_list.append(image_url)
        item["image_list"] = image_list
        yield item
