import pandas as pd
import scrapy
from scrapy.http import HtmlResponse
from acgnScrapy.items import ComicItem
from .. import utils

# 定义 寶島漫畫 爬虫程式, scrapy crawl taiwanmhSpider -o taiwanmh-log.csv
# NG： 網站HTML文字為Unicode
class TaiwanmhspiderSpider(scrapy.Spider):
    """Spider for www.taiwanmh.com (寶島漫畫 comic site).

    Reads start topics from a local ``taiwanmh.csv`` (columns: ``enable``,
    ``title``, ``topic``), crawls each enabled topic's chapter list, then
    each chapter's pages, yielding one ComicItem per page image.

    Run with: ``scrapy crawl taiwanmhSpider -o taiwanmh-log.csv``.
    NOTE: the site serves its HTML text Unicode-escaped.
    """

    name = "taiwanmhSpider"
    allowed_domains = ["www.taiwanmh.com"]

    # When True, extra diagnostic output is printed while crawling.
    debug_mode = True
    root_url = 'https://www.taiwanmh.com/'

    def start_requests(self):
        """Yield one chapter-list request per enabled row of taiwanmh.csv."""
        csv_file = 'taiwanmh.csv'  # fixed: was an f-string with no placeholders
        df = pd.read_csv(csv_file)
        df = df.drop_duplicates()
        df = df.loc[:, ['enable', 'title', 'topic']]
        for row in df.itertuples():
            enable = int(row.enable)
            title = str(row.title)
            topic = str(row.topic)
            if enable != 0:  # rows with enable == 0 are skipped
                start_url = f'{self.root_url}{topic}'
                print(f'start_requests url={start_url}')
                args = {
                    "title": title,
                    "topic": topic,
                }
                yield scrapy.Request(
                    url = start_url,
                    callback = self.parse,
                    cb_kwargs = {'args': args},
                )

    # Parse a topic page: yield one detail request per chapter link.
    def parse(self, response: HtmlResponse, **kwargs):
        args = kwargs["args"]
        if self.debug_mode:  # fixed: only dump the raw body when debugging
            print("args=", args)
            print(response.body)
        sel = scrapy.Selector(response)
        list_items = sel.css('ul#ul_chapter1 > li')
        for list_item in list_items:
            detail_url = list_item.css('a::attr(href)').extract_first() or ''
            if detail_url != '':
                title = list_item.css('a::attr(title)').extract_first() or ''
                item = ComicItem()
                item['site'] = self.name.replace("Spider","")
                item['title'] = args['title']
                item['topic'] = args['topic']
                item['detail_url'] = detail_url
                item['id'] = title
                if self.debug_mode:
                    # fixed: previously interpolated the builtin `id` function
                    print(f'parse id={title}, detail_url={detail_url}')
                # Crawl the chapter's first page for its images.
                yield scrapy.Request(
                    url = f'{self.root_url}{item["detail_url"]}',
                    callback = self.parse_detail,
                    cb_kwargs = {'item': item}
                )

    # Parse a chapter's first page; yield its image and requests for the rest.
    def parse_detail(self, response: HtmlResponse, **kwargs):
        """The pager's last link looks like ``<prefix>-<last_idx>-<name>``;
        every other page URL is derived from it (assumed URL scheme — TODO
        confirm against the live site)."""
        item = kwargs["item"]
        sel = scrapy.Selector(response)
        last_page = sel.css('div.mypage > div.num > a:nth-last-child(1)::attr(href)').extract_first() or ''
        if last_page != "":
            last_list = last_page.split("-")
            # fixed: compared the list itself (not its length) to 2 -> TypeError
            if len(last_list) >= 2:
                last_idx = last_list[-2]
                last_name = last_list[-1]
                # fixed: str.replace() was missing its replacement argument
                pre_text = last_page.replace(f'-{last_idx}-{last_name}', '')
                # fixed: pre_text[0] would raise IndexError on an empty string
                if pre_text.startswith("/"):
                    pre_text = pre_text[1:]
                last_idx = int(last_idx) - 1
                for idx in range(last_idx):
                    if idx == 0:
                        # Page 1's image is on the response we already have.
                        image_url = sel.css('div.ptview > img::attr(src)').extract_first() or ''
                        if image_url != '':
                            item["image_index"] = idx + 1
                            item["image_url"] = image_url
                            yield item
                    else:
                        url = f'{self.root_url}{pre_text}-{str(idx)}-{last_name}'
                        next_item = ComicItem()
                        next_item['site'] = item['site']
                        next_item['title'] = item['title']
                        next_item['topic'] = item['topic']
                        next_item['detail_url'] = item['detail_url']
                        next_item['id'] = item['id']
                        next_item["image_index"] = idx + 1
                        # Crawl the next page for its single image.
                        yield scrapy.Request(
                            url = url,
                            callback = self.parse_next_detail,
                            cb_kwargs = {'item': next_item}
                        )


    # Parse a follow-up page: attach its image URL and yield the item.
    def parse_next_detail(self, response: HtmlResponse, **kwargs):
        item = kwargs["item"]
        sel = scrapy.Selector(response)
        image_url = sel.css('div.ptview > img::attr(src)').extract_first() or ''
        if image_url != '':
            item["image_url"] = image_url
            yield item