import pandas as pd
import scrapy
from scrapy.http import HtmlResponse
from acgnScrapy.items import ComicItem
from .. import utils

# ACGN comic-site spider. Run with: scrapy crawl acgnSpider -o acgn-log.csv
class AcgnspiderSpider(scrapy.Spider):
    """Crawl chapter lists and per-chapter image URLs from comic.acgn.cc.

    The set of comics to crawl is driven by a local ``comics.csv`` file
    with columns ``enable``, ``title``, ``topic``.
    """

    name = "acgnSpider"
    allowed_domains = ["comic.acgn.cc"]
    debug_mode = False  # when True, print every parsed chapter id / url
    root_url = 'https://comic.acgn.cc/'
    # topic = 'manhua-chongfengxing'
    # title = "蟲奉行"

    def start_requests(self):
        """Yield one request per enabled row of comics.csv.

        Each request carries the comic's title/topic via ``cb_kwargs`` so
        :meth:`parse` can stamp them onto the emitted items.
        """
        csv_file = 'comics.csv'  # was a pointless f-string with no placeholders
        df = pd.read_csv(csv_file)
        df = df.drop_duplicates()
        df = df.loc[:, ['enable', 'title', 'topic']]
        for row in df.itertuples():
            enable = int(row.enable)
            title = str(row.title)
            topic = str(row.topic)
            if enable == 0:
                continue  # row disabled — skip (guard clause, was a nested if)
            # Normalize the topic: strip any file extension, then rebuild the
            # canonical .htm URL. Order matters: ".html" must go first so a
            # trailing "l" is not left behind by the ".htm" replacement.
            topic = topic.replace(".html", "").replace(".htm", "")
            start_url = f'{self.root_url}{topic}.htm'
            print(f'start_requests url={start_url}')
            args = {
                "title": title,
                "topic": topic,
            }
            yield scrapy.Request(
                url=start_url,
                callback=self.parse,
                cb_kwargs={'args': args},
            )

    def parse(self, response: HtmlResponse, **kwargs):
        """Parse a comic's chapter-list page.

        For every chapter ``<li>``, build a :class:`ComicItem` with a
        normalized, zero-padded chapter id and follow the chapter link to
        collect its image URLs.
        """
        args = kwargs["args"]
        # Response objects expose .css() directly; no explicit Selector needed.
        for list_item in response.css('div#comic_chapter > ul > li'):
            detail_url = list_item.css('a::attr(href)').get() or ''
            raw_id = list_item.css('a::text').get() or ''
            chapter_id = raw_id  # renamed from `id` — don't shadow the builtin
            item = ComicItem()
            item['site'] = self.name.replace("Spider", "")
            item['title'] = args['title']
            item['topic'] = args['topic']
            item['detail_url'] = detail_url
            if chapter_id == "創刊號":  # "first issue" marker → chapter 1
                chapter_id = 1
            else:
                # Strip decorative tokens (volume/chapter markers, suffixes)
                # before trying to interpret the remainder as a number.
                clear_list = "VOL,Vol_,CH,真.,真,第,話,卷,夜,完,試看,附錄,番外,高清版本,完全檔案,原作版,."
                for clear_str in clear_list.split(","):
                    chapter_id = chapter_id.replace(clear_str, "")
                chapter_id = utils.trim(chapter_id.split('(')[0])
                if utils.is_chinese_arabic(chapter_id):
                    chapter_id = utils.chinese_to_arabic(chapter_id)
                elif chapter_id.isnumeric():
                    chapter_id = int(chapter_id)
                else:
                    # Composite ids like "3-4" or "3和4": zero-pad each part
                    # and rejoin with "-" so lexical sort matches numeric order.
                    sep_str = "-"
                    pos = chapter_id.find(sep_str)
                    if pos < 0:
                        sep_str = "和"
                        pos = chapter_id.find(sep_str)
                    if pos >= 0:
                        parts = chapter_id.split(sep_str)
                        id_str = "-".join(str(p).zfill(6) for p in parts)
                        if id_str != "":
                            chapter_id = id_str
            if isinstance(chapter_id, int):  # was the anti-pattern type(...) == int
                chapter_id = str(chapter_id).zfill(6)
            if chapter_id == "":
                chapter_id = raw_id  # cleaning removed everything; keep original text
            item["id"] = chapter_id
            if self.debug_mode:
                print(f'parse id={chapter_id}, detail_url={detail_url}')

            # Follow the chapter link to scrape its image list.
            yield scrapy.Request(
                url=f'{self.root_url}{item["detail_url"]}',
                callback=self.parse_detail,
                cb_kwargs={'item': item},
            )

    def parse_detail(self, response: HtmlResponse, **kwargs):
        """Parse one chapter page and collect its image URLs.

        Images carry their real URL in the non-standard ``_src`` attribute
        (lazy loading); empty values are dropped.
        """
        item = kwargs["item"]
        image_list = []
        for pic in response.css('div#pic_list > div.pic'):
            image_url = pic.css('::attr(_src)').get() or ''
            if image_url:
                image_list.append(image_url)
        item["image_list"] = image_list
        yield item

