import scrapy
from scrapy.http import HtmlResponse
from comicsScrapy.items import ComicItem
from .. import utils, settings, middlewares

# Load cookies once at module import time, via the project's middlewares helper.
COOKIES_DICT = middlewares.get_cookies_dict()

# Spider definition; run with: scrapy crawl comicsSpider -o comics.csv
class ComicsspiderSpider(scrapy.Spider):
    """Spider for hanime1.me comic listings.

    Crawls every listing page under ``start_url``, follows each comic card
    to its detail page, and yields one ``ComicItem`` per comic.
    Run with: ``scrapy crawl comicsSpider -o comics.csv``.
    """
    name = "comicsSpider"
    allowed_domains = ["hanime1.me"]

    # When True, dump the raw response body of the first page (debug aid).
    debug_mode = False
    # Set to "<<TEST>>" to crawl only the first listing page.
    run_mode = ""
    title = "H漫画"
    topic_name = "comics"
    #start_url = 'https://hanime1.me/comics'
    start_url = 'https://hanime1.me/tags/Webtoon'
    # Maps the Chinese metadata labels shown on a detail page to item field names.
    tag_dict = {'標籤':'tag', '作者':'author', '語言':'language', '分類':'classify', '頁數':'pages', '上傳':'upload', '同人':'coterie', '角色':'role', '社團':'club'}

    # 静态指定爬虫页面（二选一）
    #start_urls = ['https://hanime1.me/comics?page=1']

    def _page_url(self, pageno):
        """Return the listing URL for 1-based page number *pageno*.

        Picks '?' when ``start_url`` has no query string yet, '&' otherwise.
        (The original always appended '&page=', which produced a malformed
        URL for query-less start URLs such as /tags/Webtoon.)
        """
        sep = '&' if '?' in self.start_url else '?'
        return f'{self.start_url}{sep}page={pageno}'

    # Dynamically generate the first request (alternative to start_urls above).
    def start_requests(self):
        print(f'start_requests url={self.start_url}')
        yield scrapy.Request(
            url = self.start_url,
            callback = self.request_detail,
            #cookies=COOKIES_DICT
        )

    def request_detail(self, response: HtmlResponse, **kwargs):
        """Read the pagination control to find the last page number, then
        schedule one request per listing page (a single page in TEST mode)."""
        print(f'request_detail :')
        if self.debug_mode:
            # Full-body dump is noisy; gate it behind the class's debug flag.
            print(response.body)
        sel = scrapy.Selector(response)
        last_text = sel.css('div.search-pagination.hidden-xs > ul > li:nth-last-child(2) > a::text').extract_first()
        # Fall back to one page when the pagination control is missing or
        # non-numeric (the original crashed on int(None) here).
        last_pageno = int(last_text) if last_text and last_text.strip().isdigit() else 1
        print(f'{self.title} Page No: {last_pageno}   {self.run_mode}')
        # TEST mode crawls only page 1; otherwise crawl every page.
        page_count = 1 if self.run_mode == "<<TEST>>" else last_pageno
        for page in range(page_count):
            url = self._page_url(page + 1)
            print(f'request_detail page={page}, url={url}')
            yield scrapy.Request(url=url)

    def parse(self, response: HtmlResponse, **kwargs):
        """Parse one listing page: build a ComicItem stub per comic card and
        follow each card's link to ``parse_detail``."""
        sel = scrapy.Selector(response)
        for card in sel.css('div.comic-rows-videos-div'):
            detail_url = card.css('a::attr(href)').extract_first()
            if not detail_url:
                # Card without a link: skip (the original crashed on
                # None.split when the selector missed).
                continue
            item = ComicItem()
            item["topic"] = "comic"
            item["download_result"] = False
            item["is_pass"] = False
            item['detail_url'] = detail_url
            # The trailing URL path segment is the comic id.
            item['id'] = utils.trim(detail_url.split('/')[-1])
            # Descend into the detail page, carrying the partial item along.
            yield scrapy.Request(
                url = detail_url,
                callback = self.parse_detail,
                cb_kwargs = {'item': item}
            )

    def parse_detail(self, response: HtmlResponse, **kwargs):
        """Fill the ComicItem passed via cb_kwargs with title, subtitle,
        cover image URL and the tag metadata rows, then yield it."""
        item = kwargs["item"]
        sel = scrapy.Selector(response)
        title = sel.css('h3.comics-metadata-top-row > span.pretty::text').extract_first() or ''
        # Strip boilerplate quality suffixes sometimes embedded in the title.
        title = title.replace("（高画質 / 中出 /60fps）", "")
        title = title.replace("（高 画質 / 中出 /60fps）", "")
        item['title'] = utils.trim(title.split('|')[-1])
        subtitle = sel.css('h4.comics-metadata-margin-top > span.pretty::text').extract_first() or ''
        item['subtitle'] = utils.trim(subtitle.split('|')[-1])
        item['image_url'] = utils.trim(sel.css('div.comics-panel-margin > div > div > a > img::attr(data-src)').extract_first())

        # Parse metadata rows of the form "<label>：<values...>".
        for row in sel.css('div.comics-metadata-margin-top > h5'):
            label = row.css('::text').extract_first() or ''
            # partition never raises, unlike split() which the original used
            # and which raised ValueError on rows without a full-width colon.
            name, colon, _ = label.partition("：")
            if not colon:
                continue
            name = utils.trim(name)
            if name not in self.tag_dict:
                # NOTE: the original looked up tag_dict[name] BEFORE this
                # membership check, raising KeyError on unknown labels.
                continue
            key = self.tag_dict[name]
            values = [utils.trim(div.css('::text').extract_first()) for div in row.css('div')]
            if key in ('pages', 'upload'):
                # Single-valued fields: keep only the first entry.
                if values:
                    item[key] = utils.trim(values[0])
            else:
                item[key] = values
        yield item
