import scrapy
from scrapy.http import HtmlResponse
from comicsScrapy.items import DmItem
from .. import utils, settings


# 定义 hanime1 裏番动画爬虫程式, scrapy crawl dmSpider -o dm.csv
# Spider for hanime1 "裏番" (uncensored anime) listings.
# Run with: scrapy crawl dmSpider -o dm.csv
class DmspiderSpider(scrapy.Spider):
    """Crawl every page of the hanime1.me '裏番' search results, follow each
    video's detail page, and yield one DmItem per video. When a detail page
    exposes a download page, that page is also fetched to fill
    item['download_list'].
    """

    name = "dmSpider"
    allowed_domains = ["hanime1.me"]

    debug_mode = False   # when True, print each parsed list entry
    run_mode = ""        # set to "<<TEST>>" to crawl only the first result page
    title = "裏番动画"
    topic_name = "DM"
    start_url = 'https://hanime1.me/search?genre=裏番'
    list_selector = 'div.home-rows-videos-wrapper > a'
    image_selector = 'div.home-rows-videos-div > img::attr(src)'

    # Statically declared crawl pages (alternative to start_requests below)
    #start_urls = ['https://hanime1.me/search?genre=裏番&page=1']

    # Dynamically discovered crawl pages (alternative to start_urls above)
    def start_requests(self):
        """Fetch the first search page so pagination can be discovered."""
        yield scrapy.Request(
            url = self.start_url,
            callback = self.request_detail
        )

    def request_detail(self, response: HtmlResponse, **kwargs):
        """Read the last page number from the pagination bar and schedule
        one request per result page (handled by the default parse())."""
        sel = scrapy.Selector(response)
        # The second-to-last <li> of the pagination holds the last page number.
        # Fall back to a single page when the bar is absent (fixes a
        # TypeError: int(None) on single-page result sets).
        last_page_text = sel.css(
            'div.search-pagination > ul.pagination > li:nth-last-child(2) > a::text'
        ).extract_first()
        last_pageno = int(last_page_text) if last_page_text else 1
        print(f'{self.title} Page No: {last_pageno}   {self.run_mode}')
        if self.run_mode=="<<TEST>>":
            # Test mode: only crawl the first page.
            yield scrapy.Request(url=f'{self.start_url}&page=1')
        else:
            for page in range(last_pageno):
                yield scrapy.Request(url=f'{self.start_url}&page={page+1}')

    def parse(self, response: HtmlResponse, **kwargs):
        """Parse one search-result page: build a DmItem per listed video and
        follow its detail page."""
        sel = scrapy.Selector(response)
        list_items = sel.css(self.list_selector)
        for list_item in list_items:
            detail_url = list_item.css('::attr(href)').extract_first()
            if not detail_url:
                # Defensive: skip anchors without an href instead of
                # crashing on detail_url.split() below.
                continue
            item = DmItem()
            item["topic"] = self.topic_name
            item["download_result"] = False
            item["is_pass"] = False
            item['detail_url'] = utils.trim(detail_url)
            # The video id is the value of the 'v=' query parameter.
            item['id'] = utils.trim(detail_url.split('v=')[-1])
            item['image_url'] = utils.trim(list_item.css(self.image_selector).extract_first())
            if self.debug_mode:
                print(f'id={item["id"]}, detail_url={item["detail_url"]}, image={item["image_url"]}')
            # Descend into the detail page to complete the item.
            yield scrapy.Request(
                url = item['detail_url'],
                callback = self.parse_detail,
                cb_kwargs = {'item': item}
            )

    def parse_detail(self, response: HtmlResponse, **kwargs):
        """Parse one video detail page: title, subtitle, author, likes,
        playlist group, tags and download URL. Follows the download page
        when one exists, otherwise yields the finished item."""
        item = kwargs["item"]
        sel = scrapy.Selector(response)
        title = sel.css('h3.video-details-wrapper::text').extract_first() or ''
        # Strip boilerplate quality suffixes from titles (two spacing variants).
        title = title.replace("（高画質 / 中出 /60fps）", "")
        title = title.replace("（高 画質 / 中出 /60fps）", "")
        item['title'] = utils.trim(title)
        subtitle = sel.css('div.video-details-wrapper > div.video-description-panel > div:nth-child(2)::text').extract_first() or ''
        item['subtitle'] = utils.trim(subtitle)
        item['author'] = utils.trim(sel.css('a#video-artist-name::text').extract_first() or '')
        # The like count is the last text node of the like button; guard the
        # empty-match case (the original `extract()[-1]` raised IndexError).
        likes_texts = sel.css('div#video-like-form-wrapper > button > div::text').extract()
        likes_text = utils.trim(likes_texts[-1] if likes_texts else '')
        likes = int(likes_text) if likes_text != '' else 0
        item['likes'] = likes
        # Playlist group label, e.g. "SeriesName / ..." -> keep the part
        # before the first '/'.
        group = sel.css('div#video-playlist-wrapper > div.video-playlist-top > h4:nth-child(1)::text').extract_first() or ''
        item['group'] = utils.trim(group.split('/', maxsplit=1)[0] or group)
        # Episode count text, e.g. "12 videos" -> 12.
        group_count = sel.css('div#video-playlist-wrapper > div.video-playlist-top > h4:nth-child(2)::text').extract_first() or ''
        item['group_count'] = int(group_count.split(' ', maxsplit=1)[0] or '0')
        download_url = sel.css('div.video-buttons-wrapper > a#downloadBtn::attr(href)').extract_first() or ''
        item['download_url'] = utils.trim(download_url)

        # Collect tag names, skipping empty text nodes.
        tag_list = []
        for tag_sel in sel.css('div.video-tags-wrapper > div.single-video-tag > a'):
            tag_name = utils.trim(tag_sel.css('::text').extract_first())
            if tag_name != '':
                tag_list.append(tag_name)
        item['tag'] = tag_list

        if item['download_url'] is not None and item['download_url'] != '':
            # Follow the download page; use the trimmed URL so the request
            # matches the value checked/stored above.
            yield scrapy.Request(
                url = item['download_url'],
                callback = self.parse_download,
                cb_kwargs = {'item': item}
            )
        else:
            yield item

    def parse_download(self, response: HtmlResponse, **kwargs):
        """Parse the download page's table and attach the list of direct
        video links to the item, then yield it."""
        item = kwargs["item"]
        sel = scrapy.Selector(response)
        rows = sel.css('table.download-table > tr')
        download_list = []
        # Skip the header row (rows[0]); collect non-empty hrefs from the rest.
        for row in rows[1:]:
            download_url = utils.trim(row.css('td > a::attr(href)').extract_first() or '')
            if download_url != "":
                download_list.append(download_url)
        item["download_list"] = download_list
        yield item
