# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import pandas as pd
from itemadapter import ItemAdapter
from scrapy.pipelines.images import ImagesPipeline, FilesPipeline
import scrapy
from . import utils
# from .. import tools

# Root directory under which downloaded images are stored (Windows path,
# used by utils.check_directory / utils.check_file and the pipeline below).
ROOT_PATH = "F:\\Download\\mav\\hanime\\"
#ROOT_PATH = "D:\\Download\\mav\\"


# 下载图片管道, scrapy crawl comicsSpider
# Image-download pipeline; run with: scrapy crawl comicsSpider
class HanimescrapyPipeline(ImagesPipeline):
    """Download comic page images listed in a local CSV manifest.

    Reads ``.\\comics.csv`` (columns: id, pages, author, image_url, title),
    checks via :mod:`utils` which pages are already present under
    ``ROOT_PATH``, and yields one :class:`scrapy.Request` per missing page.
    The item passed in by Scrapy is not used as the request source; the CSV
    is the single source of truth.
    """

    # Set True to enable verbose per-request logging.
    debug_mode = False

    def get_media_requests(self, item, info):
        """Yield a download request for every page still missing on disk.

        :param item: scraped item (unused — the CSV manifest drives downloads)
        :param info: Scrapy media-pipeline info object (unused)
        :returns: generator of :class:`scrapy.Request`, each carrying the
            dict from ``utils.check_file`` as ``meta`` (consumed by
            :meth:`file_path`).
        """
        topic = "comics"
        csv_file = f'.\\{topic}.csv'
        df = pd.read_csv(csv_file)
        df = df.drop_duplicates()
        df = df.loc[:, ['id', 'pages', 'author', 'image_url', 'title']]
        for row in df.itertuples():
            # `item_id` instead of `id`: avoid shadowing the builtin.
            item_id = str(row.id)
            pages = int(row.pages)
            author = str(row.author)
            image_url = row.image_url
            if self.debug_mode:
                print(f'get_media_requests author={author}, id={item_id}, pages={pages}, image_url={image_url}')
            # Number of files already present for this comic.
            total = utils.check_directory(topic, author, item_id, ROOT_PATH)
            if pages > 0 and total < pages:
                # `page_info` instead of `info`: the original clobbered the
                # `info` parameter supplied by Scrapy.
                page_info = utils.get_info(author, item_id, image_url)
                for page in range(pages):
                    page_info['page_no'] = page
                    result = utils.check_file(topic, page_info, ROOT_PATH)
                    # Empty new_url means the page already exists — skip it.
                    if result["new_url"] != "":
                        if self.debug_mode:
                            print(f'HanimescrapyPipeline.get_media_requests page={page_info["page_no"]}, new_url={result["new_url"]}')
                        yield scrapy.Request(result["new_url"], meta=result)

    def file_path(self, request, response=None, info=None):
        """Return the storage path for a downloaded image.

        The path comes from the ``file_name`` entry of the request meta set
        in :meth:`get_media_requests`; falls back to ``''`` if missing so a
        malformed meta dict cannot raise ``KeyError`` here.
        """
        if self.debug_mode:
            print(f'HanimescrapyPipeline.file_path folder={request.meta["folder"]}, file_name={request.meta["file_name"]}, new_url={request.meta["new_url"]}')
        return request.meta.get('file_name') or ''

    def item_completed(self, results, item, info):
        """Hook called when all media requests for an item finished.

        Currently a pass-through; returns the item unchanged so later
        pipelines still receive it.
        """
        # tools.CheckFiles(".\\")  # NOTE(review): disabled post-download check
        return item
