import os
import scrapy
import pandas as pd

BASE_PATH = "D:\\Download\\mav\\comics\\"

# Count the number of files directly inside a directory (non-recursive).
def file_count(directory):
    count = 0
    for file_name in os.listdir(directory):
        file_path = os.path.join(directory, file_name)
        if os.path.isfile(file_path):
            count += 1
    return count
    

# Spider definition; run with: scrapy crawl comicsImageSpider
class ComicsimagespiderSpider(scrapy.Spider):
    """Spider that prepares per-comic image folders from ``comics.csv``.

    Reads the comic index CSV under ``BASE_PATH``, creates one directory
    per comic id, checks how many page images are already present, and
    finally yields a request for the comics listing page.
    """

    name = "comicsImageSpider"
    allowed_domains = ["hanime1.me", "i.nhentai.net"]

    # Statically defined start page (alternative to start_requests below):
    # start_urls = ['https://hanime1.me/comics?page=1']

    def start_requests(self):
        """Yield the initial request(s), pre-creating image folders on disk."""
        df = pd.read_csv(f'{BASE_PATH}comics.csv')
        df = df.drop_duplicates()
        # NOTE(review): head(2) limits processing to the first two rows,
        # presumably a debugging cap — remove to process the full CSV.
        df = df.head(2)
        print(df[['id', 'pages', 'image_url']])
        for index, row in df.iterrows():
            # Renamed from `id` so the builtin id() is not shadowed.
            comic_id = row['id'] or ''
            # NOTE(review): a NaN cell is truthy, so these `or` fallbacks
            # only cover empty strings/zeros; int(NaN) would still raise.
            # Confirm the CSV never has missing cells.
            pages = int(row['pages'] or '0')
            image_url = row['image_url'] or ''
            image_path = f'{BASE_PATH}{comic_id}'
            if not os.path.exists(image_path):
                os.makedirs(image_path)
            total = file_count(image_path)
            # Only work on comics whose folder is still missing pages.
            if pages > 0 and total < pages:
                first_image = image_url.split('/')[-1]
                image_ext = first_image.split('.')[-1]
                # NOTE(review): replace() removes every occurrence of the
                # filename substring, not just the trailing one — fine as
                # long as the name never appears earlier in the URL.
                image_root_url = image_url.replace(first_image, '')
                for page in range(pages):
                    page_name = str(page+1).zfill(6)
                    file_name = f'{BASE_PATH}{comic_id}\\{page_name}.{image_ext}'
                    if not os.path.exists(file_name):
                        image_new_url = f'{image_root_url}{page+1}.{image_ext}'
                        # TODO(review): image_new_url is computed but never
                        # requested — yield a scrapy.Request (or an image
                        # pipeline item) here to actually download the page.

        url = 'https://hanime1.me/comics'
        yield scrapy.Request(url)

    def parse(self, response):
        """Default callback for yielded requests; not implemented yet."""
        pass
