from scrapy.pipelines.files import FilesPipeline
from scrapy.exceptions import DropItem
import os
import scrapy


class DownloadZipPipeline:
    """Item pipeline that caps how many items are allowed through.

    Items beyond ``max_downloads`` are dropped before they ever reach the
    file-download pipeline, so at most ``max_downloads`` files are fetched.
    """

    def __init__(self, max_downloads=6):
        """Initialize the pass-through counter.

        Args:
            max_downloads: Maximum number of items allowed through.
                Defaults to 6 (the previously hard-coded limit), so
                existing behavior is unchanged.
        """
        self.download_count = 0
        self.max_downloads = max_downloads

    def process_item(self, item, spider):
        """Count the item and pass it on, or drop it once the cap is hit.

        Args:
            item: The scraped item; must carry a ``name`` field (used for
                logging only).
            spider: The running spider, used for its logger.

        Returns:
            The unchanged item, when under the limit.

        Raises:
            DropItem: Once ``max_downloads`` items have already passed.
        """
        if self.download_count >= self.max_downloads:
            raise DropItem("已达到最大下载数量")

        self.download_count += 1
        spider.logger.info(f"准备下载第 {self.download_count} 个文件: {item['name']}")
        return item


class ZipFilePipeline(FilesPipeline):
    """FilesPipeline subclass that downloads one zip per item with a
    Referer header and stores it under ``nirsoft_utils/<name>.zip``.
    """

    def get_media_requests(self, item, info):
        """Yield the single download request for the item's ``url`` field.

        Note: this pipeline deliberately reads ``item["url"]`` rather than
        the conventional ``file_urls`` list — each item carries exactly one
        URL. The Referer header is set to the spider's first start URL
        (assumes ``start_urls`` is non-empty — TODO confirm against the
        spider definition, which is outside this file).
        """
        yield scrapy.Request(
            item["url"],  # intentionally a single URL, not the file_urls list
            meta={"name": item["name"]},
            headers={"Referer": info.spider.start_urls[0]}
        )

    def file_path(self, request, response=None, info=None, *, item=None):
        """Build the relative storage path from the name stashed in meta.

        The display name is trimmed at the first '(' (dropping trailing
        annotations), then sanitized to filesystem-safe characters before
        the ``.zip`` suffix is appended.
        """
        filename = request.meta["name"]
        # Drop trailing parenthesized annotations from the display name.
        if '(' in filename:
            filename = filename.split('(')[0].strip()

        # Keep only filesystem-safe characters.
        filename = ''.join(c for c in filename if c.isalnum() or c in '._- ').strip()
        # Guard: a name made entirely of stripped symbols would otherwise
        # produce the hidden path "nirsoft_utils/.zip", and every such item
        # would collide on the same file.
        if not filename:
            filename = "download"
        return os.path.join("nirsoft_utils", filename + ".zip")

    def item_completed(self, results, item, spider):
        """Keep the item if at least one download succeeded, else drop it.

        ``results`` is the FilesPipeline list of ``(success, info)`` pairs,
        one per media request.
        """
        if any(ok for ok, x in results):
            spider.logger.info(f"成功下载: {item['name']}")
            return item
        else:
            spider.logger.warning(f"下载失败: {item['name']}")
            raise DropItem("下载失败")