import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from nirsoft_crawler.items import NirsoftItem
from scrapy.exceptions import CloseSpider

class NirsoftSpider(CrawlSpider):
    """Crawl www.nirsoft.net, follow utility detail pages, and yield items
    for up to ``max_download`` ZIP archives (downloaded by the files pipeline).
    """

    name = 'nirsoft_spider'
    allowed_domains = ['www.nirsoft.net']
    start_urls = ['https://www.nirsoft.net/']
    max_download = 5          # stop after this many ZIP files
    downloaded_count = 0      # number of ZIPs scheduled so far
    downloaded_urls = set()   # URLs already handled, to avoid duplicate downloads

    rules = (
        # Rule 1: follow tool detail pages linked from the index pages.
        Rule(
            LinkExtractor(
                allow=r'/utils/.*\.html',
                restrict_xpaths='//ul/li/ul/li/a'
            ),
            callback='parse_detail',
            follow=True
        ),
        # Rule 2: extract .zip download links from the "downloadline" blocks
        # on a detail page.
        Rule(
            LinkExtractor(
                allow=r'.*\.zip$',        # only links ending in .zip
                restrict_css='.downloadline',
                # Empty deny list so LinkExtractor's default extension
                # blacklist does not drop .zip links; filtering is done
                # solely by `allow`.
                deny_extensions=[]
            ),
            callback='parse_item',
            process_request='process_request'
        ),
    )

    def process_request(self, request, response):
        """Tune each ZIP request before it is scheduled.

        BUGFIX: the previous version set
        ``request.meta['handle_httpstatus_list'] = [302]``, which *disables*
        RedirectMiddleware for 302 responses — the callback would receive the
        raw 302 instead of the final file, the opposite of the intended
        "better redirect support". Removing it lets Scrapy follow redirects
        normally.
        """
        request.meta['download_timeout'] = 60  # ZIP downloads can be slow
        return request

    def parse_detail(self, response):
        """Debug hook for tool detail pages: log the ZIP links found there."""
        self.logger.debug(f"进入详情页: {response.url}")
        # Cover both markup shapes: class on a container element
        # (<p class="downloadline"><a href=...>) and class directly on the
        # anchor (<a class="downloadline" href=...>); keep only .zip targets
        # so the log message matches what is actually counted.
        hrefs = response.css(
            '.downloadline a::attr(href), a.downloadline::attr(href)'
        ).getall()
        zip_links = [href for href in hrefs if href.endswith('.zip')]
        self.logger.debug(f"此页面找到 {len(zip_links)} 个ZIP链接")
        for link in zip_links:
            self.logger.debug(f"发现ZIP链接: {link}")

    def parse_item(self, response):
        """Yield a download item for one ZIP URL; stop once the quota is hit.

        Raises:
            CloseSpider: when ``max_download`` items have been produced.
        """
        if self.downloaded_count >= self.max_download:
            raise CloseSpider("已达到最大下载数量")  # quota reached, shut down

        file_url = response.url
        file_name = file_url.rsplit('/', 1)[-1]

        # The same ZIP can be linked from several pages — skip repeats.
        if file_url in self.downloaded_urls:
            self.logger.debug(f"跳过重复下载: {file_url}")
            return
        self.downloaded_urls.add(file_url)

        # BUGFIX: Scrapy records the redirect history under the meta key
        # 'redirect_urls' (a plain list of URLs), not 'redirect_chain' with
        # (url, status) tuples — the original branch could never fire.
        redirect_urls = response.meta.get('redirect_urls', [])
        if redirect_urls:
            steps = [f"重定向步骤 {i+1}: {url}" for i, url in enumerate(redirect_urls)]
            self.logger.info(f"重定向链: {', '.join(steps)} -> {file_url}")

        self.downloaded_count += 1
        self.logger.info(f"[已下载 {self.downloaded_count}/{self.max_download}]: {file_name}")

        item = NirsoftItem()
        item['file_urls'] = [file_url]
        item['file_name'] = file_name
        yield item

    def closed(self, reason):
        """Log a short summary when the spider shuts down."""
        if self.downloaded_count == self.max_download:
            self.logger.info(f"成功下载{self.max_download}个ZIP文件，爬虫正常关闭")
        else:
            # BUGFIX: the original referenced the non-existent attribute
            # ``self.download`` and raised AttributeError on this path.
            self.logger.info(f"爬虫关闭，共下载{self.downloaded_count}")