import scrapy
from scrapy.exceptions import CloseSpider
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

class NirsoftSpider(CrawlSpider):
    """Crawl www.nirsoft.net and collect ZIP download links from utility pages.

    Follows ``/utils/*.html`` links found in the nested list menus of the
    start page, then extracts ``.zip`` download URLs from each page's
    ``.downloadline`` section. The spider stops itself once
    ``MAX_ZIP_FILES`` links have been yielded.
    """

    name = 'nirsoft_spider'
    allowed_domains = ['www.nirsoft.net']
    start_urls = ['https://www.nirsoft.net/']

    rules = (
        Rule(LinkExtractor(allow=r'/utils/.*\.html', restrict_xpaths='//ul/li/ul/li/a'), callback='parse_item'),
    )

    # Stop crawling after this many ZIP links have been yielded.
    MAX_ZIP_FILES = 6

    # Running total of yielded ZIP links. Class-level default; the first
    # `self.zip_count += 1` creates the per-instance counter. Safe without
    # locking because Scrapy callbacks run in a single reactor thread.
    zip_count = 0

    def parse_item(self, response):
        """Extract ``.zip`` download links from one utility page.

        Parameters
        ----------
        response : scrapy.http.Response
            The downloaded utility page.

        Yields
        ------
        dict
            ``{'url': ..., 'name': ...}`` for each ZIP link, where ``name``
            falls back to the URL's filename when the anchor text is empty.

        Raises
        ------
        CloseSpider
            Once ``MAX_ZIP_FILES`` links have been collected.
        """
        # deny_extensions=[] is required: LinkExtractor ignores archive
        # extensions (including .zip) by default.
        zip_links = LinkExtractor(
            allow=r'.*\.zip$',
            restrict_css='.downloadline',
            deny_extensions=[]
        ).extract_links(response)

        for link in zip_links:
            if self.zip_count >= self.MAX_ZIP_FILES:
                # Documented way to stop a spider from inside a callback;
                # replaces the deprecated/internal
                # self.crawler.engine.close_spider(...) call.
                raise CloseSpider('已下载6个ZIP文件')

            yield {
                'url': link.url,
                # Fall back to the URL's last path segment when the link
                # has no visible anchor text.
                'name': link.text.strip() or link.url.split('/')[-1]
            }
            self.zip_count += 1