import scrapy
from firstBlood.items import FirstbloodItem
import re

class FirstSpider(scrapy.Spider):
    """Spider crawling image galleries from mevtu.com.

    ``parse`` walks a tag listing page; each ``<li>`` describes one gallery
    (name, first detail URL, image count). One request per image page is
    issued, and ``parse_detail`` extracts the image src into the item.
    """
    name = 'first'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://www.mevtu.com/tag/qiaotunmeinv.html']

    # Generic URL template for paginated listing pages.
    url = 'https://www.mevtu.com/tag/qiaotunmeinv/page_%d.html'
    page_num = 2

    # Compiled once (was re-built inside the loop): splits a detail URL into
    # (prefix, numeric id, ".html") so a "_<i>" suffix can be inserted before
    # the extension. Raw string fixes the invalid '\d' escape; '\.' matches a
    # literal dot instead of any character.
    _detail_re = re.compile(r'(.*?)(\d+)(\.html)')

    def parse(self, response):
        """Parse a listing page and schedule one request per image page.

        Malformed ``<li>`` entries (missing name/href/count, or a non-numeric
        count) are skipped instead of raising ``TypeError``/``ValueError``.
        """
        li_list = response.xpath('//*[@id="index_ajax_list"]/li')
        for li in li_list:
            name = li.xpath('./a/img/@alt').extract_first()
            new_url = li.xpath('./a/div/span/text()') and li.xpath('./a/@href').extract_first()
            many_text = li.xpath('./a/div/span/text()').extract_first()
            # extract_first() returns None when the node is absent; guard
            # before int() so one bad entry doesn't abort the whole page.
            if name is None or new_url is None or many_text is None:
                continue
            try:
                many = int(many_text)
            except ValueError:
                continue
            for i in range(1, many + 1):
                item = FirstbloodItem()
                if i == 1:
                    # The first image lives at the gallery's base URL.
                    item['name'] = name + '.jpg'
                    det_url = new_url
                else:
                    # Later images insert "_<i>" before ".html",
                    # e.g. .../123.html -> .../123_2.html
                    item['name'] = name + str(i) + '.jpg'
                    det_url = self._detail_re.sub(r'\1\2_%s\3', new_url) % str(i)
                # (removed dead "i += 1": reassigning a for-loop variable
                # has no effect on the next iteration in Python)
                yield scrapy.Request(url=det_url, callback=self.parse_detail,
                                     meta={'item': item})

        # Manual pagination (disabled):
        # if self.page_num < 6:
        #     new_url = format(self.url % self.page_num)
        #     self.page_num += 1
        #     # Manually send a request for the next page's URL.
        #     yield scrapy.Request(url=new_url, callback=self.parse)

    def parse_detail(self, response):
        """Extract the image src from a detail page and yield the item."""
        item = response.meta['item']
        img_src = response.xpath('//*[@id="image_div"]/p/a/img/@src').extract_first()
        item['src'] = img_src
        yield item


