import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from quanzhanPro.items import QuanzhanproItem

class BizhiSpider(CrawlSpider):
    """Crawl wallpaper listing pages on netbian.com and yield image sources.

    Starts at the "weimei" (aesthetic) category, follows pagination links
    (``index_<n>.htm``) and detail-page links (``desk...htm``), and yields
    one ``QuanzhanproItem`` per detail page containing the image ``src``.
    """

    name = 'bizhi'
    # allowed_domains = ['www.xxxx.com']
    start_urls = ['http://www.netbian.com/weimei/']

    # Raw strings so '\d' is a regex token, not a (deprecated) string escape;
    # '\.' matches a literal dot instead of any character.
    start_link = LinkExtractor(allow=r'index_\d+?\.htm')
    detail_link = LinkExtractor(allow=r'desk.+?\.htm')

    rules = (
        # Pagination links: parse the listing page, do not follow further.
        Rule(start_link, callback='parse_item', follow=False),
        # Detail links: extract the wallpaper image, keep following.
        Rule(detail_link, callback='detail_parse', follow=True),
    )

    def parse_item(self, response):
        """Callback for paginated listing pages (currently only logs the response)."""
        print(response)

    def detail_parse(self, response):
        """Callback for wallpaper detail pages.

        Extracts the image ``src`` attribute from the page and yields it
        wrapped in a ``QuanzhanproItem``.
        """
        print(response)
        # First (and only) preview image inside the main content area.
        src = response.xpath('//*[@id="main"]/div[2]/div/p/a/img/@src').extract_first()
        print(src)
        item = QuanzhanproItem()
        item['src'] = src
        yield item

