# -*- coding: utf-8 -*-
import scrapy
from qsbk.items import bsbdjItem

class BsbdjSpider(scrapy.Spider):
    """Spider that scrapes poster avatar URLs from budejie.com list pages."""

    name = 'bsbdj'
    # allowed_domains entries must be bare domain names (no scheme, no
    # trailing slash) or Scrapy's offsite middleware filters every request.
    allowed_domains = ['budejie.com']
    start_urls = ['http://www.budejie.com/']

    def parse(self, response):
        """Parse one list page and yield an item per post.

        :param response: HTTP response for a budejie.com list page.
        :yields: ``bsbdjItem`` with ``head`` set to the poster's avatar URL.
        """
        for entry in response.xpath('//div[@class="j-r-list"]//li'):
            # Relative XPath ('.//') scopes the query to this <li>; the
            # original absolute '//' searched the whole document, so every
            # iteration extracted the first avatar on the page.
            head = entry.xpath(
                './/div[@class="j-list-user"]//a//img/@src'
            ).extract_first()
            # Guard against entries without an avatar instead of letting
            # head[0] raise IndexError on an empty match.
            if head:
                # A fresh item per entry: the original reused one item
                # object, so every list element aliased the same dict and
                # all of them ended up with the last extracted value.
                item = bsbdjItem()
                item['head'] = head
                yield item

# if __name__ == '__main__':
#     from scrapy import cmdline
#     args = "scrapy crawl bsbdj".split()
#     cmdline.execute(args)