import scrapy
from liepin.items import LiepinItem

class ShellsSpider(scrapy.Spider):
    """Spider for job postings on liepin.com's search listing page.

    Yields one ``LiepinItem`` per job row, with position name, location,
    salary, required education, required experience, company name, and
    additional company info.
    """
    name = 'shells'
    allowed_domains = ['liepin.com']
    start_urls = ['https://www.liepin.com/zhaopin']

    def parse(self, response):
        """Parse the listing page and yield one item per job row.

        :param response: the listing page response for ``start_urls``.
        :yields: populated ``LiepinItem`` instances.
        """
        for row in response.xpath("//div[@class='sojob-result ']/ul[@class='sojob-list']/li/div"):
            item = LiepinItem()
            # Guard against a missing node: .get() returns None when the
            # xpath matches nothing, and None.strip() would raise.
            item["p_name"] = (row.xpath("div[@class='job-info']/h3/a/text()").get() or '').strip()  # position name
            item["p_address"] = row.xpath("div[@class='job-info']/p/a/text()").get()  # location
            item["p_salary"] = row.xpath("div[@class='job-info']/p/span[@class='text-warning']/text()").get()  # salary
            item["p_edu"] = row.xpath("div[@class='job-info']/p/span[@class='edu']/text()").get()  # education
            item["p_time"] = row.xpath("div[@class='job-info']/p/span[3]/text()").get()  # experience
            # BUGFIX: these two xpaths were absolute ("//div[...]"), which
            # searches the whole document instead of this row — every item
            # got the first company on the page. Made relative to `row`.
            item["p_company"] = row.xpath("div[@class='company-info nohover']/p/a/text()").get()  # company
            item["p_additional"] = row.xpath("div[@class='company-info nohover']/p[3]/span/text()").get()  # extra company info
            yield item
