import scrapy
from boss.items import BossItem

class BossspiderSpider(scrapy.Spider):
    """Spider for BOSS Zhipin 'gis' job listings in city 101200100.

    Crawls the listing pages, follows each job's detail page, and yields
    BossItem objects carrying the job title and full job description.
    """

    name = 'bossspider'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://www.zhipin.com/job_detail/?query=gis&city=101200100&industry=&position=']
    # Pagination template; %d is filled with the page number.
    url = "https://www.zhipin.com/c101200100/?query=gis&page=%d"
    # Next page to request (page 1 comes from start_urls).
    page_num = 2

    def parse_detail(self, response):
        """Callback for a job detail page; completes and yields the item.

        The partially-filled item is passed in via request meta.
        """
        item = response.meta["item"]  # item handed over from parse() via meta
        job_desc = response.xpath('/html/body/div[1]/div[2]/div[3]/div/div[2]/div[2]/div[1]/div//text()').extract()
        job_desc = "".join(job_desc)
        print(job_desc)
        # BUG FIX: was item[job_desc] = job_desc, which used the scraped text
        # itself as the field name; store under the declared "job_desc" field.
        item["job_desc"] = job_desc
        yield item

    def parse(self, response):
        """Parse a listing page: yield a detail-page request per job, then paginate."""
        li_list = response.xpath('/html/body/div[1]/div[3]/div/div[3]/ul//li')
        for li in li_list:
            job_name = li.xpath('.//div/div[1]/div[1]/div/div[1]/span[1]/a/@title').extract_first()
            print(job_name)
            item = BossItem()
            detail_url = "https://www.zhipin.com/" + li.xpath('.//div/div[1]/div[1]/div/div[1]/span[1]/a/@href').extract_first()
            print(detail_url)
            item["job_name"] = job_name
            # Manually issue a request for the detail page; meta={} passes the
            # dict (and thus the item) through to the callback.
            yield scrapy.Request(detail_url, callback=self.parse_detail, meta={'item': item})
        # Follow pagination up to page 5.
        if self.page_num <= 5:
            # The % operator already produces the final URL; the redundant
            # format() wrapper was removed.
            new_url = self.url % self.page_num
            self.page_num += 1
            yield scrapy.Request(new_url, callback=self.parse)