import scrapy
from scrapy07.request import SeleniumRequest

class BeijingSpider(scrapy.Spider):
    """Spider for Python job listings on zhipin.com (BOSS直聘), Beijing.

    Pages are JS-rendered, so every request goes through the project's
    SeleniumRequest instead of a plain scrapy.Request.
    """

    name = 'zhipin'
    allowed_domains = ['zhipin.com']
    start_urls = ['https://www.zhipin.com/job_detail/?query=python&city=101010100&industry=&position=beijing']

    def start_requests(self):
        """Issue the initial listing-page request through Selenium."""
        # self.start_urls instead of the class name: survives renames/subclassing.
        yield SeleniumRequest(
            url=self.start_urls[0],
            callback=self.parse,
        )

    def parse(self, resp, **kwargs):
        """Extract each job's name and detail-page link from the listing page.

        Yields a SeleniumRequest per job, handled by :meth:`parse_detail`.
        """
        li_list = resp.xpath('//*[@id="main"]/div/div[3]/ul/li')
        for li in li_list:
            href = li.xpath("./div[1]/div[1]/div[1]/div[1]/div[1]/span[1]/a[1]/@href").get()
            name = li.xpath("./div[1]/div[1]/div[1]/div[1]/div[1]/span[1]/a[1]/text()").get()

            # Guard: a listing with no anchor would make urljoin(None) raise
            # and abort the whole callback — skip it instead.
            if not href:
                self.logger.warning("listing item without href skipped (name=%r)", name)
                continue

            self.logger.info("job %s -> %s", name, resp.urljoin(href))

            # Detail pages are JS-rendered too, so use SeleniumRequest again.
            yield SeleniumRequest(
                url=resp.urljoin(href),
                callback=self.parse_detail,
            )

    def parse_detail(self, resp, **kwargs):
        """Extract data from a single job's detail page.

        NOTE(review): the XPath selects the <h2> node itself, so .getall()
        returns raw HTML fragments — add /text() if only the text is wanted.
        """
        self.logger.info(
            "recruiter %s",
            resp.xpath('//*[@id="main"]/div[3]/div/div[2]/div[1]/h2').getall(),
        )
