import uuid
import scrapy
from lagou.items import JobItem


class LagouspiderSpider(scrapy.Spider):
    """Spider that scrapes "数据分析" (data analysis) job listings from Lagou.

    Pages are rendered by a Selenium downloader middleware (see the note at
    the bottom of ``parse``); this spider only parses the rendered HTML.
    """
    name = 'lagouspider'
    allowed_domains = ['www.lagou.com']
    start_urls = [
        'https://www.lagou.com/jobs/list_%s?labelWords=&fromSearch=true&suginput=' % "数据分析"
    ]

    # Rendering is done with Selenium in a downloader middleware.
    def parse(self, response, **kwargs):
        """Parse one listing page.

        Yields one ``JobItem`` per posting, then a marker request for the
        next page while the pager's "next" button is still enabled.
        """
        self.logger.debug(response)
        lis = response.xpath('//div[contains(@class, "s_position_list")]/ul/li')
        self.logger.debug(lis)
        for li in lis:
            job_name = li.xpath('./div/div/div/a/h3/text()').extract_first()
            job_position = li.xpath('./div/div/div/a/span/em/text()').extract_first()
            job_money = li.xpath('./div/div/div/div/span/text()').extract_first()
            job_require = li.xpath('./div/div/div/div[@class="li_b_l"]/text()').extract()
            self.logger.debug(job_require)
            job_require_clean = "".join(job_require).replace("\n", "").strip()
            # Split "experience / education" on the LAST slash, because the
            # node text preceding it may itself contain slashes.
            index = job_require_clean.rfind("/")
            if index == -1:
                # BUG FIX: previously a missing separator produced garbled
                # slices (time = text[:-1], edu = whole text). Treat the whole
                # string as the experience field and leave education empty.
                job_time, job_edu = job_require_clean, ""
            else:
                job_time = job_require_clean[:index].strip()
                job_edu = job_require_clean[index + 1:].strip()

            item = JobItem()
            item["job_name"] = job_name
            item["job_position"] = job_position
            item["job_money"] = job_money
            item["job_time"] = job_time
            item["job_edu"] = job_edu

            yield item

        # BUG FIX: extract_first() returns None when the pager span is absent
        # (e.g. an anti-crawler interstitial page); calling .find() on None
        # raised AttributeError. Also renamed to avoid shadowing builtin `next`.
        next_class = response.xpath('//span[@action="next"]/@class').extract_first()
        if next_class and "pager_next_disabled" not in next_class:
            # Not a real URL to fetch: the Selenium middleware recognizes this
            # marker request and clicks the "next" button instead — the exact
            # URL suffix only needs to be identifiable by the middleware.
            yield scrapy.Request(url=self.start_urls[0] + "******", callback=self.parse)