import scrapy
from scrapy_redis.spiders import RedisSpider


class QuotesSpider(scrapy.Spider):
    """Scrape job postings from lagou.com detail pages.

    Yields one item dict per posting (company_name, title, salary,
    experience, education), then follows every link found on the page.
    """

    name = "1_lago"
    # Must be a CLASS attribute for Scrapy's OffsiteMiddleware to filter
    # followed links.  The original code assigned it to an unused local
    # variable inside start_requests, so it had no effect and parse()
    # would follow links off-site without limit.
    allowed_domains = ["www.lagou.com"]

    def start_requests(self):
        """Yield the initial request(s) that seed the crawl."""
        urls = [
            "https://www.lagou.com/jobs/2465019.html",
        ]
        for url in urls:
            # Spider logger instead of a bare print() so output respects
            # the project's LOG_LEVEL configuration.
            self.logger.debug("queueing start url: %s", url)
            yield scrapy.Request(url, self.parse)

    def parse(self, response):
        """Extract job fields from the position header, then follow links.

        Yields:
            dict: one per //div[@class="position-head"] match, with keys
                company_name, title, salary, experience, education.
                Any field may be None when its selector matches nothing
                (extract_first's default).
            scrapy.Request: one per <a href> on the page; relative hrefs
                are resolved with response.urljoin and off-site URLs are
                dropped via allowed_domains.
        """
        for div in response.xpath('//div[@class="position-head"]'):
            yield dict(
                company_name=div.xpath('.//div[@class="company"]/text()').extract_first(),
                title=div.xpath('.//span[@class="name"]/text()').extract_first(),
                salary=div.xpath('.//dd[@class="job_request"]/p/span[1]/text()').extract_first(),
                experience=div.xpath('.//dd[@class="job_request"]/p/span[3]/text()').extract_first(),
                education=div.xpath('.//dd[@class="job_request"]/p/span[4]/text()').extract_first(),
            )

        # Follow every link on the page and parse it with this same callback.
        for href in response.xpath('//a/@href').extract():
            yield scrapy.Request(url=response.urljoin(href), callback=self.parse)



