import scrapy
from scrapy import Selector, Request

# class NewjobSpider(scrapy.Spider):
#     name = 'newJob'
#     allowed_domains = ['zhaopin.com']
#
#     # start_urls = ['http://www.zhaopin.com/']
#
#     def start_requests(self):
#         yield Request(url='http://www.zhaopin.com/')
#         # for page in range():
#
#     def parse(self, response, **kwargs):
#         hot_job_list = Selector(text=response.text).xpath(
#             "//a[@class='zp-jobNavigater__pop--href']/text()").extract()
#         for job in hot_job_list:
#             yield Request(url=f'https://sou.zhaopin.com/?jl=822&kw={job}')
# print("hot_job_list", hot_job_list)
from sipderWorkScrapy.items import JobList


class GetJobList(scrapy.Spider):
    """Spider that scrapes the hot-job names from the Zhaopin landing page.

    For every hot-job navigation link found on https://www.zhaopin.com/
    it yields one ``JobList`` item whose ``jobName`` field holds the
    link's text.
    """

    name = 'getJobList'
    allowed_domains = ['www.zhaopin.com']

    start_urls = ['https://www.zhaopin.com/']

    def parse(self, response, **kwargs):
        """Yield one ``JobList`` item per hot-job link on the page.

        :param response: the downloaded ``scrapy.http.Response`` for a
            ``start_urls`` entry.
        :returns: generator of ``JobList`` items (``jobName`` set to the
            anchor text of each hot-job link).
        """
        # A scrapy Response is already a selector over its own body, so
        # wrapping response.text in a fresh Selector is a redundant
        # second parse; query the response directly instead.
        # getall() is the modern spelling of the legacy extract().
        job_names = response.xpath(
            "//a[@class='zp-jobNavigater__pop--href']/text()").getall()

        for job_name in job_names:
            item = JobList()
            item['jobName'] = job_name
            yield item


