# -*- coding: utf-8 -*-
import json

import scrapy
from scrapy import Request

from zhilianjobs.items import ZhilianjobsItem


class JobsSpider(scrapy.Spider):
    """Crawl Zhilian's job-search JSON API for Python postings, paging by offset.

    Each response is a JSON document; items are yielded per job record and the
    next page is requested until the API returns an empty result list.
    """

    name = 'jobs'
    allowed_domains = ['zhaopin.com']
    start_offset = 0   # running offset into the API's result set
    page_size = 90     # number of results requested per API call
    # Throttle politely through Scrapy's scheduler instead of calling
    # time.sleep() inside parse(), which blocks the whole Twisted reactor.
    custom_settings = {"DOWNLOAD_DELAY": 0.5}
    # start_urls = ['http://www.zhilianzhaopin.com/']
    data_url = "https://fe-api.zhaopin.com/c/i/sou?start={}&pageSize={}&cityId=801&workExperience=-1&education=-1&companyType=-1&employmentType=-1&jobWelfareTag=-1&kw=Python&kt=3&_v=0.06846830&x-zp-page-request-id=d8030079dc2a49b88c48c87d6a54e9e1-1559181341480-750762&x-zp-client-id=8645595c-0e72-442f-a3a4-8b96a629ee2f"

    def start_requests(self):
        """Issue the request for the first page of results."""
        new_url = self.data_url.format(self.start_offset, self.page_size)
        yield Request(url=new_url, callback=self.parse)

    def err_callback(self, *args, **kwargs):
        """Errback for failed page requests; logs and drops the failure."""
        self.logger.error("Request failed: args=%r kwargs=%r", args, kwargs)

    def parse(self, response):
        """Parse one API page: yield an item per job, then request the next page.

        Stops paging when the API reports a non-200 code or an empty result list.
        """
        jobs_data = json.loads(response.text)
        if jobs_data["code"] != 200:
            self.logger.warning("API returned code %s, stopping", jobs_data["code"])
            return

        reply_jobs = jobs_data["data"]["results"]
        if not reply_jobs:
            # Empty page means we've exhausted the result set.
            return

        for job in reply_jobs:
            try:
                item = ZhilianjobsItem()
                item["work_experience"] = job["workingExp"]["name"]
                item["salary_range"] = job['salary']
                yield item
            except (KeyError, TypeError):
                # Some records lack workingExp/salary (or workingExp is not a
                # dict); skip them but log the record for inspection.
                self.logger.warning("Skipping malformed job record: %r", job)

        # Advance the offset by however many results this page actually held.
        self.start_offset += len(reply_jobs)
        new_url = self.data_url.format(self.start_offset, self.page_size)
        self.logger.info("Requesting %s", new_url)
        yield Request(url=new_url,
                      callback=self.parse,
                      errback=self.err_callback)



