import logging
import time

import scrapy
import json

from spider_job.items import SpiderJobItem


class Job51Spider(scrapy.Spider):
    """Spider for 51job.com job-search results.

    Requests the search-result page for ``keyword``, extracts the job list
    that the site embeds as JSON inside an inline ``<script>`` tag
    (``window.__SEARCH_RESULT__ = {...}``), yields one ``SpiderJobItem``
    per posting, and — from the first page only — schedules requests for
    the remaining result pages.
    """

    name = 'job51'
    allowed_domains = ['51job.com']

    # URL template: placeholder {0} is the search keyword, {1} is the page number.
    urls_start = "https://search.51job.com/list/000000,000000,0000,00,9,99,{0},2,{1}.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare=&u_atoken=e0bf3424-2537-4b5c-a4c2-697e84199a29&u_asession=017VHdx9--fZySGbZXNxClQoejSG-y21Qz3kiiuf-XKmV0WQMLOguzTcWieQ62j_TgX0KNBwm7Lovlpxjd_P_q4JsKWYrT3W_NKPr8w6oU7K-ciQDzOVsGJ5aZBhlCnKZTAJD9xkkjypouMKwP8bE0AWBkFo3NEHBv0PZUm6pbxQU&u_asig=05C3Y5rfKfZP8Rhggch0Gw0DX9ajQk72CcrfCc78PLc6ugAUn5Bk310XpWCFVuJSDQTdHunh32PGjrX9VBqAg-C4Y5OPX13PiypHWiMmzB4kCteImY9boSf4Vpe8Nqy9NpCCGylST1jeAmPbwZ0bJHW-QZ1vLsJfWjl2tlTFGXt2r9JS7q8ZD7Xtz2Ly-b0kmuyAKRFSVJkkdwVUnyHAIJzaGJ1X5fonZ-Urp9RAA6l4TuiwfRhSzOALCatU-Fjc36xLcQCv2QbD8lw_Tf46jchO3h9VXwMyh6PgyDIVSG1W_6hJTjAGROBGT0lfHZSU-eosD3FwyVLlW2UoW0lJBzNXz66etzwwgQuCh9WqhxmJFzRyfOYvesd_-nQLF5oIpemWspDxyAEEo4kbsryBKb9Q&u_aref=bppaE44tlopRgMTEHeJ0zFP9I5w%3D"

    keyword = "重庆 大数据开发"

    def start_requests(self):
        """Issue the first search request, forwarding the keyword via meta."""
        url = self.urls_start.format(self.keyword, 1)
        yield scrapy.Request(url, meta={"keyword": self.keyword},
                             callback=self.parse, dont_filter=True)

    def parse(self, response):
        """Parse one result-list page.

        Yields a ``SpiderJobItem`` per job posting. On the first page only,
        also yields requests for pages 2..total_page.
        """
        js = response.xpath(
            '//script[contains(@type,"text/javascript") and not(@src)]'
        ).extract_first()
        # Guard: the embedded-JSON script may be absent (layout change,
        # anti-bot page). Log and bail instead of raising an opaque error.
        if js is None or "__SEARCH_RESULT__" not in js:
            self.logger.warning("__SEARCH_RESULT__ script not found in %s",
                                response.url)
            return

        raw = js.split("__SEARCH_RESULT__")[1]
        # Slice off the leading " = " assignment (3 chars) and the trailing
        # "</script>" residue (9 chars) to isolate the JSON payload.
        data = json.loads(raw[3:-9])

        for job in data.get("engine_jds") or []:
            item = SpiderJobItem()
            item["title"] = job.get("job_name")
            item["company"] = job.get("company_name")
            item["salary"] = job.get("providesalary_text")
            item["address"] = job.get("workarea_text")
            item["post"] = job.get("jobwelf")
            item["experience"] = str(job.get("attribute_text"))
            yield item

        # BUG FIX: the original read ``total_page`` unconditionally, but only
        # bound it when current_page == 1 — a NameError on every page >= 2.
        # It also re-scheduled pages 2..N from *every* page, which with
        # dont_filter=True multiplies requests without bound. Pagination is
        # therefore scheduled from the first page only.
        current_page = int(data.get('curr_page'))
        if current_page == 1:
            total_page = int(data.get('total_page'))
            keyword = response.meta["keyword"]
            for page in range(2, total_page + 1):
                url = self.urls_start.format(keyword, page)
                yield scrapy.Request(url=url, meta={"keyword": keyword},
                                     callback=self.parse, dont_filter=True)
