#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2024/7/18 10:01
# @Author  : 王凯
# @File    : qcwy_jobs.py
# @Project : scrapy_spider
import datetime
import hashlib
import hmac
import json
import multiprocessing
import re
import sys
import time
from pathlib import Path
from queue import Queue
from typing import Any
from urllib.parse import quote

import scrapy
from scrapy.settings import BaseSettings

sys.path.append(Path(__file__).parent.parent.parent.parent.parent.as_posix())
from apps.jobs.jobs.items import (
    NetJobDetailItem,
    NetJobCompanyDetailItem,
    NetJobEnterpriseBusinessInfoItem,
)
from apps.jobs.jobs.spiders import run_mul
from utils.tools import compile_js
from components.component.scrapy_redis_task.spiders import RedisTaskSpider
from components.config import WFQ_SOURCE_MYSQL_CONFIG
from utils.db.mysqldb import MysqlDB
from utils.tonghuashun_tools import PostgresqlDB

import subprocess
from functools import partial

# Force text-mode UTF-8 on every subprocess pipe opened after this point.
# NOTE(review): presumably this is for the JS runtime spawned by compile_js,
# whose stdout would otherwise be decoded with the locale encoding (e.g. GBK
# on Chinese Windows) and garble the signed URL — confirm against utils.tools.
subprocess.Popen = partial(subprocess.Popen, encoding="utf-8")
# Obfuscated browser-side JavaScript lifted from 51job's site, executed via
# compile_js (see get_real_url below).  get_url(url) appends one extra query
# parameter to *url*: the parameter name is picked from a fixed list (e.g.
# "type__", "refer__") by a checksum of the host name, and the value is a
# compressed encoding of a rolling hash of the URL plus the current
# timestamp.  Do not reformat or "clean up" this string — the site expects
# byte-exact behavior from this algorithm.
detail_js = """
function get_url(original) {var host = RegExp("http[s]*://(.*?)/").exec(original)[1]; var _0x2f0fb9 = { "iRtvZ": "div", "fKyFL": function (_0x4ac651, _0x2d2b1f) { return _0x4ac651(_0x2d2b1f) }, "QOFII": function (_0xa47fee, _0x5473a7) { return _0xa47fee === _0x5473a7 }, "iOzvL": "duCiQ", "oIbkZ": function (_0x47c552, _0x14d319) { return _0x47c552 != _0x14d319 }, "WESZa": "MOvsy", "doOID": function (_0x3f0d70, _0x4191bf) { return _0x3f0d70 < _0x4191bf }, "OmlhX": "%25", "swqwF": function (_0x47d24b, _0x39718f) { return _0x47d24b + _0x39718f }, "ICAKP": function (_0x59f1fb, _0x547e21) { return _0x59f1fb + _0x547e21 }, "FAptw": function (_0x3ec169, _0x176ba2) { return _0x3ec169(_0x176ba2) }, "bLIMR": function (_0x5df840, _0x3f4fa7) { return _0x5df840(_0x3f4fa7) }, "YuNeF": function (_0x12f9e3, _0xe8c71b) { return _0x12f9e3 != _0xe8c71b }, "humMY": function (_0x1e7e1f, _0x3a23bb) { return _0x1e7e1f == _0x3a23bb }, "dxGKL": function (_0x4af3d1, _0x38946d) { return _0x4af3d1 - _0x38946d }, "NiAUl": function (_0x36bcb2, _0xe9c891) { return _0x36bcb2 + _0xe9c891 }, "nytuW": function (_0x55d0bc, _0x42c195, _0x4a0b6b) { return _0x55d0bc(_0x42c195, _0x4a0b6b) }, "YiOrB": function (_0x5216f6, _0x39103d) { return _0x5216f6 >> _0x39103d }, "ompsq": function (_0x589319, _0x25f1a9) { return _0x589319 & _0x25f1a9 }, "SoGqx": function (_0x5ed5a3, _0x546942) { return _0x5ed5a3 - _0x546942 }, "edfme": function (_0x3ba9a0, _0x56f846) { return _0x3ba9a0 == _0x56f846 }, "BCeqK": "XCHet", "jpFEq": "bBkTv", "AYNNc": "SEcKB", "nTFDs": "ylTCc", "uAMbL": function (_0x122076, _0x37e656) { return _0x122076 << _0x37e656 }, "Shhrx": function (_0x252322, _0x19be51) { return _0x252322(_0x19be51) }, "lzwgv": "NrXkr", "sJRhl": function (_0x4e4cdf, _0x1a1530) { return _0x4e4cdf << _0x1a1530 }, "KggED": "tOOOh", "TQzOd": function (_0x364e72, _0xa83fbf) { return _0x364e72 | _0xa83fbf }, "swGrN": function (_0x58fa46, _0x1e3a51) { return _0x58fa46 !== _0x1e3a51 }, "WTOkX": "pwjFI", "WjObh": "SolBZ", 
"riQFS": function (_0x443374, _0x467279) { return _0x443374 < _0x467279 }, "LxwcG": function (_0x2008fe, _0x171a43) { return _0x2008fe & _0x171a43 }, "ZzCwR": function (_0x23a287, _0x4616bb) { return _0x23a287 !== _0x4616bb }, "Yhfpo": function (_0x209da2, _0x201d1a) { return _0x209da2 | _0x201d1a }, "oOCUQ": function (_0x54af71, _0x26a573) { return _0x54af71(_0x26a573) }, "vPWUM": "JMWpW", "eRpye": function (_0x243816, _0x2cddfc) { return _0x243816 | _0x2cddfc }, "ErYTS": function (_0x1929e4, _0x2233ae) { return _0x1929e4 & _0x2233ae }, "HMxYp": function (_0x480f2c, _0x424071) { return _0x480f2c === _0x424071 }, "zSaNx": function (_0x176220, _0x1610de) { return _0x176220 >> _0x1610de }, "ZAIyX": function (_0x343d53, _0x2a11a8) { return _0x343d53 + _0x2a11a8 }, "wYIeA": function (_0x4806fc, _0x23d7c7) { return _0x4806fc << _0x23d7c7 }, "rzOeX": "zpYzt", "oKRSs": "kqDdd", "apeUz": "idPrefix_", "OFaZe": "cache_", "CtMGS": function (_0x370a3e, _0x289840) { return _0x370a3e !== _0x289840 }, "UiruY": "qxNiz", "qfKqm": "Owuvn", "MBlLj": function (_0x4c610f, _0x2587a8) { return _0x4c610f(_0x2587a8) }, "EMWnN": function (_0x446a98, _0x49bc01) { return _0x446a98(_0x49bc01) }, "gSZGR": function (_0xb5f24c, _0x21d595) { return _0xb5f24c << _0x21d595 }, "xvxFc": function (_0x38db8a, _0x2cafeb) { return _0x38db8a(_0x2cafeb) }, "NceiW": function (_0x4efca8, _0x1b0371) { return _0x4efca8 << _0x1b0371 }, "PiGWf": function (_0x155a8c, _0x3fa4c3) { return _0x155a8c(_0x3fa4c3) }, "NffVp": function (_0x4b5e64, _0x52e855) { return _0x4b5e64 << _0x52e855 }, "WAzVR": function (_0x48a0dc, _0x319bfa) { return _0x48a0dc(_0x319bfa) }, "XBcoZ": "type__", "CVWOa": "decode__", "uVMSs": function (_0x214ba9, _0x191e01) { return _0x214ba9 + _0x191e01 } }; var _0x56d97c = function (_0x8df421) { var _0x24a251 = { "HflGH": "eEKjo", "lwhEK": "QrDfB", "hNEOQ": function (_0x41ca69, _0x4ca3fd) { return _0x2f0fb9["edfme"](_0x41ca69, _0x4ca3fd) }, "HcKKx": function (_0x10e48e, _0x578886) { return _0x10e48e 
>> _0x578886 } }; if (_0x2f0fb9["BCeqK"] !== _0x2f0fb9["BCeqK"]) { var _0x343167 = _0x229eef["document"][_0x18eedd]; if (_0x343167 && _0x343167["hasOwnProperty"]("idPrefix_") && _0x343167["hasOwnProperty"]("cache_") && _0x343167["hasOwnProperty"]("nextId_")) { _0x5e911d = 1 } } else { if (_0x8df421 == null) { return "" } var _0x1a190e = String["fromCharCode"]; var _0x3219b6 = "DGi0YA7BemWnQjCl4+bR3f8SKIF9tUz/xhr2oEOgPpac=61ZqwTudLkM5vHyNXsVJ"; var _0x33fb4f = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+-$"; var _0x5d342f = {}; var _0xc885ae = 6; var _0x1637ab = function (_0x329955) { if (_0x24a251["HflGH"] === _0x24a251["lwhEK"]) { _0x2be31f = 1 } else { return _0x3219b6["charAt"](_0x329955) } }; if (_0x8df421 == null) { return "" } var _0x2ba560, _0x1a3125, _0x56a845 = {}, _0x568d8a = {}, _0x17ecc2 = "", _0x4b03fb = "", _0x127752 = "", _0x55a847 = 2, _0x48b33e = 3, _0x318c3 = 2, _0x24e447 = [], _0x841ee6 = 0, _0x25adf9 = 0, _0x4afbc9; for (_0x4afbc9 = 0; _0x4afbc9 < _0x8df421["length"]; _0x4afbc9 += 1) { _0x17ecc2 = _0x8df421["charAt"](_0x4afbc9); if (!Object["prototype"]["hasOwnProperty"]["call"](_0x56a845, _0x17ecc2)) { if (_0x2f0fb9["jpFEq"] === "bBkTv") { _0x56a845[_0x17ecc2] = _0x48b33e++; _0x568d8a[_0x17ecc2] = !![] } else { _0x586061["search"] = _0x2f0fb9["nytuW"](_0x138f86, _0x32aa91["search"], { "alichlgref": _0x35fc62["referrer"] }) } } _0x4b03fb = _0x127752 + _0x17ecc2; if (Object["prototype"]["hasOwnProperty"]["call"](_0x56a845, _0x4b03fb)) { _0x127752 = _0x4b03fb } else { if (_0x2f0fb9["AYNNc"] === "TwzUG") { _0x4f3fe7++ } else { if (Object["prototype"]["hasOwnProperty"]["call"](_0x568d8a, _0x127752)) { if (_0x2f0fb9["nTFDs"] !== _0x2f0fb9["nTFDs"]) { _0x3579ce = _0x7a15ac << 1 | _0x596d29 & 1; if (_0x5e99c6 == _0x377b0a - 1) { _0x44fe7a = 0; _0x1ea166["push"](_0x2bf671(_0x57ff55)); _0x5b7cb8 = 0 } else { _0x5ef9d9++ } _0x29a523 = _0x2f0fb9["YiOrB"](_0x1f9864, 1) } else { if (_0x127752["charCodeAt"](0) < 256) { for (_0x2ba560 = 0; 
_0x2ba560 < _0x318c3; _0x2ba560++) { _0x841ee6 = _0x841ee6 << 1; if (_0x25adf9 == 5) { _0x25adf9 = 0; _0x24e447["push"](_0x2f0fb9["bLIMR"](_0x1637ab, _0x841ee6)); _0x841ee6 = 0 } else { _0x25adf9++ } } _0x1a3125 = _0x127752["charCodeAt"](0); for (_0x2ba560 = 0; _0x2ba560 < 8; _0x2ba560++) { _0x841ee6 = _0x2f0fb9["uAMbL"](_0x841ee6, 1) | _0x2f0fb9["ompsq"](_0x1a3125, 1); if (_0x2f0fb9["edfme"](_0x25adf9, _0x2f0fb9["SoGqx"](_0xc885ae, 1))) { _0x25adf9 = 0; _0x24e447["push"](_0x2f0fb9["Shhrx"](_0x1637ab, _0x841ee6)); _0x841ee6 = 0 } else { _0x25adf9++ } _0x1a3125 = _0x1a3125 >> 1 } } else { _0x1a3125 = 1; for (_0x2ba560 = 0; _0x2f0fb9["doOID"](_0x2ba560, _0x318c3); _0x2ba560++) { if ("tGGyM" === _0x2f0fb9["lzwgv"]) { _0x4de593++ } else { _0x841ee6 = _0x841ee6 << 1 | _0x1a3125; if (_0x25adf9 == 5) { _0x25adf9 = 0; _0x24e447["push"](_0x1637ab(_0x841ee6)); _0x841ee6 = 0 } else { if (_0x2f0fb9["QOFII"]("fQrHu", "fQrHu")) { _0x25adf9++ } else { if (_0x416bb0) { return 1 } else { return 0 } } } _0x1a3125 = 0 } } _0x1a3125 = _0x127752["charCodeAt"](0); for (_0x2ba560 = 0; _0x2ba560 < 16; _0x2ba560++) { _0x841ee6 = _0x2f0fb9["sJRhl"](_0x841ee6, 1) | _0x1a3125 & 1; if (_0x2f0fb9["edfme"](_0x25adf9, _0x2f0fb9["SoGqx"](_0xc885ae, 1))) { _0x25adf9 = 0; _0x24e447["push"](_0x2f0fb9["Shhrx"](_0x1637ab, _0x841ee6)); _0x841ee6 = 0 } else { _0x25adf9++ } _0x1a3125 = _0x1a3125 >> 1 } } _0x55a847--; if (_0x2f0fb9["edfme"](_0x55a847, 0)) { if (_0x2f0fb9["KggED"] === "tOOOh") { _0x55a847 = Math["pow"](2, _0x318c3); _0x318c3++ } else { _0x37b477 = _0x5d3ab9[_0xc17916]; for (_0x4ca415 = 0; _0x32bf09 < _0x9f6d5e; _0x43b89++) { _0x3f5dd9 = _0x525588 << 1 | _0x49d704 & 1; if (_0x41a525 == _0x1a9092 - 1) { _0x529efc = 0; _0x5f434a["push"](_0x3deab8(_0x11b2a6)); _0x30cfcf = 0 } else { _0x10a129++ } _0x187e07 = _0x4ffbd3 >> 1 } } } delete _0x568d8a[_0x127752] } } else { _0x1a3125 = _0x56a845[_0x127752]; for (_0x2ba560 = 0; _0x2ba560 < _0x318c3; _0x2ba560++) { _0x841ee6 = 
_0x2f0fb9["TQzOd"](_0x841ee6 << 1, _0x2f0fb9["ompsq"](_0x1a3125, 1)); if (_0x25adf9 == 5) { _0x25adf9 = 0; _0x24e447["push"](_0x1637ab(_0x841ee6)); _0x841ee6 = 0 } else { _0x25adf9++ } _0x1a3125 = _0x1a3125 >> 1 } } _0x55a847--; if (_0x55a847 == 0) { _0x55a847 = Math["pow"](2, _0x318c3); _0x318c3++ } _0x56a845[_0x4b03fb] = _0x48b33e++; _0x127752 = String(_0x17ecc2) } } } if (_0x2f0fb9["swGrN"](_0x127752, "")) { if (Object["prototype"]["hasOwnProperty"]["call"](_0x568d8a, _0x127752)) { if ("pwjFI" !== _0x2f0fb9["WTOkX"]) { _0x4f2452 = 0; _0x4d1bc3["push"](_0xaafa6d(_0x4c906d)); _0x443c56 = 0 } else { if (_0x2f0fb9["doOID"](_0x127752["charCodeAt"](0), 256)) { if (_0x2f0fb9["swGrN"]("pnZzb", _0x2f0fb9["WjObh"])) { for (_0x2ba560 = 0; _0x2ba560 < _0x318c3; _0x2ba560++) { _0x841ee6 = _0x841ee6 << 1; if (_0x25adf9 == 5) { if (_0x2f0fb9["QOFII"]("lBGsP", "ojwnB")) { return 1 } else { _0x25adf9 = 0; _0x24e447["push"](_0x2f0fb9["Shhrx"](_0x1637ab, _0x841ee6)); _0x841ee6 = 0 } } else { _0x25adf9++ } } _0x1a3125 = _0x127752["charCodeAt"](0); for (_0x2ba560 = 0; _0x2f0fb9["riQFS"](_0x2ba560, 8); _0x2ba560++) { _0x841ee6 = _0x841ee6 << 1 | _0x2f0fb9["LxwcG"](_0x1a3125, 1); if (_0x2f0fb9["edfme"](_0x25adf9, _0x2f0fb9["SoGqx"](_0xc885ae, 1))) { _0x25adf9 = 0; _0x24e447["push"](_0x1637ab(_0x841ee6)); _0x841ee6 = 0 } else { _0x25adf9++ } _0x1a3125 = _0x2f0fb9["YiOrB"](_0x1a3125, 1) } } else { return 0 } } else { if (_0x2f0fb9["ZzCwR"]("CCOte", "IemYU")) { _0x1a3125 = 1; for (_0x2ba560 = 0; _0x2ba560 < _0x318c3; _0x2ba560++) { _0x841ee6 = _0x841ee6 << 1 | _0x1a3125; if (_0x25adf9 == _0x2f0fb9["SoGqx"](_0xc885ae, 1)) { _0x25adf9 = 0; _0x24e447["push"](_0x1637ab(_0x841ee6)); _0x841ee6 = 0 } else { _0x25adf9++ } _0x1a3125 = 0 } _0x1a3125 = _0x127752["charCodeAt"](0); for (_0x2ba560 = 0; _0x2ba560 < 16; _0x2ba560++) { _0x841ee6 = _0x2f0fb9["Yhfpo"](_0x841ee6 << 1, _0x1a3125 & 1); if (_0x25adf9 == 5) { _0x25adf9 = 0; _0x24e447["push"](_0x2f0fb9["oOCUQ"](_0x1637ab, _0x841ee6)); _0x841ee6 
= 0 } else { _0x25adf9++ } _0x1a3125 = _0x1a3125 >> 1 } } else { _0x37f5c3 = _0x20ba01 << 1 | _0x52f125; if (_0x422e93 == _0x306c4f - 1) { _0x2b59f7 = 0; _0x5c18ea["push"](_0x17441d(_0x1d334e)); _0x503f27 = 0 } else { _0x345dd9++ } _0x53381c = 0 } } _0x55a847--; if (_0x55a847 == 0) { _0x55a847 = Math["pow"](2, _0x318c3); _0x318c3++ } delete _0x568d8a[_0x127752] } } else { if (_0x2f0fb9["ZzCwR"]("XhWEG", "JaiOO")) { _0x1a3125 = _0x56a845[_0x127752]; for (_0x2ba560 = 0; _0x2ba560 < _0x318c3; _0x2ba560++) { _0x841ee6 = _0x2f0fb9["Yhfpo"](_0x841ee6 << 1, _0x1a3125 & 1); if (_0x2f0fb9["edfme"](_0x25adf9, _0x2f0fb9["SoGqx"](_0xc885ae, 1))) { _0x25adf9 = 0; _0x24e447["push"](_0x1637ab(_0x841ee6)); _0x841ee6 = 0 } else { if ("AHqWc" === _0x2f0fb9["vPWUM"]) { _0x545b2e = _0x4743e << 1 | _0x2f0fb9["ompsq"](_0x449caa, 1); if (_0x39e7a0 == _0x2f0fb9["SoGqx"](_0x5a30cb, 1)) { _0x50de0e = 0; _0x25ac9d["push"](_0x4df853(_0x157f91)); _0x8b84ed = 0 } else { _0x3b77c1++ } _0x4636d9 = _0x4147b3 >> 1 } else { _0x25adf9++ } } _0x1a3125 = _0x2f0fb9["YiOrB"](_0x1a3125, 1) } } else { return _0x15f98b } } _0x55a847--; if (_0x2f0fb9["edfme"](_0x55a847, 0)) { _0x55a847 = Math["pow"](2, _0x318c3); _0x318c3++ } } _0x1a3125 = 2; for (_0x2ba560 = 0; _0x2ba560 < _0x318c3; _0x2ba560++) { _0x841ee6 = _0x2f0fb9["eRpye"](_0x841ee6 << 1, _0x2f0fb9["ErYTS"](_0x1a3125, 1)); if (_0x25adf9 == 5) { _0x25adf9 = 0; _0x24e447["push"](_0x1637ab(_0x841ee6)); _0x841ee6 = 0 } else { if (_0x2f0fb9["HMxYp"]("yEKjY", "LipPO")) { _0x5a1b13 = 0; _0x26db1e["push"](_0x32e446(_0x23bb46)); _0x19eaec = 0 } else { _0x25adf9++ } } _0x1a3125 = _0x2f0fb9["zSaNx"](_0x1a3125, 1) } while (!![]) { _0x841ee6 = _0x841ee6 << 1; if (_0x25adf9 == 5) { _0x24e447["push"](_0x2f0fb9["oOCUQ"](_0x1637ab, _0x841ee6)); break } else { _0x25adf9++ } } return _0x24e447["join"]("") } }; var _0x420dc7 = 0; for (var _0x465be6 = 0; _0x465be6 < host["length"]; _0x465be6++) { _0x420dc7 = _0x420dc7 + host[_0x465be6]["charCodeAt"]() } var _0x3a7bdd = 
[_0x2f0fb9["XBcoZ"], "refer__", "ipcity__", "md5__", _0x2f0fb9["CVWOa"], "encode__", "time__", "timestamp__", "type__"]; var _0x3e621b = _0x3a7bdd[_0x420dc7 % _0x3a7bdd["length"]] + _0x420dc7 % 10000; var _0x318558 = 0; var _0x465be6, _0x44becc; for (_0x465be6 = 0; _0x465be6 < original["length"]; _0x465be6++) { _0x44becc = original["charCodeAt"](_0x465be6); _0x318558 = (_0x318558 << 7) - _0x318558 + 398 + _0x44becc; _0x318558 |= 0 } var _0x218d3d = 0; var _0x30f62c = _0x56d97c(_0x318558 + "|" + _0x218d3d + "|" + (new Date()["getTime"]() + "")); let new_original = `${original}?${_0x3e621b}=${_0x30f62c}`; return new_original }
// console.log(get_url("https://jobs.51job.com/all/coBmYAbFE3ADYAZ1MwBWI.html"))
"""


def get_real_url(original):
    """Run 51job's obfuscated ``get_url`` JS against *original*.

    Returns the URL with the site's anti-crawl token appended as an extra
    query parameter (``url?name=token``).
    """
    js_context = compile_js(detail_js)
    return js_context("get_url", original)


class RosterQCWYSpider(RedisTaskSpider):
    """51job (前程无忧) roster spider base.

    Loads company-name tasks from the MySQL ``roster_jobs`` table into a
    Redis zset (``add_task``), turns each task dict into a signed search
    request (``make_request_from_data``), and records crawl start/end rows
    in the ``net_jobs_task_log`` MySQL table.
    """

    save_mysql_log = True  # when True, write start/end rows to net_jobs_task_log
    uk_id = None  # md5 id linking one task's start and end log rows
    to_db = None  # result/log MySQL connection (set in __init__)
    wfq_source_db = None  # task-source MySQL connection (set in __init__)
    custom_settings = {
        "REDIS_START_URLS_KEY": "scrapy:task:tyc:%(name)s:start_urls",
        "LOG_LEVEL": "INFO",
    }
    source = "前程无忧"
    # Signing material extracted from the 51job web client.
    cupid_sign_key = "abfc8f9dcf8c3f3d8aa294ac5f2cf2cc7767e5592590f39c3f503271dd68562b"
    client_key = "jmhbz4xv1p8tw9z6umt6cw93bri96iuo"
    hostname = 'https://we.51job.com'
    headers = {
        "Accept-Language": "zh-CN,zh;q=0.9,zu;q=0.8,be;q=0.7,en;q=0.6",
        "Accept-Encoding": "gzip,deflate,br",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "From-Domain": "51job_web",
        "Origin": hostname,
        "Pragma": "no-cache",
        "Referer": hostname,
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-site",
    }

    def __init__(self, **kwargs: Any):
        """Open the result DB plus the roster task-source DB."""
        super().__init__(**kwargs)
        self.to_db = MysqlDB()
        self.wfq_source_db = MysqlDB(
            ip=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_IP"],
            port=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_PORT"],
            db=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_DB"],
            user_name=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_USER_NAME"],
            user_pass=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_USER_PASS"],
        )

    def add_task(self):
        """Load the next batch of companies from ``roster_jobs`` into Redis.

        The last dispatched row id is checkpointed in Redis under
        ``<redis_key>:id``.  When the table is exhausted this polls every
        five minutes until new rows appear.  Implemented as a loop: the
        original recursed on the empty branch, which could eventually
        exhaust the stack on a long idle run.
        """
        record_task_table = self.redis_key + ":id"
        batch = 1000
        while True:
            last_task_id = self.server.get(record_task_table)
            last_task_id = int(last_task_id) if last_task_id else 0
            if last_task_id:
                sql = (
                    f"SELECT company_name as query_key, used_name, taxpayer_id, id from "
                    f"roster_jobs where id > {last_task_id} limit {batch}"
                )
            else:
                sql = f"SELECT company_name as query_key, used_name, taxpayer_id, id from roster_jobs limit {batch}"
            datas = self.wfq_source_db.find(sql, to_json=True)
            if datas:
                self.logger.info(
                    "数据加载完成,{}, id从{}到{} 数量 {}".format(self.redis_key, last_task_id, last_task_id + batch, len(datas))
                )
                for data in datas:
                    taxpayer_id = data.get("taxpayer_id")
                    # One task for the current name plus one per former name.
                    names = [data.get("query_key")]
                    used_name = data.get("used_name")
                    if used_name:
                        names.extend(used_name.split(","))
                    for name in names:
                        # Very short names match far too many companies; skip
                        # them.  Also guards against a NULL company_name,
                        # which crashed len() in the original.
                        if name and len(name) > 3:
                            task = {"query_key": name, "taxpayer_id": taxpayer_id}
                            self.server.zadd(self.redis_key, {json.dumps(task, ensure_ascii=False): 0})
                self.server.set(record_task_table, str(max(i["id"] for i in datas)))
                return
            self.logger.info(f"数据加载完成,没有数据,记录最后一条任务id {self.redis_key} {last_task_id}")
            self.server.set(record_task_table, last_task_id)
            if self.count_size(self.redis_key):
                return
            time.sleep(60 * 5)

    def make_request_from_data(self, formatted_data: dict):
        """Build the signed search request for one task dict.

        NOTE: the original annotation ``str or dict`` evaluated to plain
        ``str``; the body always indexes the task as a mapping, so it is
        annotated as ``dict`` here.
        """
        company_name = formatted_data["query_key"]
        timestamp = round(time.time())
        route = (
            f"/api/job/search-pc?api_key=51job&timestamp={timestamp}"
            f"&keyword={quote(company_name)}&searchType=2&function=&industry="
            "&jobArea=000000&jobArea2=&landmark=&metro=&salary=&workYear=&degree=&companyType="
            "&companySize=&jobType=&issueDate=&sortType=0&pageNum=1&requestId=&pageSize=20&source=1&accountId=&"
            "pageCode=sou%7Csou%7Csoulb"
        )
        headers_sign = {
            # The site verifies an HMAC-SHA256 of the request path+query.
            "Sign": hmac.new(self.cupid_sign_key.encode("utf-8"), route.encode("utf-8"), digestmod=hashlib.sha256).hexdigest(),
            **self.headers,
        }
        url = self.hostname + route
        yield scrapy.Request(url, callback=self.parse, cb_kwargs={"company_name": company_name}, headers=headers_sign)

    def start_callback(self, task_obj, *args, **kwargs):
        """Record a crawl-start row; failures are logged, never raised."""
        if self.save_mysql_log:
            try:
                query_key = task_obj.get("query_key")
                taxpayer_id = task_obj.get("taxpayer_id")
                # uk_id ties this start row to the matching end row.
                self.uk_id = hashlib.md5(f"{query_key}{time.time()}{self.source}".encode("utf-8")).hexdigest()
                self.to_db.add_batch_smart(
                    "net_jobs_task_log",
                    [
                        {
                            "query_key": query_key,
                            "uk_id": self.uk_id,
                            "source": self.source,
                            "taxpayer_id": taxpayer_id,
                            "crawler_start": datetime.datetime.now(),
                        }
                    ],
                    update_columns=["crawler_start"],
                )
            except Exception as e:
                self.logger.error(f"spider start callback {e}")

    def end_callback(self, task_obj, *args, **kwargs):
        """Record a crawl-end row with the number of live jobs stored."""
        if task_obj:
            if self.save_mysql_log:
                try:
                    query_key = task_obj.get("query_key")
                    # NOTE(review): query_key is interpolated into SQL; a
                    # company name containing a quote breaks this query.
                    # Move to a parameterized query if MysqlDB supports it.
                    counts = self.to_db.find(
                        f"""select count(*) as count
                                from net_job_company_detail
                                         left join net_job_detail on net_job_company_detail.company_id = net_job_detail.company_id
                                where company_name = '{query_key}'
                                  and net_job_company_detail.source = '{self.source}'
                                  and net_job_detail.state = 1""",
                        to_json=True,
                    )
                    count = counts[0]["count"] if counts else 0
                    self.to_db.add_batch_smart(
                        "net_jobs_task_log",
                        [
                            {
                                "query_key": query_key,
                                "uk_id": self.uk_id,
                                "crawler_end": datetime.datetime.now(),
                                "count": count,
                            }
                        ],
                        update_columns=["crawler_end", "count"],
                    )
                except Exception as e:
                    self.logger.error(f"spider end callback {e}")


class THSQueueQCWYSpider(RosterQCWYSpider):
    """RosterQCWYSpider variant whose task source is the PostgreSQL
    ``enterprise_basic_info`` table (同花顺 data) instead of the MySQL roster."""

    pg_db = None  # PostgreSQL task source (set in __init__)

    def __init__(self, **kwargs: Any):
        """Open the PostgreSQL task source in addition to the parent DBs."""
        super().__init__(**kwargs)
        self.pg_db = PostgresqlDB()

    def add_task(self):
        """Load the next batch of enterprises (keyed by ``seq``) into Redis.

        Checkpoints the last dispatched ``seq`` under ``<redis_key>:ths:id``
        and wraps back to 0 when the table is exhausted.  Implemented as a
        loop with a short sleep between empty rounds: the original recursed
        with no delay, which busy-looped against the database (and grew the
        stack) whenever the table was empty.
        """
        record_task_table = self.redis_key + ":ths:id"
        batch = 1000
        while True:
            last_task_id = self.server.get(record_task_table)
            last_task_id = int(last_task_id) if last_task_id else 0
            if last_task_id:
                sql = (
                    f"SELECT seq, corp_name as query_key, used_name, unified_social_credit_code as taxpayer_id from"
                    f" enterprise_basic_info where seq > {last_task_id} "
                    f" and isvalid = 1 limit {batch}"
                )
            else:
                sql = (
                    f"SELECT seq, corp_name as query_key, used_name, unified_social_credit_code as taxpayer_id from "
                    f"enterprise_basic_info where seq <  {batch}"
                    f" and isvalid = 1 limit {batch}"
                )
            datas = self.pg_db.find(sql, to_json=True)
            if datas:
                self.logger.info(
                    "数据加载完成,{}, id从{}到{} 数量 {}".format(self.redis_key, last_task_id, last_task_id + batch, len(datas))
                )
                for data in datas:
                    taxpayer_id = data.get("taxpayer_id")
                    # Current name plus every former name becomes a task;
                    # unlike the parent, no minimum-length filter is applied.
                    names = [data.get("query_key")]
                    used_name = data.get("used_name")
                    if used_name:
                        names.extend(used_name.split(","))
                    for name in names:
                        task = {"query_key": name, "taxpayer_id": taxpayer_id}
                        self.server.zadd(self.redis_key, {json.dumps(task, ensure_ascii=False): 0})
                self.server.set(record_task_table, str(max(i["seq"] for i in datas)))
                return
            self.logger.info("数据加载完成,没有数据,准备下一轮")
            if self.count_size(self.redis_key):
                return
            self.server.set(record_task_table, "0")
            time.sleep(60)  # avoid hammering PostgreSQL while the table is empty


class QCWYSSpider(RosterQCWYSpider):
    """51job crawl pipeline: company search -> company page -> company-info
    API -> per-category paged job lists -> job items."""

    name = "qcwy_jobs"
    source = "前程无忧"
    local_task_queue = Queue(maxsize=1)
    auto_next = True
    url = 'www.51job.com'
    cupid_sign_key = "abfc8f9dcf8c3f3d8aa294ac5f2cf2cc7767e5592590f39c3f503271dd68562b"
    client_key = "jmhbz4xv1p8tw9z6umt6cw93bri96iuo"
    hostname = 'https://we.51job.com'
    headers = {
        "Accept-Language": "zh-CN,zh;q=0.9,zu;q=0.8,be;q=0.7,en;q=0.6",
        "Accept-Encoding": "gzip,deflate,br",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "From-Domain": "51job_web",
        "Origin": hostname,
        "Pragma": "no-cache",
        "Referer": hostname,
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-site",
    }

    @classmethod
    def update_settings(cls, settings: BaseSettings) -> None:
        """Overlay anti-ban settings (allow many HTTP codes, heavy retries)
        on top of the class custom_settings at 'spider' priority."""
        new_settings = {
            **(cls.custom_settings or {}),
            "HTTPERROR_ALLOWED_CODES": [302, 400, 404, 500, 200, 202, 502],
            "RETRY_TIMES": 40,
            "RETRY_HTTP_CODES": [],
        }
        settings.setdict(new_settings, priority="spider")

    def _sign(self, payload: str) -> str:
        """HMAC-SHA256 hexdigest 51job expects in the ``Sign`` header."""
        return hmac.new(
            self.cupid_sign_key.encode("utf-8"), payload.encode("utf-8"), digestmod=hashlib.sha256
        ).hexdigest()

    @staticmethod
    def _signed_url(src_url: str) -> str:
        """Append the JS anti-crawl token to *src_url* as an extra query
        parameter.  get_real_url returns ``url?token=...``; *src_url* already
        carries a query string, so the ``?`` is turned into ``&``."""
        extra = get_real_url(src_url).replace(src_url, '').replace("?", "&")
        return src_url + extra

    def _job_list_request(self, coid, function_code, page_num, callback, cb_kwargs):
        """Build one signed POST to the paged company job-list API.

        The body is signed together with the route (``route+body``).
        """
        payload = {
            "pageNum": page_num,
            "pageSize": 20,
            "coId": str(coid),
            "jobArea": "",
            "salaryType": "",
            "function": function_code,
        }
        route = f"/open/noauth/jobs/company?api_key=51job&timestamp={round(time.time())}"
        url = self._signed_url("https://cupid.51job.com" + route)
        body = json.dumps(payload)
        return scrapy.FormRequest(
            url,
            body=body,
            method="POST",
            headers={
                **self.headers,
                "Content-Type": "application/json",
                "Sign": self._sign(f"{route}{body}"),
            },
            callback=callback,
            cb_kwargs=cb_kwargs,
        )

    def start_requests(self):
        """Kick off with a queued task when present, else a smoke-test company."""
        if self.local_task_queue.full():
            obj = self.local_task_queue.get_nowait()
            company_name = obj.get("query_key")
        else:
            company_name = "东莞市五方园信息科技有限公司"

        route = (
            f"/api/job/search-pc?api_key=51job&timestamp={round(time.time())}"
            f"&keyword={quote(company_name)}&searchType=2&function=&industry="
            "&jobArea=000000&jobArea2=&landmark=&metro=&salary=&workYear=&degree=&companyType="
            "&companySize=&jobType=&issueDate=&sortType=0&pageNum=1&requestId=&pageSize=20&source=1&accountId=&"
            "pageCode=sou%7Csou%7Csoulb"
        )
        yield scrapy.Request(
            self.hostname + route,
            callback=self.parse,
            cb_kwargs={"company_name": company_name},
            headers={"Sign": self._sign(route), **self.headers},
        )

    def parse(self, response, **kwargs: Any) -> Any:
        """Pick the first search hit whose full company name contains the
        query key and request its company page."""
        query_key = kwargs.get("company_name")
        # Be defensive: a blocked/empty response has no resultbody/job/items.
        data = response.json().get("resultbody") or {}
        jobs = (data.get("job") or {}).get("items") or []
        company_mapping = {i["fullCompanyName"]: i["companyHref"] for i in jobs}
        matches = {name: href for name, href in company_mapping.items() if query_key in name}
        if matches:
            company_name, company_url = next(iter(matches.items()))
            company_url = get_real_url(company_url)
            self.logger.info(f"query_key {query_key} 查询到企业 {company_name} {company_url}")
            yield scrapy.Request(
                company_url,
                callback=self.parse_company,
                cb_kwargs={"company_name": company_name},
                headers=self.headers,
            )
        else:
            self.logger.error(f"query_key {query_key} 未查询匹配到企业")

    def parse_company(self, response, **kwargs: Any) -> Any:
        """Extract the encrypted company id from the page URL and call the
        cupid company-info API."""
        match = re.search(r"all/(.*?)\.html", response.url)
        if not match:
            # The original indexed findall(...)[0] and crashed on a redirect
            # to an unexpected URL shape.
            self.logger.error(f"parse_company: unexpected url {response.url}")
            return
        coid = match.group(1)
        # The path segment is "co" + encrypted id.  str.lstrip("co") in the
        # original stripped any run of leading 'c'/'o' characters and could
        # therefore eat the start of the id itself; slice the prefix instead.
        encry_id = coid[2:] if coid.startswith("co") else coid
        timestamp = int(time.time())
        route = f"/open/noauth/company-info/pc-info?api_key=51job&timestamp={timestamp}&encryCompanyId={encry_id}"
        url = self._signed_url("https://cupid.51job.com" + route)
        yield scrapy.Request(
            url,
            headers={**self.headers, "Sign": self._sign(route)},
            callback=self.parse_company_detail,
            cb_kwargs=kwargs,
        )

    def parse_company_detail(self, response, **kwargs: Any) -> Any:
        """Emit company and business-licence items, then request the
        (unfiltered) job list for category discovery."""
        job_num = kwargs.get("job_num")
        res = response.json()["resultbody"]
        data = res['coinfo']

        company_item = NetJobCompanyDetailItem()
        company_item['company_id'] = data['ctmId']
        company_item['source'] = self.source
        company_item['company_name'] = data.get("coname")
        company_item['scale'] = data.get("cosize")
        company_item['company_desc'] = data.get("coinfo")
        company_item['job_num'] = job_num
        company_item['address_list'] = [data.get("caddr")] if data.get("caddr") else None
        company_item['industry'] = data.get("indtype2") or data.get("indtype1")
        company_item['nature'] = data.get("cotype")
        company_item['credit_label'] = data.get("hrawards")
        company_item['url'] = f"https://jobs.51job.com/all/co{data['encryCompanyId']}.html"
        yield company_item

        license_info = res.get("license") or {}
        if license_info:
            business_item = NetJobEnterpriseBusinessInfoItem()
            business_item['source'] = self.source
            business_item['company_id'] = data['ctmId']
            business_item['company_name'] = license_info.get("businessName") or data.get("coname")
            business_item['address'] = data.get("caddr")
            business_item['industry'] = data.get("indtype2") or data.get("indtype1")
            business_item['start_time'] = license_info.get("startAt") or ""
            business_item['regist_capi'] = license_info.get("registCapi")
            business_item['oper_name'] = license_info.get("operName") or ""
            business_item['city'] = data.get("areaString")
            yield business_item

        kwargs.update({"encryCompanyId": data['encryCompanyId'], 'coid': data['coid'], "company_id": data['ctmId']})
        yield self._job_list_request(
            coid=data['coid'],
            function_code="",
            page_num=1,
            callback=self.parse_job_list_search,
            cb_kwargs=kwargs,
        )

    def parse_job_list_search(self, response, **kwargs: Any) -> Any:
        """Mark old DB rows stale, then fan out one job-list request per
        job-function category."""
        coid = kwargs.get("coid")
        company_id = kwargs.get("company_id")
        company_name = kwargs.get("company_name")
        res = response.json()
        categories = (res.get("resultbody") or {}).get("functionFilter", {}).get("items") or []
        # NOTE(review): company_id comes from the 51job API, not user input,
        # but this should still become a parameterized query.
        self.to_db.execute(f'UPDATE net_job_detail SET state = -1 WHERE company_id = "{company_id}" and source = "{self.source}"')
        self.logger.info(f"{company_name} {company_id} 职位列表获取成功 重置数据库职位状态")
        for item in categories:
            # Copy kwargs per request: scrapy keeps a *reference* to
            # cb_kwargs, so mutating one shared dict (as the original did)
            # leaked the last category into every pending request.
            task_kwargs = {
                **kwargs,
                "functional_type": item.get("chinese"),
                "functional_type_code": item.get("code"),
            }
            yield self._job_list_request(
                coid=coid,
                function_code=item.get("code"),
                page_num=1,
                callback=self.gen_job_list,
                cb_kwargs=task_kwargs,
            )

    def gen_job_list(self, response, **kwargs: Any) -> Any:
        """Fan out pages 2..N for one category, then parse page 1 itself."""
        coid = kwargs.get("coid")
        functional_type_code = kwargs.get("functional_type_code")
        res = response.json()
        total = (res.get('resultbody') or {}).get('totalCount') or 0
        # ceil(total / 20): the original floor division dropped the final
        # partial page (e.g. 21 jobs -> 1 page instead of 2).
        page_total = -(-total // 20)
        for num in range(2, page_total + 1):
            # Independent kwargs copy per request (see parse_job_list_search).
            # The original also overwrote functional_type with the category
            # *code* here; keep the Chinese name set by the caller.
            yield self._job_list_request(
                coid=coid,
                function_code=functional_type_code,
                page_num=num,
                callback=self.parse_job_list,
                cb_kwargs=dict(kwargs),
            )
        yield from self.parse_job_list(response, **kwargs)

    def parse_job_list(self, response, **kwargs: Any) -> Any:
        """Yield one NetJobDetailItem per job row in the response."""
        functional_type = kwargs.get("functional_type")
        company_id = kwargs.get("company_id")
        res = response.json()["resultbody"]
        for data in res.get("items", []):
            item = NetJobDetailItem()
            item["source"] = self.source
            item["name"] = data.get("jobName")
            item["job_id"] = data.get("jobId")
            item["company_id"] = company_id
            item["experience"] = data.get("workYearString")
            item["degree"] = data.get("degreeString")
            item["job_area"] = data.get("jobAreaString")
            item["salary"] = data.get("provideSalaryString")
            item["publish_time"] = data.get("issueDateString")
            item["evaluation_tags"] = data.get("jobTagsForOrder")
            item["job_desc"] = data.get("jobDescribe") or ""
            item["functional_type"] = functional_type
            item["keywords"] = data.get("jobTags")
            item["job_address"] = data.get("jobAreaString")
            # Bug fix: scrapy Items only support mapping access; the
            # original ``item.job_id`` raised AttributeError.
            item["url"] = f"https://jobs.51job.com/wuxi-wxxq/{item['job_id']}.html"
            item["hr_name"] = data.get("hrName")
            item["hr_position"] = data.get("hrPosition")
            yield item


def run():
    """Launch this spider through the Scrapy CLI in the current process."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "qcwy_jobs"])


if __name__ == "__main__":
    from argparse import ArgumentParser

    # CLI entry point: spawn `--worker` crawl processes via run_mul, each
    # running `scrapy crawl qcwy_jobs` (see run()).
    parser = ArgumentParser(description="命令运行爬虫，此命令慎重修改")

    parser.add_argument("--worker", type=int, default=1, help="进程数量")

    run_args = parser.parse_args()

    # NOTE: --worker 0 silently does nothing.
    if run_args.worker:
        run_mul(max_workers=run_args.worker, target=run)