#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2024/7/17 09:29
# @Author  : 王凯
# @File    : liepin_jobs.py
# @Project : scrapy_spider
import datetime
import hashlib
import json
import multiprocessing
import re
import sys
import time
from pathlib import Path
from queue import Queue
from typing import Any
from urllib import parse

import scrapy
from scrapy.http import Response
from scrapy.settings import BaseSettings

sys.path.append(Path(__file__).parent.parent.parent.parent.parent.as_posix())
from apps.jobs.jobs.items import (
    NetJobDetailItem,
    NetJobCompanyDetailItem,
    NetJobEnterpriseBusinessInfoItem,
)
from apps.jobs.jobs.spiders import CustomException, run_mul
from components.component.scrapy_redis_task.spiders import RedisTaskSpider
from components.config import WFQ_SOURCE_MYSQL_CONFIG
from utils.db.mysqldb import MysqlDB
from utils.tonghuashun_tools import PostgresqlDB
from utils.demjson import decode

import subprocess
from functools import partial

# HACK: globally monkey-patch subprocess.Popen so every pipe opened anywhere in
# this process (including by third-party libraries) decodes as UTF-8.  This is
# process-wide and affects callers that pass bytes-mode expectations — keep in
# mind if a library starts failing on binary subprocess output.
subprocess.Popen = partial(subprocess.Popen, encoding="utf-8")


class RosterLiePinSpider(RedisTaskSpider):
    """Base Liepin spider fed from the MySQL ``roster_jobs`` table.

    Responsibilities:
      * ``add_task``: page through ``roster_jobs`` and push company-name tasks
        into the Redis sorted set used by the task framework.
      * ``start_callback`` / ``end_callback``: bookkeeping rows in
        ``net_jobs_task_log`` marking when a task started/finished and how
        many jobs were collected.
    """

    save_mysql_log = True  # whether to write start/end rows to net_jobs_task_log
    # NOTE(review): uk_id is class-level state shared by all in-flight tasks;
    # with concurrent callbacks an end row can pair with the wrong start row.
    uk_id = None
    to_db = None  # destination MySQL (net_jobs_* tables)
    wfq_source_db = None  # source MySQL holding roster_jobs
    custom_settings = {
        "REDIS_START_URLS_KEY": "scrapy:task:tyc:%(name)s:start_urls",
        "LOG_LEVEL": "INFO",
    }
    source = "猎聘"

    def __init__(self, **kwargs: Any):
        super().__init__(**kwargs)
        self.to_db = MysqlDB()
        self.wfq_source_db = MysqlDB(
            ip=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_IP"],
            port=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_PORT"],
            db=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_DB"],
            user_name=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_USER_NAME"],
            user_pass=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_USER_PASS"],
        )

    def add_task(self):
        """Load the next batch of ``roster_jobs`` rows into the Redis queue.

        The last consumed primary key is persisted under ``<redis_key>:id`` so
        a restart resumes where it left off.  When the source has no new rows
        and the Redis queue is empty, polls again every 5 minutes.
        """
        record_task_table = self.redis_key + ":id"
        batch = 1000
        # Loop instead of the previous tail-recursion: the old version added a
        # stack frame for every empty 5-minute poll and would eventually blow
        # the recursion limit on a long-idle queue.
        while True:
            last_task_id = self.server.get(record_task_table)
            last_task_id = int(last_task_id) if last_task_id else 0
            if last_task_id:
                sql = (
                    f"SELECT company_name as query_key, used_name, taxpayer_id, id from "
                    f"roster_jobs where id > {last_task_id} limit {batch}"
                )
            else:
                sql = f"SELECT company_name as query_key, used_name, taxpayer_id, id from roster_jobs limit {batch}"
            datas = self.wfq_source_db.find(sql, to_json=True)
            if datas:
                self.logger.info(
                    "数据加载完成,{}, id从{}到{} 数量 {}".format(self.redis_key, last_task_id, last_task_id + batch, len(datas))
                )
                for data in datas:
                    query_key = data.get("query_key")
                    taxpayer_id = data.get("taxpayer_id")
                    new_task_list = [{"query_key": query_key, "taxpayer_id": taxpayer_id}]

                    # Historical company names also get their own task.
                    used_name = data.get("used_name")
                    if used_name:
                        for name in used_name.split(","):
                            new_task_list.append({"query_key": name, "taxpayer_id": taxpayer_id})

                    for new_task in new_task_list:
                        # `or ""` guards against a NULL company_name, which
                        # previously raised TypeError on len(None).
                        if len(new_task.get("query_key") or "") > 3:
                            self.server.zadd(self.redis_key, {json.dumps(new_task, ensure_ascii=False): 0})
                last_task_id = max(i["id"] for i in datas)
                self.server.set(record_task_table, str(last_task_id))
                return
            self.logger.info(f"数据加载完成,没有数据,记录最后一条任务id {self.redis_key} {last_task_id}")
            self.server.set(record_task_table, last_task_id)
            if self.count_size(self.redis_key):
                return
            time.sleep(60 * 5)

    def make_request_from_data(self, formatted_data: "str | dict"):
        """Turn a queued task dict into the initial suggest-API request.

        NOTE: the old annotation ``str or dict`` evaluated to just ``str``;
        the body indexes the value, so a mapping is what actually arrives.
        """
        company_name = formatted_data["query_key"]
        url = f"https://api-c.liepin.com/api/com.liepin.searchfront4c.pc-search-suggest-list?keyword={company_name}"
        yield scrapy.Request(url, callback=self.parse, cb_kwargs={"company_name": company_name}, dont_filter=True)

    def start_callback(self, task_obj, *args, **kwargs):
        """Record a crawl-start row (and fresh uk_id) for this task."""
        if self.save_mysql_log:
            try:
                query_key = task_obj.get("query_key")
                taxpayer_id = task_obj.get("taxpayer_id")
                self.uk_id = hashlib.md5(f"{query_key}{time.time()}{self.source}".encode("utf-8")).hexdigest()
                self.to_db.add_batch_smart(
                    "net_jobs_task_log",
                    [
                        {
                            "query_key": query_key,
                            "uk_id": self.uk_id,
                            "source": self.source,
                            "taxpayer_id": taxpayer_id,
                            "crawler_start": datetime.datetime.now(),
                        }
                    ],
                    update_columns=["crawler_start"],
                )
            except Exception as e:
                self.logger.error(f"spider start callback {e}")

    def end_callback(self, task_obj, *args, **kwargs):
        """Record a crawl-end row with the number of live jobs collected."""
        if task_obj:
            if self.save_mysql_log:
                try:
                    query_key = task_obj.get("query_key")
                    # NOTE(review): query_key is interpolated straight into the
                    # SQL string — a company name containing a quote breaks the
                    # statement (and is injectable).  Parameterize if MysqlDB
                    # supports bound parameters.
                    counts = self.to_db.find(
                        f"""select count(*) as count
                                from net_job_company_detail
                                         left join net_job_detail on net_job_company_detail.company_id = net_job_detail.company_id
                                where company_name = '{query_key}'
                                  and net_job_company_detail.source = '{self.source}'
                                  and net_job_detail.state = 1""",
                        to_json=True,
                    )
                    count = counts[0]["count"] if counts else 0

                    self.to_db.add_batch_smart(
                        "net_jobs_task_log",
                        [
                            {
                                "query_key": query_key,
                                "uk_id": self.uk_id,
                                "crawler_end": datetime.datetime.now(),
                                "count": count,
                            }
                        ],
                        update_columns=["crawler_end", "count"],
                    )

                except Exception as e:
                    self.logger.error(f"spider end callback {e}")


class THSQueueLiePinSpider(RosterLiePinSpider):
    """Variant fed from the PostgreSQL ``enterprise_basic_info`` table.

    Unlike the roster spider, this one restarts from ``seq = 0`` when the
    table is exhausted, so the company list is crawled in a continuous cycle.
    """

    pg_db = None  # PostgreSQL source connection

    def __init__(self, **kwargs: Any):
        super().__init__(**kwargs)
        self.pg_db = PostgresqlDB()

    def add_task(self):
        """Load the next batch of enterprise rows into the Redis queue.

        The last consumed ``seq`` is persisted under ``<redis_key>:ths:id``;
        when no rows remain and the queue is empty the cursor resets to 0 and
        the scan starts over.
        """
        record_task_table = self.redis_key + ":ths:id"
        batch = 1000
        # Loop instead of recursion: the old version re-entered add_task()
        # with no delay whenever the query came back empty, which on an empty
        # source table recursed without bound.
        while True:
            last_task_id = self.server.get(record_task_table)
            last_task_id = int(last_task_id) if last_task_id else 0
            if last_task_id:
                sql = (
                    f"SELECT seq, corp_name as query_key, used_name, unified_social_credit_code as taxpayer_id from"
                    f" enterprise_basic_info where seq > {last_task_id} "
                    f" and isvalid = 1 limit {batch}"
                )
            else:
                sql = (
                    f"SELECT seq, corp_name as query_key, used_name, unified_social_credit_code as taxpayer_id from "
                    f"enterprise_basic_info where seq <  {batch}"
                    f" and isvalid = 1 limit {batch}"
                )
            datas = self.pg_db.find(sql, to_json=True)
            if datas:
                self.logger.info(
                    "数据加载完成,{}, id从{}到{} 数量 {}".format(self.redis_key, last_task_id, last_task_id + batch, len(datas))
                )
                for data in datas:
                    query_key = data.get("query_key")
                    taxpayer_id = data.get("taxpayer_id")
                    new_task_list = [{"query_key": query_key, "taxpayer_id": taxpayer_id}]

                    # Historical company names also get their own task.
                    used_name = data.get("used_name")
                    if used_name:
                        for name in used_name.split(","):
                            new_task_list.append({"query_key": name, "taxpayer_id": taxpayer_id})

                    for new_task in new_task_list:
                        self.server.zadd(self.redis_key, {json.dumps(new_task, ensure_ascii=False): 0})

                last_task_id = max(i["seq"] for i in datas)
                self.server.set(record_task_table, str(last_task_id))
                return
            self.logger.info("数据加载完成,没有数据,准备下一轮")
            if self.count_size(self.redis_key):
                return
            self.server.set(record_task_table, "0")
            # Brief pause so an empty source table doesn't spin a hot loop
            # (the recursive original had no delay at all here).
            time.sleep(60)


class LiePinSpider(RosterLiePinSpider):
    """Scrape Liepin (猎聘) company profiles and their job postings.

    Request flow:
      1. Inherited ``make_request_from_data`` hits the keyword-suggest API.
      2. ``parse`` resolves the company id, or falls back to a keyword job
         search when only suggestion words come back.
      3. ``parse_company_info`` scrapes the company page, yields a
         ``NetJobCompanyDetailItem`` and a ``NetJobEnterpriseBusinessInfoItem``,
         then requests the company's job-title facet list.
      4. ``parse_jobs_search`` -> ``parse_all_jobs_list`` ->
         ``parse_one_jobs_list`` -> ``parse_jobs_info`` walk the paged job
         lists down to individual ``NetJobDetailItem`` records.
    """

    name = "liepin_jobs"
    source = "猎聘"
    local_task_queue = Queue(maxsize=1)
    auto_next = True
    # True: fan out one search per job-title facet; False: single unfiltered search.
    use_detail = True

    @classmethod
    def update_settings(cls, settings: BaseSettings) -> None:
        """Overlay aggressive retry/allowed-code settings on custom_settings."""
        new_settings = {
            **(cls.custom_settings or {}),
            "HTTPERROR_ALLOWED_CODES": [302, 400, 404, 500, 200, 202, 502],
            "RETRY_TIMES": 40,
            "RETRY_HTTP_CODES": [],
        }
        settings.setdict(new_settings, priority="spider")

    def parse(self, response: Response, **kwargs: Any) -> Any:
        """Resolve a company name to a Liepin company id via the suggest API.

        Falls back to a plain job search using the first suggested keyword
        when no direct company match is returned; raises CustomException when
        the API reports an error (``flag == 0``).
        """
        company_name = kwargs.get("company_name")
        data = response.json()
        ecomp_id = data.get("data", {}).get("suggestComp4KeyWord", {}).get("ecompId")
        if ecomp_id:
            self.logger.info(f"查询到数据 {company_name} {ecomp_id}")
            company_url = f"https://www.liepin.com/company/{ecomp_id}/"
            yield scrapy.Request(
                url=company_url,
                callback=self.parse_company_info,
                cb_kwargs=dict(ecomp_id=ecomp_id, company_name=company_name),
            )
            return
        if data.get("flag") == 0:
            # flag == 0 is an API-side error; let the task framework handle it.
            raise CustomException(f"搜索 {company_name} 错误")
        suggest_words = data.get("data", {}).get("suggestWords")
        if not suggest_words:
            self.logger.error(f"未找到数据 {company_name} {data}")
            return
        company_name = suggest_words[0]
        url = "https://api-c.liepin.com/api/com.liepin.searchfront4c.pc-search-job"
        params = {
            "data": {
                "mainSearchPcConditionForm": {
                    "city": "",
                    "dq": "",
                    "pubTime": "",
                    "currentPage": 0,
                    "pageSize": 40,
                    "key": f"{company_name}",
                    "suggestTag": "",
                    "workYearCode": "",
                    "compId": "",
                    "compName": "",
                    "compTag": "",
                    "industry": "",
                    "salary": "",
                    "jobKind": "",
                    "compScale": "",
                    "compKind": "",
                    "compStage": "",
                    "eduLevel": "",
                    "otherCity": "",
                },
                "passThroughForm": {},
            }
        }
        yield scrapy.FormRequest(
            url,
            method="POST",
            body=json.dumps(params, ensure_ascii=False),
            headers={"Content-Type": "application/json;charset=UTF-8"},
            cb_kwargs=dict(company_name=company_name),
            callback=self.parse_company_search_info,
        )

    def parse_company_search_info(self, response: Response, **kwargs: Any) -> Any:
        """Scan job-search results for an exact company-name match."""
        company_name = kwargs.get("company_name")
        res = response.json()
        datas = res.get("data", {}).get("data", {}).get("jobCardList", [])
        if datas:
            for i in datas:
                if i.get("comp", {}).get("compName") == company_name:
                    ecomp_id = i.get("comp", {}).get("compId")
                    self.logger.info(f"查询到数据 {ecomp_id} {company_name}")
                    company_url = f"https://www.liepin.com/company/{ecomp_id}/"
                    yield scrapy.Request(
                        url=company_url,
                        callback=self.parse_company_info,
                        cb_kwargs=dict(ecomp_id=ecomp_id, company_name=company_name),
                    )
                    break
        else:
            self.logger.error(f"未找到数据 {company_name} {res}")

    def parse_company_info(self, response: Response, **kwargs: Any) -> Any:
        """Scrape a company homepage into company/business items.

        Yields a ``NetJobCompanyDetailItem``, a
        ``NetJobEnterpriseBusinessInfoItem`` and finally the request for the
        company's job-title facets.
        """
        src_company_name = kwargs.get("company_name")
        ecomp_id = kwargs.get("ecomp_id")
        company_name = response.xpath('//span[@data-selector="company-name"]/text()').get()

        # The "name-right" paragraphs mix financing stage, headcount and
        # industry; classify the first two by pattern, the rest is industry.
        industry = financing = enterprise_scale = None
        name_right_p = response.xpath('//div[@class="name-right"]/p/text()').getall()
        name_right_p_list = [i.strip() for i in name_right_p]
        name_right_p_list_2 = name_right_p_list.copy()
        for name_right in name_right_p_list:
            if re.search(r"融资|[A-D]轮|上市", name_right):
                financing = name_right
                name_right_p_list_2.remove(name_right)
            elif re.search(r"\d*人", name_right):
                enterprise_scale = name_right
                name_right_p_list_2.remove(name_right)

        industry = "-".join(name_right_p_list_2)

        # Company "top list" badges (e.g. best-employer rankings).
        top_list = response.xpath("//div[@class='item-title-box']/span/text()").getall()
        top_str = "\n".join(top_list) if top_list else None

        working_time = response.xpath('//div[@class="time"]/text()').get()  # working hours
        working_time_type = response.xpath('//div[@class="time-type"]/span/text()').get()  # working-hours type

        # Company benefit/label tags, with optional explanatory tooltip.
        company_labels_list = list()
        tags_item_list = response.xpath('//div[@class="tags-container"]/div[contains(@class, "tags-item")]')
        for tags_item in tags_item_list:
            tags_dict = dict()
            span_text = tags_item.xpath(".//span/text()").get()
            tags_item_tips = tags_item.xpath('./div[@class="tags-item-tips"]/text()').get()
            if span_text:
                tags_dict["label"] = span_text
            if tags_item_tips:
                # Skip the site-wide boilerplate disclaimer tooltip.
                if tags_item_tips != "工作时间和福利信息由企业提供，每个岗位可能实际情况略有不同，具体内容可与企业招聘方确认":
                    tags_dict["explain"] = tags_item_tips
            company_labels_list.append(tags_dict)
        describer = response.xpath('//div[@class="inner-text"]/text()').get()  # company introduction

        # Executive (senior-manager) entries: name, position, bio.
        top_manager_list = list()
        top_manager_tags = response.xpath('//div[contains(@id,"company-manage-text")]/h3/..')
        for top_manager_tag in top_manager_tags:
            top_manager_dict = dict()
            # Guard against a missing <h3> text node (was an AttributeError).
            top_manager_name = (top_manager_tag.xpath("./h3/text()").get() or "").strip()
            if top_manager_name:
                top_manager_dict["name"] = top_manager_name
            top_manager_position = top_manager_tag.xpath("./h3/span/text()").get()
            if top_manager_position:
                top_manager_dict["position"] = top_manager_position
            top_manager_introduce = top_manager_tag.xpath("./p/text()").get()
            if top_manager_introduce:
                top_manager_dict["introduce"] = top_manager_introduce
            top_manager_list.append(top_manager_dict)

        # The page embeds a $CONFIG JS object carrying URL-encoded product
        # list and address.  NOTE(review): the non-greedy {.*?} stops at the
        # first "}" and may truncate nested objects — decode() has coped so
        # far; confirm against a live page if product data looks cut off.
        config_list = re.findall(r"<script>\s*var\s*\$CONFIG\s*=\s*({.*?})\s*</script>", response.text, flags=re.S)
        if config_list:
            config_json = decode(config_list[0])
            comp_product_list_url_encode = config_json.get("compProductList")
            comp_product_list_url_decode = parse.unquote(comp_product_list_url_encode)
            comp_product_list = json.loads(comp_product_list_url_decode)  # company products
            address_url_encode = config_json.get("address")
            address = parse.unquote(address_url_encode)  # company address
        else:
            comp_product_list = None
            address = None

        # Contact block: address / mail / phone / website, default "未公布".
        contact_address = company_mail = company_tel = company_web = "未公布"
        company_contact_content_list = response.xpath('//dl[@class="company-contact-item"]')
        if company_contact_content_list:
            for company_contact_content in company_contact_content_list:
                # `or ""` guards missing text nodes (was AttributeError on None).
                name_box = (company_contact_content.xpath('./dd/p[@class="name-box"]/text()').get() or "").strip()
                p_text = (company_contact_content.xpath('./dd/p[@class="text"]/text()').get() or "").strip()
                if p_text:
                    if name_box == "联系地址":
                        contact_address = p_text  # contact address
                    elif name_box == "企业邮箱":
                        company_mail = p_text  # company email
                    elif name_box == "公司电话":
                        company_tel = p_text  # company phone
                    elif name_box == "官网":
                        company_web = p_text  # company website

        # Business-registration block (工商信息): label -> field mapping.
        full_name_of_company = date_of_establishment = registered_capital = corporate_representative = None
        registered_address = taxpayer_id = industry_gs = location_area = registration_authority = None
        type_of_enterprise = management_forms = operating_scope = operating_period = None
        business_register_comp_list = response.xpath('//div[@class="business-register-content-item"]')
        if business_register_comp_list:
            for business_register_comp in business_register_comp_list:
                business_name_node = business_register_comp.xpath('./p[@class="name-box"]')
                business_name = (business_name_node.xpath("string()").get() or "").strip()
                business_text = (business_register_comp.xpath('./p[@class="text"]/text()').get() or "").strip()
                if business_text:
                    if business_name == "企业全称":
                        full_name_of_company = business_text
                    elif business_name == "成立时间":
                        date_of_establishment = business_text
                    elif business_name == "注册资本":
                        registered_capital = business_text
                    elif business_name == "法人代表":
                        corporate_representative = business_text
                    elif business_name == "注册地址":
                        registered_address = business_text
                    elif business_name == "统一信用代码":
                        taxpayer_id = business_text
                    elif business_name == "所属行业":
                        industry_gs = business_text
                    elif business_name == "所在地":
                        location_area = business_text
                    elif business_name == "登记机关":
                        registration_authority = business_text
                    elif business_name == "营业期限":
                        operating_period = business_text
                    elif business_name == "企业类型":
                        type_of_enterprise = business_text
                    elif business_name == "经营状态":
                        management_forms = business_text
                    elif business_name == "经营范围":
                        operating_scope = business_text

        # Financing history timeline.
        financing_info = list()
        financing_list = response.xpath('//div[@class="swiper-wrapper"]/div/div[@class="financing-step"]')
        for financing_ in financing_list:
            financing_dict = dict()
            # Reset per round: previously these leaked from the prior
            # iteration whenever a round's text-box didn't split into 2 parts.
            financing_description = financing_amount = None
            month = financing_.xpath('./div[@class="date-box"]/span[@class="month"]/text()').get()
            year = financing_.xpath('./div[@class="date-box"]/span[@class="year"]/text()').get()
            financing_time = f"{year}-{month}"
            text_box_str = financing_.xpath('./div[@class="text-box"]/text()').get()
            if not text_box_str:
                continue
            text_box_list = [i.strip() for i in text_box_str.split("\n") if i.strip()]
            if len(text_box_list) == 2:
                financing_description = text_box_list[0]
                financing_amount = text_box_list[1]

            investment_organization_raw = financing_.xpath('./div[@class="agency-box"]//text()').getall()
            investment_organization_clean = [i.replace("投资机构：", "") for i in investment_organization_raw]
            investment_organization_list = [i.strip() for i in investment_organization_clean if i != "/"]

            if month and year:
                financing_dict["融资年月"] = financing_time
            if financing_description:
                financing_dict["融资描述"] = financing_description
            if financing_amount:
                financing_dict["融资金额"] = financing_amount
            if investment_organization_list:
                financing_dict["投资机构"] = investment_organization_list
            financing_info.append(financing_dict)

        # Number of open positions, parsed from the "职位" tab label.
        jobs_num = response.xpath(
            "//*[contains(@class, 'company-header-content-tab')]//a[contains(text(), '职位')]/text()"
        ).re_first(r"\d+")

        company_item = NetJobCompanyDetailItem()
        company_item["company_id"] = ecomp_id  # source-internal company id
        company_item["source"] = self.source
        company_item["company_name"] = full_name_of_company or company_name or src_company_name
        company_item["short_name"] = company_name or src_company_name
        company_item["financing_now"] = financing  # current financing stage
        company_item["scale"] = enterprise_scale  # headcount bracket
        company_item["industry"] = industry
        company_item["job_num"] = jobs_num  # open-position count
        company_item["company_desc"] = describer
        company_item["phone"] = company_tel
        company_item["email"] = company_mail
        company_item["web_url"] = company_web
        company_item["work_time"] = working_time
        company_item["work_time_extend"] = working_time_type
        company_item["work_benefit_tags"] = company_labels_list
        address_list = [i for i in (address, contact_address) if i]
        if address_list:
            company_item["address_list"] = address_list
        company_item["manager_list"] = top_manager_list
        if financing_info:
            company_item["financing_info"] = financing_info
        company_item["credit_label"] = top_str  # ranking/credit badges
        if comp_product_list:
            company_item["products"] = comp_product_list
        company_item["url"] = response.url
        company_item["state"] = 1  # 1 = live, -1 = deleted
        yield company_item

        business_item = NetJobEnterpriseBusinessInfoItem()
        business_item["company_id"] = ecomp_id
        business_item["source"] = self.source
        business_item["taxpayer_id"] = taxpayer_id  # unified social credit code
        business_item["company_name"] = full_name_of_company or src_company_name
        business_item["start_time"] = date_of_establishment
        business_item["regist_capi"] = registered_capital
        business_item["oper_name"] = corporate_representative or ""  # legal representative
        business_item["address"] = registered_address
        business_item["industry"] = industry_gs
        business_item["city"] = location_area
        business_item["record_authority"] = registration_authority
        business_item["operating_dur"] = operating_period
        business_item["corp_type"] = type_of_enterprise
        business_item["operating_status"] = management_forms
        business_item["operating_scope"] = operating_scope
        business_item["state"] = 1  # 1 = live, -1 = deleted
        yield business_item

        # Continue to the job-title facet list for this company.
        url = "https://api-c.liepin.com/api/com.liepin.searchfront4c.pc-ecomp-homepage-search-condition"
        data = {"compId": str(ecomp_id)}
        yield scrapy.FormRequest(
            url,
            formdata=data,
            method="POST",
            callback=self.parse_jobs_search,
            cb_kwargs={"ecomp_id": ecomp_id, "company_name": company_name},
        )

    def parse_jobs_search(self, response: Response, **kwargs: Any):
        """Mark existing DB jobs stale, then fan out per-job-title searches."""
        res = response.json()
        ecomp_id = kwargs.get("ecomp_id")
        company_name = kwargs.get("company_name")
        response_json_data = res.get("data", {})
        job_titles = response_json_data.get("jobTitles", [])
        url = "https://api-c.liepin.com/api/com.liepin.searchfront4c.pc-comp-homepage-search-job"
        # Mark all existing rows stale; freshly-scraped jobs re-set state = 1.
        # NOTE(review): values are f-string-interpolated into SQL; ecomp_id
        # comes from the Liepin API so risk is low, but parameterize if
        # MysqlDB supports it.
        self.to_db.execute(f'UPDATE net_job_detail SET state = -1 WHERE company_id = "{ecomp_id}" and source = "{self.source}"')
        self.logger.info(f"{company_name} {ecomp_id} 职位列表获取成功 重置数据库职位状态")
        if self.use_detail is True:
            for job_title_ in job_titles:
                job_title__ = job_title_.get("code")
                job_title__name = job_title_.get("name")
                data = {
                    "data": {
                        "compJobSearchCondition": {
                            "compId": ecomp_id,
                            "pageSize": 30,
                            "curPage": 0,
                            "jobTitleCode": job_title__,
                        },
                        "passThroughForm": {},
                    }
                }
                yield scrapy.FormRequest(
                    url,
                    body=json.dumps(data),
                    method="POST",
                    headers={
                        "Content-Type": "application/json",
                    },
                    callback=self.parse_all_jobs_list,
                    cb_kwargs=dict(
                        ecomp_id=ecomp_id,
                        job_title__name=job_title__name,
                        data=data["data"],
                    ),
                    dont_filter=True,
                )
        else:
            data = {
                "data": {
                    "compJobSearchCondition": {
                        "compId": ecomp_id,
                        "pageSize": 30,
                        "curPage": 0,
                    },
                    "passThroughForm": {},
                }
            }
            yield scrapy.FormRequest(
                url,
                body=json.dumps(data),
                method="POST",
                headers={
                    "Content-Type": "application/json",
                },
                callback=self.parse_all_jobs_list,
                cb_kwargs=dict(
                    ecomp_id=ecomp_id,
                    job_title__name=None,
                    data=data["data"],
                ),
                dont_filter=True,
            )

    def parse_all_jobs_list(self, response: Response, **kwargs: Any):
        """Process page 0, then schedule pages 1..min(totalPage, 100)-1."""
        res = response.json()
        comp_job_search_condition = kwargs.get("data", {}).get("compJobSearchCondition", {})
        ecomp_id = kwargs.get("ecomp_id")
        job_title__name = kwargs.get("job_title__name")
        # This response *is* page 0; extract its jobs first.
        yield from self.parse_one_jobs_list(response, **kwargs)
        total_page = res.get("data", {}).get("pagination", {}).get("totalPage", 0)
        # Liepin caps pagination at 100 pages.
        max_page = min(int(total_page), 100)
        url = "https://api-c.liepin.com/api/com.liepin.searchfront4c.pc-comp-homepage-search-job"

        for page in range(1, max_page):
            data = {
                "data": {
                    "compJobSearchCondition": {
                        "compId": comp_job_search_condition.get("compId"),
                        "pageSize": 30,
                        "curPage": page,
                        "jobTitleCode": comp_job_search_condition.get("jobTitleCode"),
                    },
                    "passThroughForm": {},
                }
            }
            yield scrapy.FormRequest(
                url,
                body=json.dumps(data),
                method="POST",
                callback=self.parse_one_jobs_list,
                cb_kwargs=dict(
                    ecomp_id=ecomp_id,
                    job_title__name=job_title__name,
                ),
                dont_filter=True,
            )

    def parse_one_jobs_list(self, response: Response, **kwargs: Any):
        """Request the detail page for every job card on one result page."""
        res = response.json()
        job_lists = res.get("data", {}).get("data", [])
        ecomp_id = kwargs.get("ecomp_id")
        job_title__name = kwargs.get("job_title__name")
        for job_ in job_lists:
            job__ = job_.get("job", {})
            job__link = job__.get("link")
            job_id = job__.get("jobId")
            yield scrapy.Request(
                url=job__link,
                callback=self.parse_jobs_info,
                cb_kwargs=dict(
                    ecomp_id=ecomp_id,
                    job_id=job_id,
                    job_title__name=job_title__name,
                ),
            )

    def parse_jobs_info(self, response: Response, **kwargs: Any):
        """Scrape one job-detail page into a ``NetJobDetailItem``."""
        ecomp_id = kwargs.get("ecomp_id")
        job_id = kwargs.get("job_id")
        job_title__name = kwargs.get("job_title__name")
        # Job title
        job_name = response.xpath(
            '//div[@class="job-apply-content"]/div[@class="name-box"]/span[contains(@class, "name")]/text()'
        ).get()
        # Salary range
        job_salary = response.xpath(
            '//div[@class="job-apply-content"]/div[@class="name-box"]/span[@class="salary"]/text()'
        ).get()
        # Properties row: [area, ..., experience, education]
        job_properties = response.xpath('//div[@class="job-properties"]/span[not(@*)]/text()').getall()
        job_area = job_properties[0] if job_properties else ""
        job_working_time = job_properties[-2] if job_properties else ""
        job_education = job_properties[-1] if job_properties else ""
        # Job welfare tags
        job_welfare = response.xpath(
            '//div[@class="job-apply-container-left"]/div[@class="labels"]/span/text()'
        ).getall()
        # Recruiter name
        job_avator_name = response.xpath(
            '//section[@class="recruiter-container"]/div[@class="content"]/div[@class="name-box"]/span[@class="name"]/text()'
        ).get()
        # Recruiter position/description
        avator_position = response.xpath(
            '//section[@class="recruiter-container"]/div[@class="content"]/div[@class="title-box"]/span/text()'
        ).get()
        # Job search keywords
        job_introduce_labels = response.xpath(
            '//section[@class="job-intro-container"]/dl[@class="paragraph"]/div[@class="tag-box"]/ul/li/text()'
        ).getall()
        # Job description
        job_introduce = response.xpath(
            '//section[@class="job-intro-container"]/dl[@class="paragraph"]/dd[@data-selector="job-intro-content"]/text()'
        ).get()
        # Other info lines
        job_other_info = response.xpath(
            '//section[@class="job-intro-container"]/dl[@class="paragraph"]/dd[@class="ellipsis-1"]/text()'
        ).getall()
        # Job address
        job_address = response.xpath(
            '//div[@class="company-other"]/div/span[contains(text(), "职位地址")]/following-sibling::span/text()'
        ).get()
        # Publish time, embedded in page JS.  The old pattern
        # r'"pubDate": "{[\d\-T:]*}",' expected literal braces and had no
        # capture group, so it returned the whole key/value text (if it ever
        # matched at all); capture just the timestamp instead.
        pub_date_list = re.findall(r'"pubDate":\s*"([\d\-T:]+)"', response.text)
        pub_date = pub_date_list[0] if pub_date_list else None

        job_item = NetJobDetailItem()
        # scrapy.Item fields must be set with item[...]; attribute assignment
        # (the previous code) raises AttributeError and dropped every job.
        job_item["company_id"] = ecomp_id  # source-internal company id
        job_item["source"] = self.source
        job_item["job_id"] = job_id  # source-internal job id
        job_item["name"] = job_name
        job_item["degree"] = job_education  # education requirement
        job_item["experience"] = job_working_time  # experience requirement
        job_item["job_area"] = job_area
        job_item["job_address"] = job_address
        job_item["salary"] = job_salary
        job_item["functional_type"] = job_title__name  # job-title category
        if job_introduce_labels:
            job_item["keywords"] = job_introduce_labels
        job_item["publish_time"] = pub_date
        job_item["job_desc"] = job_introduce
        if job_other_info:
            job_item["other_info"] = job_other_info
        if job_welfare:
            job_item["evaluation_tags"] = job_welfare
        job_item["hr_name"] = job_avator_name
        job_item["hr_position"] = avator_position
        job_item["url"] = response.url
        job_item["state"] = 1  # 1 = live, -1 = deleted
        yield job_item


def run():
    """Launch this spider in-process through the Scrapy CLI entry point."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "liepin_jobs"])


if __name__ == "__main__":
    # Spawn one crawler process per requested worker.
    from argparse import ArgumentParser

    arg_parser = ArgumentParser(description="命令运行爬虫，此命令慎重修改")
    arg_parser.add_argument("--worker", type=int, default=1, help="进程数量")
    cli_args = arg_parser.parse_args()

    if cli_args.worker:
        run_mul(max_workers=cli_args.worker, target=run)
