#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2024/7/17 17:36
# @Author  : 王凯
# @File    : zhilian_jobs.py
# @Project : scrapy_spider
import datetime
import hashlib
import json
import multiprocessing
import re
import sys
import time
from pathlib import Path
from queue import Queue
from typing import Any

import scrapy
from scrapy.http import Response
from scrapy.settings import BaseSettings

sys.path.append(Path(__file__).parent.parent.parent.parent.parent.as_posix())
from apps.jobs.jobs.items import (
    NetJobDetailItem,
    NetJobCompanyDetailItem,
    NetJobEnterpriseBusinessInfoItem, NetJobTasteItem,
)
from apps.jobs.jobs.spiders import run_mul
from utils.tools import urlencode
from components.component.scrapy_redis_task.spiders import RedisTaskSpider
from components.config import WFQ_SOURCE_MYSQL_CONFIG
from utils.db.mysqldb import MysqlDB
from utils.tonghuashun_tools import PostgresqlDB

import subprocess
from functools import partial

# Globally monkeypatch subprocess.Popen so every child process spawned from
# this module decodes its pipes as UTF-8 — presumably to avoid locale-encoding
# (e.g. GBK on Chinese Windows) decode errors in scrapy's subprocess output.
# TODO(review): confirm which subprocess call this was added for; the patch
# affects ALL Popen users in this process.
subprocess.Popen = partial(subprocess.Popen, encoding="utf-8")


class RosterZhiLianSpider(RedisTaskSpider):
    """Base spider for 智联招聘 (Zhilian) crawls.

    Loads company-name tasks from the MySQL ``roster_jobs`` table into the
    redis task zset, and (when ``save_mysql_log`` is set) records per-task
    crawl start/end rows in ``net_jobs_task_log``.
    """

    save_mysql_log = True  # write start/end rows to net_jobs_task_log
    # md5 key linking a task's start-log row to its end-log row.
    # NOTE(review): uk_id is shared instance state — if multiple tasks can
    # overlap, end_callback may log under a later task's uk_id; confirm tasks
    # run strictly one at a time.
    uk_id = None
    to_db = None  # destination MySQL (net_job_* tables)
    wfq_source_db = None  # source MySQL holding roster_jobs
    custom_settings = {
        "REDIS_START_URLS_KEY": "scrapy:task:tyc:%(name)s:start_urls",
        "LOG_LEVEL": "INFO",
    }
    source = "智联招聘"

    def __init__(self, **kwargs: Any):
        super().__init__(**kwargs)
        self.to_db = MysqlDB()
        self.wfq_source_db = MysqlDB(
            ip=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_IP"],
            port=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_PORT"],
            db=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_DB"],
            user_name=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_USER_NAME"],
            user_pass=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_USER_PASS"],
        )

    def add_task(self):
        """Pull the next batch of ``roster_jobs`` rows and enqueue one redis
        task per company name (current name plus every comma-separated
        historical name).

        The last processed row id is persisted in redis so loading resumes
        where the previous run stopped. Rewritten from tail recursion into a
        loop so long idle stretches (one empty poll every 5 minutes) cannot
        grow the call stack.
        """
        record_task_table = self.redis_key + ":id"
        batch = 1000
        while True:
            last_task_id = self.server.get(record_task_table)
            last_task_id = int(last_task_id) if last_task_id else 0
            if last_task_id:
                sql = (
                    f"SELECT company_name as query_key, used_name, taxpayer_id, id from "
                    f"roster_jobs where id > {last_task_id} limit {batch}"
                )
            else:
                sql = f"SELECT company_name as query_key, used_name, taxpayer_id, id from roster_jobs limit {batch}"
            datas = self.wfq_source_db.find(sql, to_json=True)
            if datas:
                self.logger.info(
                    "数据加载完成,{}, id从{}到{} 数量 {}".format(
                        self.redis_key, last_task_id, last_task_id + batch, len(datas)
                    )
                )
                for data in datas:
                    query_key = data.get("query_key")
                    taxpayer_id = data.get("taxpayer_id")
                    new_task_list = [{"query_key": query_key, "taxpayer_id": taxpayer_id}]

                    used_name = data.get("used_name")
                    if used_name:
                        for name in used_name.split(","):
                            new_task_list.append({"query_key": name, "taxpayer_id": taxpayer_id})

                    for new_task in new_task_list:
                        # Skip names too short to be a meaningful company name.
                        if len(new_task.get("query_key")) > 3:
                            self.server.zadd(self.redis_key, {json.dumps(new_task, ensure_ascii=False): 0})
                self.server.set(record_task_table, str(max(i["id"] for i in datas)))
                return
            self.logger.info(f"数据加载完成,没有数据,记录最后一条任务id {self.redis_key} {last_task_id}")
            self.server.set(record_task_table, last_task_id)
            if self.count_size(self.redis_key):
                # Queue still has pending work; no need to keep polling here.
                return
            time.sleep(60 * 5)  # wait 5 minutes before polling the source table again

    def make_request_from_data(self, formatted_data: "str | dict"):
        """Build the company-review search request for one redis task.

        Note: the original annotation ``str or dict`` evaluated to plain
        ``str``; the body indexes by key, so a dict is what is expected.
        """
        company_name = formatted_data["query_key"]
        url = f"https://zq.zhaopin.com/gongsidianping/0-0-{company_name}/"
        yield scrapy.Request(url, callback=self.parse, cb_kwargs={"company_name": company_name})

    def start_callback(self, task_obj, *args, **kwargs):
        """Record the crawl start time for this task in net_jobs_task_log."""
        if self.save_mysql_log:
            try:
                query_key = task_obj.get("query_key")
                taxpayer_id = task_obj.get("taxpayer_id")
                # uk_id ties this start row to the end row written later.
                self.uk_id = hashlib.md5(f"{query_key}{time.time()}{self.source}".encode("utf-8")).hexdigest()
                self.to_db.add_batch_smart(
                    "net_jobs_task_log",
                    [
                        {
                            "query_key": query_key,
                            "uk_id": self.uk_id,
                            "source": self.source,
                            "taxpayer_id": taxpayer_id,
                            "crawler_start": datetime.datetime.now(),
                        }
                    ],
                    update_columns=["crawler_start"],
                )
            except Exception as e:
                # Logging must never break the crawl itself.
                self.logger.error(f"spider start callback {e}")

    def end_callback(self, task_obj, *args, **kwargs):
        """Record the crawl end time and the number of live jobs found."""
        if task_obj:
            if self.save_mysql_log:
                try:
                    query_key = task_obj.get("query_key")
                    # Escape single quotes so a name containing ' cannot break
                    # (or inject into) the string-built SQL below.
                    # NOTE(review): MysqlDB's parameter-binding API, if any, is
                    # not visible here — prefer it over string building if it exists.
                    safe_query_key = query_key.replace("'", "''") if query_key else query_key
                    counts = self.to_db.find(
                        f"""select count(*) as count
                                from net_job_company_detail
                                         left join net_job_detail on net_job_company_detail.company_id = net_job_detail.company_id
                                where company_name = '{safe_query_key}'
                                  and net_job_company_detail.source = '{self.source}'
                                  and net_job_detail.state = 1""",
                        to_json=True,
                    )
                    count = counts[0]["count"] if counts else 0

                    self.to_db.add_batch_smart(
                        "net_jobs_task_log",
                        [
                            {
                                "query_key": query_key,
                                "uk_id": self.uk_id,
                                "crawler_end": datetime.datetime.now(),
                                "count": count,
                            }
                        ],
                        update_columns=["crawler_end", "count"],
                    )

                except Exception as e:
                    self.logger.error(f"spider end callback {e}")


class THSQueueZhiLianSpider(RosterZhiLianSpider):
    """Variant of RosterZhiLianSpider that sources tasks from the PostgreSQL
    ``enterprise_basic_info`` table instead of MySQL ``roster_jobs``."""

    pg_db = None  # PostgreSQL source connection

    def __init__(self, **kwargs: Any):
        super().__init__(**kwargs)
        self.pg_db = PostgresqlDB()

    def add_task(self):
        """Pull the next batch of ``enterprise_basic_info`` rows and enqueue one
        redis task per company name (current + historical names).

        The last processed ``seq`` is persisted in redis; when the table is
        exhausted the cursor resets to 0 and loading starts over. Rewritten
        from tail recursion into a loop — the original recursed forever (stack
        overflow) whenever the source table stayed empty.
        """
        record_task_table = self.redis_key + ":ths:id"
        batch = 1000
        while True:
            last_task_id = self.server.get(record_task_table)
            last_task_id = int(last_task_id) if last_task_id else 0
            if last_task_id:
                sql = (
                    f"SELECT seq, corp_name as query_key, used_name, unified_social_credit_code as taxpayer_id from"
                    f" enterprise_basic_info where seq > {last_task_id} "
                    f" and isvalid = 1 limit {batch}"
                )
            else:
                sql = (
                    f"SELECT seq, corp_name as query_key, used_name, unified_social_credit_code as taxpayer_id from "
                    f"enterprise_basic_info where seq <  {batch}"
                    f" and isvalid = 1 limit {batch}"
                )
            datas = self.pg_db.find(sql, to_json=True)
            if datas:
                self.logger.info(
                    "数据加载完成,{}, id从{}到{} 数量 {}".format(
                        self.redis_key, last_task_id, last_task_id + batch, len(datas)
                    )
                )
                for data in datas:
                    query_key = data.get("query_key")
                    taxpayer_id = data.get("taxpayer_id")
                    new_task_list = [{"query_key": query_key, "taxpayer_id": taxpayer_id}]

                    used_name = data.get("used_name")
                    if used_name:
                        for name in used_name.split(","):
                            new_task_list.append({"query_key": name, "taxpayer_id": taxpayer_id})

                    for new_task in new_task_list:
                        self.server.zadd(self.redis_key, {json.dumps(new_task, ensure_ascii=False): 0})

                self.server.set(record_task_table, str(max(i["seq"] for i in datas)))
                return
            self.logger.info("数据加载完成,没有数据,准备下一轮")
            if self.count_size(self.redis_key):
                # Queue still has pending work; stop refilling for now.
                return
            # Table exhausted: reset the cursor and loop for the next round.
            self.server.set(record_task_table, "0")


class ZhiLianSpider(RosterZhiLianSpider):
    """智联招聘 crawler: company-review search page → company detail page →
    employee reviews (taste) plus the per-city/per-job-type job listings."""

    name = "zhilian_jobs"
    source = "智联招聘"
    # Class-level queue shared by every instance; maxsize=1 means at most one
    # locally injected task feeds start_requests, otherwise a fixed fallback
    # company is used as a smoke test.
    local_task_queue = Queue(maxsize=1)
    auto_next = True
    url = 'https://zq.zhaopin.com/gongsidianping'

    @classmethod
    def update_settings(cls, settings: BaseSettings) -> None:
        """Merge spider-priority overrides: treat most status codes as
        parseable pages, retry aggressively, and tolerate truncated bodies."""
        new_settings = {
            **(cls.custom_settings or {}),
            "HTTPERROR_ALLOWED_CODES": [302, 400, 404, 500, 200, 202, 502],
            "RETRY_TIMES": 40,
            "RETRY_HTTP_CODES": [],
            "DOWNLOAD_FAIL_ON_DATALOSS": False,
        }
        settings.setdict(new_settings, priority="spider")

    def start_requests(self):
        """Seed one review-search request, from the local queue if a task was
        injected, otherwise with the fixed fallback company."""
        if self.local_task_queue.full():
            obj = self.local_task_queue.get_nowait()
            company_name = obj.get("query_key")
        else:
            company_name = "杭州微风企科技有限公司"
        url = f"https://zq.zhaopin.com/gongsidianping/0-0-{company_name}/"
        yield scrapy.Request(url, callback=self.parse, cb_kwargs={"company_name": company_name})

    def parse(self, response: Response, **kwargs: Any) -> Any:
        """Match the searched name against the page's embedded ``"companies"``
        JSON and follow an exact-name match to the company detail page."""
        src_company_name = kwargs.get("company_name")
        companies_findall = re.findall(r'"companies":(\[\{.*?\}\])', response.text, flags=re.S)
        if companies_findall:
            companies_list = json.loads(companies_findall[0])
            for companies in companies_list:
                org_number = companies.get("orgNumber")
                company_name = companies.get("name")
                if company_name == src_company_name:  # exact-name match only
                    company_url = f"https://www.zhaopin.com/companydetail/{org_number}"
                    self.logger.info(f"query_key {company_name} 查询到企业 {company_name} {company_url}")
                    yield scrapy.Request(
                        url=company_url,
                        callback=self.parse_company_info,
                        cb_kwargs={"company_name": company_name},
                    )
                    break
            else:
                self.logger.error(f"query_key {src_company_name} 未查询匹配到企业")
        else:
            self.logger.error(f"query_key {src_company_name} 未查询匹配到企业")

    def parse_company_info(self, response: Response, **kwargs: Any) -> Any:
        """Extract the company profile and business-registration info from the
        page's ``__INITIAL_STATE__`` JSON, yield the company items, then fan
        out to the review API and to one job-search request per
        (city, job-type-level) combination."""
        src_company_name = kwargs.get("company_name")
        initial_state_findall = response.xpath('.').re_first(r"__INITIAL_STATE__=({.*?})</script>")
        initial_state = json.loads(initial_state_findall) if initial_state_findall else {}
        company_detail = initial_state.get("companyDetail", {})
        company_base = company_detail.get("companyBase", {})
        company_name = company_base.get("companyName")  # 公司名称
        city_name = company_base.get("cityName")
        financing_stage_name = company_base.get("financingStageName")  # 公司融资情况
        company_property = company_base.get("property")  # 公司性质
        company_size = company_base.get("companySize", "")  # 公司规模
        industry_level = company_base.get("industryLevel", "")  # 公司行业
        company_website_url = company_base.get("companyWebsiteUrl")  # 公司官网
        # 在招职位数量
        online_position_module = company_detail.get("onlinePositionModule", {})
        online_position_numbers = online_position_module.get("onlinePositionNumbers")
        company_description = company_base.get("companyDescription")  # 公司信息

        # Company advantage ("bright spot") labels.
        bright_spot_label = company_detail.get("tagModule", {}).get("brightSpotLabel", [])
        company_advantage_label = [label.get("value") for label in bright_spot_label]

        # Company honors/awards: flatten every list-valued entry.
        company_honors_list = []
        for honors in company_detail.get("companyHonors", {}).values():
            if isinstance(honors, list):
                company_honors_list.extend(honors)

        # Business-registration (工商) information.
        business_information_data = (
            initial_state.get("extraInfo", {}).get("businessInformation", {}).get("businessInformationData", {})
        )
        full_name_of_company = business_information_data.get("registeredName")  # 企业全称
        date_of_establishment = business_information_data.get("createDate")  # 成立时间
        registered_capital = business_information_data.get("registeredCapital")  # 注册资本
        corporate_representative = business_information_data.get("legalPerson")  # 法人代表
        registered_address = business_information_data.get("location")  # 注册地址
        taxpayer_id = business_information_data.get("epCertNo")  # 统一信用代码
        business_scope = business_information_data.get("businessScope")
        corp_type = business_information_data.get("epType")
        # NOTE(review): the original also collected bestEmployerLabel values
        # into a local list that was never persisted; dropped as dead code.

        company_code = initial_state.get("companyCode")
        company_item = NetJobCompanyDetailItem()
        company_item["company_id"] = company_code
        company_item["source"] = self.source
        company_item["company_name"] = company_name
        company_item["financing_now"] = financing_stage_name
        company_item["scale"] = company_size
        company_item["industry"] = industry_level
        company_item["nature"] = company_property
        company_item["job_num"] = online_position_numbers
        company_item["company_desc"] = company_description
        company_item["web_url"] = company_website_url
        company_item["work_benefit_tags"] = company_advantage_label
        company_item["credit_label"] = company_honors_list
        company_item["url"] = response.url
        company_item["address_list"] = [company_base.get('address')] if company_base.get('address') else None
        company_item["state"] = 1  # data state (1 usable, -1 deleted)
        yield company_item

        company_business_item = NetJobEnterpriseBusinessInfoItem()
        company_business_item["company_id"] = company_code
        company_business_item["source"] = "智联招聘"
        company_business_item["taxpayer_id"] = taxpayer_id
        company_business_item["company_name"] = full_name_of_company
        company_business_item["industry"] = industry_level
        company_business_item["start_time"] = date_of_establishment
        company_business_item["regist_capi"] = registered_capital
        company_business_item["oper_name"] = corporate_representative
        company_business_item["address"] = registered_address
        company_business_item["city"] = city_name
        company_business_item["operating_scope"] = business_scope
        company_business_item["corp_type"] = corp_type
        yield company_business_item

        # First page of employee reviews ("职位味道").
        taste_url = "https://zq.zhaopin.com/discover-site/moment/0_1_0/getMomentListByCompanyOrgNumber"
        params = {
            "orderStr": "0",
            "orgNumber": company_code,
            "at": "22ce3df60acf49ef89fe2508ecd1f641",
            "rt": "9d5ce4e000404685ac65d8e5114be129",
            "x-zp-client-id": "556020b7-e25f-41ce-98bd-7cb525e59ab9",
        }
        yield scrapy.Request(
            url=taste_url + "?" + urlencode(params), callback=self.parse_task_one, cb_kwargs={"params": params}
        )

        # Mark all existing jobs deleted; freshly scraped jobs are re-inserted
        # with state = 1. company_code is scraped (untrusted) data, so strip
        # quote/backslash characters before interpolating it into the SQL.
        # NOTE(review): prefer MysqlDB's parameter binding here if it has one.
        safe_company_code = str(company_code).replace("\\", "").replace('"', "")
        self.to_db.execute(
            f'UPDATE net_job_detail SET state = -1 WHERE company_id = "{safe_company_code}" and source = "{self.source}"'
        )
        self.logger.info(f"{company_name} {company_code} 职位列表获取成功 重置数据库职位状态")

        # One search request per (city, job-type-level) combination; an empty
        # sentinel is appended to each list so the unfiltered query also runs.
        url = "https://fe-api.zhaopin.com/c/i/company/search-position"
        company_conditions = initial_state.get("companyConditions", {})
        city_list = company_conditions.get("city", [])
        job_type_level_list = company_conditions.get("jobTypeLevel", [])
        city_list.append({"code": "", "isDefault": "", "name": ""})
        job_type_level_list.append({"code": "", "isDefault": "", "name": ""})
        for city_ in city_list:
            city_code = city_.get("code")
            for job_ in job_type_level_list:
                job_code = job_.get("code")
                search_params = {
                    "S_SOU_COMPANY_ID": company_code,
                    "pageSize": "90",
                    "pageIndex": "1",
                    "S_SOU_SALARY_MIN": "",
                    "S_SOU_SALARY_MAX": "",
                    "S_SOU_WORK_CITY": city_code,
                    "S_SOU_JD_JOB_LEVEL": job_code,
                }
                yield scrapy.Request(
                    url=url + "?" + urlencode(search_params),
                    callback=self.parse_all_jobs_list,
                    cb_kwargs={"company_name": src_company_name, "params": search_params},
                )

    def parse_task_one(self, response, **kwargs: Any) -> Any:
        """Parse the first review page, then request every remaining page.

        NOTE(review): the API appears to use ``orderStr`` as the page offset
        in multiples of 10 — confirm against the site before changing.
        """
        params = kwargs.get("params")
        yield from self.parse_task_info(response, **kwargs)
        res_data = response.json().get("data", {})
        total_num_int = int(res_data.get("totalNum", 0))
        max_page = total_num_int // 10 + 1
        taste_url = "https://zq.zhaopin.com/discover-site/moment/0_1_0/getMomentListByCompanyOrgNumber"
        org_number = params.get("orgNumber")
        for page in range(1, max_page):
            page_params = {
                "orderStr": f"{page * 10}",
                "orgNumber": org_number,
                "at": "22ce3df60acf49ef89fe2508ecd1f641",
                "rt": "9d5ce4e000404685ac65d8e5114be129",
                "x-zp-client-id": "556020b7-e25f-41ce-98bd-7cb525e59ab9",
            }
            yield scrapy.Request(url=taste_url + "?" + urlencode(page_params), callback=self.parse_task_info)

    def parse_task_info(self, response, **kwargs: Any) -> Any:
        """Yield one NetJobTasteItem per employee review on this page."""
        moments_list = response.json().get("data", {}).get("moments", [])
        for moment in moments_list:
            company_star_score = moment.get("companyStarScore", {})
            user_info = moment.get("user", {}) or {}
            job_taste_item = NetJobTasteItem()
            job_taste_item["company_id"] = moment.get("company", {}).get("orgNumber")  # company id (source-internal)
            job_taste_item["source"] = self.source  # source site
            job_taste_item["taste_id"] = moment.get("id")  # review id
            job_taste_item["name"] = user_info.get("nick")  # reviewer display name
            job_taste_item["evaluation_tags"] = company_star_score.get("labelNameList")  # review tags
            job_taste_item["evaluation_content"] = {"desc": company_star_score.get("positiveContent")}  # review text
            job_taste_item["like_count"] = moment.get("likeNum")  # likes on the review
            job_taste_item["see_count"] = moment.get("readNumShow")  # reads of the review
            job_taste_item["state"] = 1  # data state (1 usable, -1 deleted)
            yield job_taste_item

    def parse_all_jobs_list(self, response, **kwargs: Any) -> Any:
        """Parse one search-position response page.

        NOTE(review): pagination past the first 90 results per
        (city, job-type) bucket was deliberately disabled in the original
        (commented out); the per-city × per-job-type fan-out in
        parse_company_info is relied on to keep each bucket under 90 rows.
        """
        yield from self.parse_one_jobs_list(response, **kwargs)

    def parse_one_jobs_list(self, response, **kwargs: Any) -> Any:
        """Follow every job in the search result to its detail page."""
        job_lists = response.json().get("data", {}).get("list", [])
        for job_ in job_lists:
            # Normalize to https before requesting the detail page.
            job_link = job_.get("positionURL").replace("http://", "https://")
            kwargs["job_id"] = job_.get("jobId")
            yield scrapy.Request(url=job_link, callback=self.parse_jobs_info, cb_kwargs=kwargs)

    def parse_jobs_info(self, response, **kwargs: Any) -> Any:
        """Extract one job posting from the detail page's __INITIAL_STATE__
        JSON and yield it as a NetJobDetailItem."""
        job_id = kwargs.get("job_id")
        initial_state_findall = response.xpath('.').re_first(r"__INITIAL_STATE__=({.*?})</script>")
        initial_state = json.loads(initial_state_findall) if initial_state_findall else {}
        detailed_position = initial_state.get("jobInfo", {}).get("jobDetail", {}).get("detailedPosition", {})
        staff = detailed_position.get("staff", {})
        # NOTE(review): the original also read recruitNumber and a blank
        # "recruitment status" into locals that were never persisted; dropped.

        job_item = NetJobDetailItem()
        job_item["company_id"] = detailed_position.get("companyNumber")  # company id (source-internal)
        job_item["source"] = self.source  # source site
        job_item["job_id"] = job_id  # job id (source-internal)
        job_item["name"] = detailed_position.get("name")  # job title
        job_item["degree"] = detailed_position.get("education")  # education requirement
        job_item["experience"] = detailed_position.get("workingExp")  # experience requirement
        job_item["job_area"] = detailed_position.get("workCity")  # job city
        job_item["job_address"] = detailed_position.get("workAddress")  # work address
        job_item["salary"] = detailed_position.get("salary60")  # salary range
        job_item["functional_type"] = detailed_position.get("jobTypeLevelName")  # functional category
        job_item["publish_time"] = detailed_position.get("positionPublishTime")  # publish/update time
        job_item["job_desc"] = detailed_position.get("jobDescPC")  # job description
        job_item["keywords"] = detailed_position.get("skillLabel")  # skill tags
        job_item["evaluation_tags"] = detailed_position.get("welfareLabel")  # welfare tags
        job_item["hr_name"] = staff.get("staffName")  # recruiter name
        job_item["hr_position"] = staff.get("hrJob")  # recruiter role/description
        job_item["url"] = response.url  # detail page url
        yield job_item


def run():
    """Launch the zhilian_jobs spider in the current process via the scrapy CLI."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "zhilian_jobs"])


if __name__ == "__main__":
    from argparse import ArgumentParser

    # CLI entry point: fan the crawl out across `--worker` processes.
    arg_parser = ArgumentParser(description="命令运行爬虫，此命令慎重修改")
    arg_parser.add_argument("--worker", type=int, default=1, help="进程数量")
    cli_args = arg_parser.parse_args()

    if cli_args.worker:
        run_mul(max_workers=cli_args.worker, target=run)