#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2024/7/8 15:40
# @Author  : 王凯
# @File    : boss.py
# @Project : scrapy_spider
import base64
import datetime
import hashlib
import json
import random
import re
import string
import sys
import time
from pathlib import Path
from queue import Queue
from typing import Any

import scrapy
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad, unpad
from scrapy.http import Response
from scrapy.settings import BaseSettings

sys.path.append(Path(__file__).parent.parent.parent.parent.parent.as_posix())
from apps.jobs.jobs.items import (
    NetJobDetailItem,
    NetJobCompanyDetailItem,
    NetJobEnterpriseBusinessInfoItem,
    NetJobTasteItem, NetJobCompanyDetailBaseItem,
)
from utils.tools import timestamp_to_date
from apps.jobs.jobs.spiders import CustomException, run_mul
from components.component.scrapy_redis_task.spiders import RedisTaskSpider
from components.config import WFQ_SOURCE_MYSQL_CONFIG
from utils.db.mysqldb import MysqlDB
from utils.tonghuashun_tools import PostgresqlDB

import subprocess
from functools import partial

# Force UTF-8 text mode on every subprocess spawned anywhere in this process —
# presumably so child-process output (e.g. Chinese log text) is not mangled on
# platforms whose default codec is not UTF-8. TODO(review): confirm motivation;
# this is a global monkey-patch affecting all subprocess.Popen callers.
subprocess.Popen = partial(subprocess.Popen, encoding="utf-8")


class BossParser:
    """AES helpers mirroring kanzhun.com's front-end request encryption.

    Request payloads are AES-CBC encrypted with a fixed site key and a random
    16-character IV, then base64-encoded with a URL-safe character swap.
    """

    key = "G$$QawckGfaLB97r"  # static key used by the site's JavaScript

    def gen_iv(self):
        """Return a random IV of 16 distinct letters/digits (matches the JS)."""
        alphabet = string.ascii_letters + string.digits
        return "".join(random.sample(alphabet, 16))

    def kanzun_aes_encrypt(self, data, iv):
        """AES-CBC/PKCS7-encrypt *data* and return URL-safe base64 text."""
        cipher = AES.new(self.key.encode("utf-8"), AES.MODE_CBC, iv.encode("utf-8"))
        padded = pad(data.encode("utf-8"), AES.block_size, style="pkcs7")
        encoded = base64.b64encode(cipher.encrypt(padded)).decode("utf-8")
        # The site substitutes the three URL-unsafe base64 characters.
        return encoded.replace("/", "_").replace("+", "-").replace("=", "~")

    def kanzun_aes_decrypt(self, encrypted_data, iv):
        """Decrypt a plain-base64 AES-CBC/PKCS7 payload back to text."""
        cipher = AES.new(self.key.encode("utf-8"), AES.MODE_CBC, iv.encode("utf-8"))
        raw = cipher.decrypt(base64.b64decode(encrypted_data))
        return unpad(raw, AES.block_size, style="pkcs7").decode("utf-8")


class RosterBossSpider(RedisTaskSpider, BossParser):
    """Redis-task spider base: seeds crawl tasks from the `roster_jobs` MySQL
    table and records per-company crawl start/end rows in `net_jobs_task_log`.
    """

    save_mysql_log = True  # write start/end log rows to net_jobs_task_log
    uk_id = None  # md5 key of the current task's log row, set in start_callback
    to_db = None  # destination MySQL connection (set in __init__)
    wfq_source_db = None  # source MySQL connection holding roster_jobs
    custom_settings = {
        "REDIS_START_URLS_KEY": "scrapy:task:tyc:%(name)s:start_urls",
        "LOG_LEVEL": "INFO",
    }
    source = "BOSS直聘"

    def __init__(self, **kwargs: Any):
        super().__init__(**kwargs)
        self.to_db = MysqlDB()
        self.wfq_source_db = MysqlDB(
            ip=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_IP"],
            port=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_PORT"],
            db=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_DB"],
            user_name=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_USER_NAME"],
            user_pass=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_USER_PASS"],
        )

    def add_task(self):
        """Load the next batch of roster_jobs rows and push one redis task per
        company name (current name plus every comma-separated former name).

        The resume cursor (last seen primary key) is kept in redis under
        ``<redis_key>:id``.  When the table is exhausted we wait and re-poll in
        a loop — the original implementation recursed here every 5 minutes,
        which could eventually overflow the stack on a long-idle worker.
        """
        record_task_table = self.redis_key + ":id"
        batch = 1000
        while True:
            last_task_id = self.server.get(record_task_table)
            last_task_id = int(last_task_id) if last_task_id else 0
            if last_task_id:
                sql = (
                    f"SELECT company_name as query_key, used_name, taxpayer_id, id from "
                    f"roster_jobs where id > {last_task_id} limit {batch}"
                )
            else:
                sql = f"SELECT company_name as query_key, used_name, taxpayer_id, id from roster_jobs limit {batch}"
            datas = self.wfq_source_db.find(sql, to_json=True)
            if datas:
                self.logger.info(
                    "数据加载完成,{}, id从{}到{} 数量 {}".format(self.redis_key, last_task_id, last_task_id + batch, len(datas))
                )
                for data in datas:
                    query_key = data.get("query_key")
                    taxpayer_id = data.get("taxpayer_id")
                    new_task_list = [{"query_key": query_key, "taxpayer_id": taxpayer_id}]

                    used_name = data.get("used_name")
                    if used_name:
                        for name in used_name.split(","):
                            new_task_list.append({"query_key": name, "taxpayer_id": taxpayer_id})

                    for new_task in new_task_list:
                        # skip names too short to be meaningful search keys
                        if len(new_task.get("query_key")) > 3:
                            self.server.zadd(self.redis_key, {json.dumps(new_task, ensure_ascii=False): 0})
                last_task_id = max(i["id"] for i in datas)
                self.server.set(record_task_table, str(last_task_id))
                return
            self.logger.info(f"数据加载完成,没有数据,记录最后一条任务id {self.redis_key} {last_task_id}")
            self.server.set(record_task_table, last_task_id)
            if self.count_size(self.redis_key):
                return
            time.sleep(60 * 5)  # wait before polling the source table again

    def make_request_from_data(self, formatted_data: str or dict):
        """Turn a dequeued task dict into the seed request for the crawl."""
        company_name = formatted_data["query_key"]
        url = "https://www.kanzhun.com/"
        yield scrapy.Request(url, callback=self.parse, cb_kwargs={"company_name": company_name}, dont_filter=True)

    def start_callback(self, task_obj, *args, **kwargs):
        """Record the crawl start in net_jobs_task_log.

        Errors are logged and swallowed on purpose so log bookkeeping can
        never kill a task.
        """
        if not self.save_mysql_log:
            return
        try:
            query_key = task_obj.get("query_key")
            taxpayer_id = task_obj.get("taxpayer_id")
            # unique per (company, timestamp, source) so reruns get fresh rows
            self.uk_id = hashlib.md5(f"{query_key}{time.time()}{self.source}".encode("utf-8")).hexdigest()
            self.to_db.add_batch_smart(
                "net_jobs_task_log",
                [
                    {
                        "query_key": query_key,
                        "uk_id": self.uk_id,
                        "source": self.source,
                        "taxpayer_id": taxpayer_id,
                        "crawler_start": datetime.datetime.now(),
                    }
                ],
                update_columns=["crawler_start"],
            )
        except Exception as e:
            self.logger.error(f"spider start callback {e}")

    def end_callback(self, task_obj, *args, **kwargs):
        """Record the crawl end plus the count of live jobs stored for the
        company; errors are logged only (best-effort bookkeeping)."""
        if not (task_obj and self.save_mysql_log):
            return
        try:
            query_key = task_obj.get("query_key")
            # NOTE(review): query_key is interpolated straight into SQL — a
            # company name containing a quote breaks the query. Parameterize
            # if MysqlDB.find supports placeholders.
            counts = self.to_db.find(
                f"""select count(*) as count
                        from net_job_company_detail
                                 left join net_job_detail on net_job_company_detail.company_id = net_job_detail.company_id
                        where company_name = '{query_key}'
                          and net_job_company_detail.source = '{self.source}'
                          and net_job_detail.state = 1""",
                to_json=True,
            )
            count = counts[0]["count"] if counts else 0

            self.to_db.add_batch_smart(
                "net_jobs_task_log",
                [
                    {
                        "query_key": query_key,
                        "uk_id": self.uk_id,
                        "crawler_end": datetime.datetime.now(),
                        "count": count,
                    }
                ],
                update_columns=["crawler_end", "count"],
            )
        except Exception as e:
            self.logger.error(f"spider end callback {e}")


class THSQueueBossSpider(RosterBossSpider):
    """Variant of RosterBossSpider that seeds tasks from the PostgreSQL
    `enterprise_basic_info` table instead of `roster_jobs`."""

    pg_db = None  # PostgreSQL source connection (set in __init__)

    def __init__(self, **kwargs: Any):
        super().__init__(**kwargs)
        self.pg_db = PostgresqlDB()

    def add_task(self):
        """Load the next batch of valid enterprises (cursored by ``seq``) and
        push one redis task per current/former company name.

        When the table is exhausted the cursor is reset to "0" so the next
        round restarts from the beginning.  The original implementation
        recursed here *immediately* with no delay, so an empty source table
        would recurse until RecursionError; this version loops and waits
        between polls (same 5-minute interval as the parent class).
        """
        record_task_table = self.redis_key + ":ths:id"
        batch = 1000
        while True:
            last_task_id = self.server.get(record_task_table)
            last_task_id = int(last_task_id) if last_task_id else 0
            if last_task_id:
                sql = (
                    f"SELECT seq, corp_name as query_key, used_name, unified_social_credit_code as taxpayer_id from"
                    f" enterprise_basic_info where seq > {last_task_id} "
                    f" and isvalid = 1 limit {batch}"
                )
            else:
                sql = (
                    f"SELECT seq, corp_name as query_key, used_name, unified_social_credit_code as taxpayer_id from "
                    f"enterprise_basic_info where seq <  {batch}"
                    f" and isvalid = 1 limit {batch}"
                )
            datas = self.pg_db.find(sql, to_json=True)
            if datas:
                self.logger.info(
                    "数据加载完成,{}, id从{}到{} 数量 {}".format(self.redis_key, last_task_id, last_task_id + batch, len(datas))
                )
                for data in datas:
                    query_key = data.get("query_key")
                    taxpayer_id = data.get("taxpayer_id")
                    new_task_list = [{"query_key": query_key, "taxpayer_id": taxpayer_id}]

                    used_name = data.get("used_name")
                    if used_name:
                        for name in used_name.split(","):
                            new_task_list.append({"query_key": name, "taxpayer_id": taxpayer_id})

                    for new_task in new_task_list:
                        self.server.zadd(self.redis_key, {json.dumps(new_task, ensure_ascii=False): 0})

                last_task_id = max(i["seq"] for i in datas)
                self.server.set(record_task_table, str(last_task_id))
                return
            self.logger.info("数据加载完成,没有数据,准备下一轮")
            self.server.set(record_task_table, "0")
            if self.count_size(self.redis_key):
                return
            time.sleep(60 * 5)  # avoid a hot query loop when the table is empty


class BossSpider(RosterBossSpider):
    """Crawls BOSS直聘 (zhipin.com) job, company, business-registration and
    review data, entering through a kanzhun.com company search."""

    name = "boss_jobs"
    source = "BOSS直聘"
    local_task_queue = Queue(maxsize=1)
    auto_next = True

    @classmethod
    def update_settings(cls, settings: BaseSettings) -> None:
        """Overlay spider-specific retry/error settings on custom_settings."""
        new_settings = {
            **(cls.custom_settings or {}),
            # The site answers anti-bot blocks with assorted status codes, so
            # let them through HttpError handling and retry via RETRY_TIMES.
            "HTTPERROR_ALLOWED_CODES": [400, 404, 500, 200, 202, 502],
            "RETRY_TIMES": 40,
            "RETRY_HTTP_CODES": [],
        }
        settings.setdict(new_settings, priority="spider")

    def parse(self, response: Response, **kwargs: Any) -> Any:
        """Collect Set-Cookie values from the landing page, then call the
        company autocomplete search with those cookies."""
        company_name = kwargs.get("company_name")
        set_cookie_dict = {}
        for set_cookie in response.headers.getlist("Set-Cookie") or []:
            # keep only the leading name=value pair; drop path/expires attrs
            set_cookie_str = set_cookie.decode().split("; ")[0]
            if "=" in set_cookie_str:
                # partition (not split) so values containing '=' — common for
                # base64 cookie payloads — are not truncated at the first '='
                cookie_name, _, cookie_value = set_cookie_str.partition("=")
                set_cookie_dict[cookie_name] = cookie_value

        url = f"https://www.kanzhun.com/api/search/autoComplete_v3.json?query={company_name}"
        yield scrapy.Request(
            url,
            callback=self.parse_search,
            cookies=set_cookie_dict,
            headers=response.headers.to_unicode_dict(),
            cb_kwargs={"company_name": company_name},
        )

    def parse_search(self, response: Response, **kwargs: Any) -> Any:
        """Take the first autocomplete hit and open its kanzhun company page."""
        res = response.json()
        resdata = res.get("resdata") or []
        if not resdata:
            self.logger.error(f"{kwargs.get('company_name')} 没有搜索到结果")
            return
        enc_search_id = resdata[0].get("encSearchId")
        url = f"https://www.kanzhun.com/firm/info/{enc_search_id}.html"
        yield scrapy.Request(
            url, callback=self.parse_search_result, cb_kwargs={"company_name": kwargs.get("company_name")}
        )

    def parse_search_result(self, response: Response, **kwargs: Any) -> Any:
        """Extract the embedded ``window.context`` JSON from the kanzhun page,
        resolve the zhipin company id, then fan out to the encrypted
        business-info API and the job-category listing."""
        context = response.xpath(".").re_first(r"window\.context\s*=\s*{state:\s*({.*})}<")
        if not context:
            return
        data = json.loads(context.replace(' " ', ""))  # the page sometimes embeds a stray quote
        company_head_info = data["company"].get("companyHeadInfo") or data["company"].get("companyHead")
        boss_company_url = company_head_info.get("bossCompanyUrl") or (company_head_info.get("zpComBrandVO") or {}).get("bossBrandUrl")
        if not boss_company_url:
            return
        # FIX: the original used i.strip(".html"), which strips the *character
        # set* {., h, t, m, l} from both ends and corrupts ids that start or
        # end with those letters; truncate at the ".html" suffix instead.
        company_ids = [i.split(".html")[0] for i in boss_company_url.split("/") if ".html" in i]
        if not company_ids:
            return
        company_id = company_ids[0]
        company_name = company_head_info["companyName"]
        short_name = company_head_info.get("companySimpleName")
        company_des = company_head_info.get("companyDes")
        address_list = company_head_info.get("addressList")
        work_time = company_head_info.get("workTime")
        welfare_label_list = company_head_info.get("welfareLabelList")
        kiv = self.gen_iv()
        # encrypt the request body exactly like the site's front-end does
        b = self.kanzun_aes_encrypt(
            json.dumps({"encCompanyId": company_head_info["encCompanyId"]}, separators=(",", ":")), kiv
        )
        url = f"https://www.kanzhun.com/api_to/cbi/base_info.json?b={b}&kiv={kiv}"
        yield scrapy.Request(
            url,
            callback=self.parse_kanzun_company_detail,
            cb_kwargs={
                "company_id": company_id,
                "kiv": kiv,
                "company_name": company_name,
                "short_name": short_name,
                "company_des": company_des,
                "address_list": address_list,
                "work_time": work_time,
                "welfare_label_list": welfare_label_list,
            },
        )
        url = f"https://m.zhipin.com/wapi/zpgeek/brand/job/querylist.json?brandId={company_id}&page=1&pageSize=15&positionLv1=&city=&experience=&salary="
        yield scrapy.Request(url, callback=self.gen_job_list_next, cb_kwargs={"company_id": company_id, "company_name": company_name})
        # yield from self.gen_company_taste_list(company_id)

    def parse_kanzun_company_detail(self, response: Response, **kwargs: Any) -> Any:
        """Decrypt the base_info payload and emit the business-registration
        item plus the company-detail item."""
        company_id = kwargs.get("company_id")
        company_name = kwargs.get("company_name")
        short_name = kwargs.get("short_name")
        address_list = kwargs.get("address_list")
        company_des = kwargs.get("company_des")
        work_time_list = kwargs.get("work_time")
        welfare_label_list = kwargs.get("welfare_label_list") or []
        work_time = ""
        work_time_extend = ""
        if work_time_list:
            work_time = work_time_list[0]["text"]
            if len(work_time_list) > 1:
                work_time_extend = work_time_list[1]["text"]

        res = json.loads(self.kanzun_aes_decrypt(response.text, kwargs.get("kiv")))
        register = res["resdata"]["registerVO"]  # hoist the repeated lookup

        business_item = NetJobEnterpriseBusinessInfoItem()
        business_item["company_id"] = company_id or ""
        business_item["company_name"] = register["companyName"] or company_name or ""
        business_item["source"] = self.source
        business_item["taxpayer_id"] = register["socialCode"] or ""
        business_item["start_time"] = (register["deadLine"] or "")[:10] or "0000-00-00"
        business_item["regist_capi"] = register["registerMoney"] or ""
        business_item["oper_name"] = register["legalPersonName"] or ""
        business_item["address"] = register["registerAddress"] or ""
        business_item["province"] = register["area"] or ""
        business_item["record_authority"] = register["registerOffice"] or ""
        business_item["industry"] = register["industry"] or ""
        business_item["operating_dur"] = register["deadLine"]
        business_item["corp_type"] = register["companyType"] or ""
        business_item["operating_status"] = register["manageStatus"] or ""
        business_item["operating_scope"] = register["manageScope"] or ""
        yield business_item

        company_item = NetJobCompanyDetailItem()
        company_item["source"] = self.source
        company_item["company_id"] = company_id or ""
        company_item["short_name"] = short_name or ""
        company_item["industry"] = business_item.industry
        company_item["company_desc"] = company_des or ""
        company_item["address_list"] = address_list or [business_item.address]
        company_item["company_name"] = business_item.company_name or ""
        company_item["work_time"] = work_time or ""
        company_item["work_time_extend"] = work_time_extend or ""
        company_item["work_benefit_tags"] = [i["text"] for i in welfare_label_list]
        company_item["url"] = f"https://www.zhipin.com/gongsi/{company_id}.html?ka=company-intro"
        yield company_item

    def gen_job_list_next(self, response: Response, **kwargs: Any) -> Any:
        """Mark all stored jobs for this company as stale (state = -1), then
        request each job-category listing so re-seen jobs get refreshed."""
        company_id = kwargs.get("company_id")
        company_name = kwargs.get("company_name")
        data = response.json()["zpData"]
        # NOTE(review): company_id comes from a crawled page and is inlined
        # into SQL — parameterize if MysqlDB.execute supports placeholders.
        self.to_db.execute(f'UPDATE net_job_detail SET state = -1 WHERE company_id = "{company_id}" and source = "{self.source}"')
        self.logger.info(f"{company_name} {company_id} 职位列表获取成功 重置数据库职位状态")
        for cate in data["positionList"]:
            if str(cate.get("name")) == "全部":
                continue  # "全部" (all) duplicates the per-category listings
            functional_type = cate["name"]
            cate_code = cate["code"]
            cate_url = f"https://www.zhipin.com/wapi/zpgeek/brand/job/querylist.json?brandId={company_id}&page=1&pageSize=15&positionLv1={cate_code}&city=&degree=&experience=&salary=&query="
            yield scrapy.Request(
                cate_url,
                callback=self.parse_job_list,
                cb_kwargs={"company_id": company_id, "functional_type": functional_type},
            )

    def parse_job_list(self, response: Response, **kwargs: Any) -> Any:
        """Walk one category's job list page: follow the next page while a
        full page (15 items) comes back, and emit a detail request per job.

        Raises:
            CustomException: when the API returns a non-zero code (triggers
                the spider's retry/failure handling).
        """
        company_id = kwargs.get("company_id")
        functional_type = kwargs.get("functional_type")
        res = response.json()
        if res.get("code") != 0:
            raise CustomException(res.get("message"))
        job_list = (res.get("zpData") or {}).get("jobList", [])
        current_page = re.findall(r"&page=(\d+)&pageSize", response.request.url)
        if not current_page:
            return
        current_page = current_page[0]
        if job_list and len(job_list) == 15:
            # a full page implies there may be another one
            next_href = response.request.url.replace(
                f"&page={current_page}&pageSize=15", f"&page={int(current_page) + 1}&pageSize=15"
            )
            yield scrapy.Request(
                next_href,
                callback=self.parse_job_list,
                cb_kwargs={"company_id": company_id, "functional_type": functional_type},
            )

        for job in job_list:
            lid = job["lid"]
            securityId = job["securityId"]
            item = NetJobDetailItem()
            item.source = self.source
            item.company_id = company_id
            item.functional_type = functional_type
            item.job_id = job["encryptJobId"]
            item.name = job["jobName"]
            item.job_area = job["cityName"]
            try:
                # lastModifyTime is epoch milliseconds
                publish_time = timestamp_to_date(job["lastModifyTime"] / 1000, time_format="%Y-%m-%d")
            except Exception:  # was a bare except; timestamp may be missing/None
                publish_time = None
            item.publish_time = publish_time
            item.salary = job["salaryDesc"]
            item.experience = job["jobExperience"]
            item.degree = job["jobDegree"]
            item.hr_name = job["bossName"]
            item.hr_position = job["bossTitle"]
            item.url = f"https://www.zhipin.com/job_detail/{item.job_id}.html?lid={lid}&securityId={securityId}&sessionId="
            detail_url = f"https://www.zhipin.com/wapi/zpgeek/job/detail.json?securityId={securityId}&lid={lid}"
            yield scrapy.Request(
                detail_url,
                callback=self.parse_job_detail,
                cb_kwargs={
                    "job_item": item,
                    "lid": lid,
                    "securityId": securityId,
                },
            )

    def parse_job_detail(self, response: Response, **kwargs: Any) -> Any:
        """Enrich the pending job item with the detail payload, emit it, and
        emit a lightweight company item from the brand info when present."""
        data = response.json()["zpData"]
        if not data:
            return
        jobInfo = data.get("jobInfo") or {}
        brand_com_info = data.get("brandComInfo")
        job_item = kwargs.get("job_item")
        # FIX: brandComInfo can be absent — the original crashed on .get here
        # even though the company-item branch below already guards for it.
        job_item.evaluation_tags = brand_com_info.get("labels") if brand_com_info else None
        if jobInfo:
            job_item.keywords = jobInfo["showSkills"]
            job_item.job_desc = jobInfo["postDescription"]
            job_item.salary_detail = jobInfo["salaryDesc"]
            job_item.functional_type = jobInfo["positionName"]
            job_item.recruitment_status = jobInfo["jobStatusDesc"]
        yield job_item

        if brand_com_info:
            company_item = NetJobCompanyDetailBaseItem()
            company_item["source"] = self.source
            company_item["company_id"] = brand_com_info["encryptBrandId"] or ""
            company_item["short_name"] = brand_com_info["customerBrandName"] or ""
            company_item["industry"] = brand_com_info["industryName"]
            company_item["company_desc"] = brand_com_info["introduce"] or ""
            company_item["financing_now"] = brand_com_info["customerBrandStageName"] or ""
            company_item["scale"] = brand_com_info["scaleName"] or ""
            yield company_item

    # #################################### 舆情信息 #####################################
    def gen_company_taste_list(self, company_id):
        """Seed page 1 of a company's review ("taste") feed.

        Currently not wired into the crawl — see the commented call at the end
        of parse_search_result.
        """
        company_url = (
            f"https://www.zhipin.com/wapi/zpboss/h5/pc/brandInfo/taste/getTasteInfo?"
            f"encryptBrandId={company_id}&page=1&pageSize=10"
        )
        yield scrapy.Request(
            company_url, callback=self.gen_company_taste_list_next, cb_kwargs=dict(company_id=company_id)
        )

    def gen_company_taste_list_next(self, response: Response, **kwargs: Any) -> Any:
        """Parse page 1's reviews, then request every remaining page based on
        totalCount (10 reviews per page)."""
        yield from self.parse_company_taste_list(response, **kwargs)
        company_id = kwargs.get("company_id")
        taste_info = response.json()["zpData"]["tasteInfo"]
        total_count = taste_info["totalCount"]
        # page 1 was handled above; request pages 2 .. ceil(total/10)
        for page in range(2, (total_count // 10) + 2):
            company_url = (
                f"https://www.zhipin.com/wapi/zpboss/h5/pc/brandInfo/taste/getTasteInfo?"
                f"encryptBrandId={company_id}&page={page}&pageSize=10"
            )
            yield scrapy.Request(
                company_url, callback=self.parse_company_taste_list, cb_kwargs=dict(company_id=company_id)
            )

    def parse_company_taste_list(self, response: Response, **kwargs: Any) -> Any:
        """Emit one NetJobTasteItem per review on this page."""
        taste_info = response.json()["zpData"]["tasteInfo"]
        company_id = kwargs.get("company_id")
        for taste in taste_info["contentList"]:
            yield from self.parse_company_state_detail(taste, company_id=company_id)

    def parse_company_state_detail(self, info, company_id):
        """Map one raw review dict onto a NetJobTasteItem and yield it."""
        item = NetJobTasteItem()
        item.source = self.source
        item.company_id = company_id
        item.taste_id = info.get("brandWorkTasteId")
        item.name = info.get("userName")
        item.position = info.get("userTitle")
        item.work_year = info.get("workYears")
        item.evaluation_tags = [i["subTitle"] for i in info.get("content") or [] if i["subTitle"]]
        item.evaluation_content = info.get("content")
        item.like_count = info.get("likeCount")
        item.see_count = info.get("seeCount")
        yield item


def run():
    """Launch the boss_jobs crawl through scrapy's command-line entry point."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "boss_jobs"])


if __name__ == "__main__":
    from argparse import ArgumentParser

    # Command-line entry point: fan the crawl out over N worker processes.
    arg_parser = ArgumentParser(description="命令运行爬虫，此命令慎重修改")
    arg_parser.add_argument("--worker", type=int, default=1, help="进程数量")

    cli_args = arg_parser.parse_args()
    if cli_args.worker:
        run_mul(max_workers=cli_args.worker, target=run)
