"""
    @Author  ：思念 
    @File    ：11.使用进程方式获取腾讯招聘.py
    @Date    ：2025/1/5 20:04 
"""

import time
import pymongo
import requests
import jsonpath
from multiprocessing import Process, JoinableQueue as Queue

# Tencent careers search API endpoint; returns job postings as JSON.
url = 'https://careers.tencent.com/tencentcareer/api/post/Query'

# Browser-like User-Agent header sent with every request.
headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) "
                  "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
}


def get_work_info_json(page_num, queue):
    """Fetch one page of Tencent job postings and push each record onto *queue*.

    Runs in a worker process; one call handles exactly one result page.

    Args:
        page_num: 1-based page index to request from the API.
        queue: JoinableQueue consumed by the saver process.
    """
    params = {
        # Current epoch milliseconds instead of a stale hard-coded timestamp.
        'timestamp': int(time.time() * 1000),
        'countryId': '',
        'cityId': '',
        'bgIds': '',
        'productId': '',
        'categoryId': '',
        'parentCategoryId': '',
        'attrId': '',
        'keyword': 'python',
        'pageIndex': page_num,
        'pageSize': 10,
        'language': 'zh-cn',
        'area': 'cn'
    }

    # timeout keeps a worker process from hanging forever on a dead connection.
    response = requests.get(url, headers=headers, params=params, timeout=10).json()
    # Some pages carry no post list. jsonpath.jsonpath returns False on a
    # missing match, so both a None 'Posts' value and a failed lookup surface
    # here as TypeError.
    try:
        for info in response['Data']['Posts']:
            work_info_dict = {
                'recruit_post_name': jsonpath.jsonpath(info, '$..RecruitPostName')[0],
                'country_name': jsonpath.jsonpath(info, '$..CountryName')[0],
                'location_name': jsonpath.jsonpath(info, '$..LocationName')[0],
                'category_name': jsonpath.jsonpath(info, '$..CategoryName')[0],
                'responsibility': jsonpath.jsonpath(info, '$..Responsibility')[0],
                'last_update_time': jsonpath.jsonpath(info, '$..LastUpdateTime')[0],
            }
            queue.put(work_info_dict)
    except TypeError:
        print('数据不存在:', params.get('pageIndex'))


def save_work_info(queue):
    """Consume job dicts from *queue* forever and persist them to MongoDB.

    Runs as a daemon process: it never returns on its own; the parent exits
    after queue.join() confirms every queued item was processed.

    Args:
        queue: JoinableQueue filled by the crawler processes.
    """
    mongo_client = pymongo.MongoClient()
    collection = mongo_client['py_spider']['tx_work']
    while True:
        dict_data = queue.get()
        try:
            print(dict_data)
            collection.insert_one(dict_data)
        finally:
            # Always decrement the task counter, even if the insert raises;
            # otherwise queue.join() in the parent would block forever.
            queue.task_done()


if __name__ == '__main__':
    dict_data_queue = Queue()

    # One producer process per result page (finite tasks, so no daemon flag
    # is needed — each exits once its page is fetched).
    producers = [
        Process(target=get_work_info_json, args=(page, dict_data_queue))
        for page in range(1, 50)
    ]
    for producer in producers:
        producer.start()

    # The saver loops forever, so mark it as a daemon: the main process can
    # then exit normally once the finite crawling work is done.
    saver = Process(target=save_work_info, args=(dict_data_queue,))
    saver.daemon = True
    saver.start()

    # Wait for every finite producer task to finish...
    for producer in producers:
        producer.join()

    # ...then wait until every queued item has been marked done by the saver.
    dict_data_queue.join()
    print('爬虫任务完成...')

