# coding=utf-8
"""
    @project: 15python_spider
    @Author：frank
    @file： 01_tencent.py
    @date：2024/2/27 20:35
"""
import requests
import json
import time
import random
from fake_useragent import UserAgent


class TencentSpider(object):
    """Crawler for Tencent's public careers API.

    Pages through the job-listing endpoint (10 jobs per page), fetches
    each job's detail record, and prints one dict per job containing the
    job name, duties and requirements.
    """

    def __init__(self):
        # Randomize the User-Agent each run to look less like a bot.
        self.headers = {'User-Agent': UserAgent().random}
        # Level-1 URL: paged job listing; '{}' takes the page index.
        self.one_url = 'https://careers.tencent.com/tencentcareer/api/post/Query?timestamp=1709038915125&countryId=&cityId=&bgIds=&productId=&categoryId=&parentCategoryId=&attrId=1&keyword=&pageIndex={}&pageSize=10&language=zh-cn&area=cn'
        # Level-2 URL: job detail; '{}'s take a ms timestamp and the postId.
        self.two_url = 'https://careers.tencent.com/tencentcareer/api/post/ByPostId?timestamp={}&postId={}&language=zh-cn'

    def get_page(self, url):
        """GET *url* and return the decoded JSON payload as a dict.

        Raises requests.HTTPError on a non-2xx response and
        requests.Timeout if the server stalls past 10 seconds.
        """
        # timeout= prevents the crawler from hanging forever on a dead
        # connection (requests has no default timeout).
        res = requests.get(url, headers=self.headers, timeout=10)
        # Fail loudly on HTTP errors instead of trying to JSON-decode an
        # error page.
        res.raise_for_status()
        res.encoding = 'utf-8'
        # res.json() is the idiomatic equivalent of json.loads(res.text).
        return res.json()

    def get_data(self, html):
        """Extract all jobs from one listing page *html*, print each.

        For every post in html['Data']['Posts'], builds the detail URL
        from its PostId, fetches duties/requirements, and prints the
        resulting dict.  Returns the list of job dicts (keys: job_name,
        job_duty, require) so callers may collect results instead of
        relying on stdout only.
        """
        jobs = []
        for job in html['Data']['Posts']:
            # A fresh dict per job: reusing one dict across iterations
            # would make every collected entry alias the last job.
            job_info = {'job_name': job['RecruitPostName']}
            post_id = job['PostId']
            # The API expects a millisecond timestamp (cf. the 13-digit
            # example baked into one_url); time.time() alone is a float
            # in seconds.
            two_url = self.two_url.format(int(time.time() * 1000), post_id)
            job_info['job_duty'], job_info['require'] = \
                self.parse_two_page(two_url)
            print(job_info)
            jobs.append(job_info)
        return jobs

    def parse_two_page(self, two_url):
        """Fetch one job-detail page; return (responsibility, requirement)."""
        two_html = self.get_page(two_url)
        duty = two_html['Data']['Responsibility']
        require = two_html['Data']['Requirement']
        return duty, require

    def main(self):
        """Crawl listing pages 1-10, pausing randomly between pages."""
        for index in range(1, 11):
            url = self.one_url.format(index)
            one_html = self.get_page(url)
            self.get_data(one_html)
            # Random delay to avoid hammering the server.
            time.sleep(random.uniform(0.5, 2))


if __name__ == '__main__':
    # Run the crawler only when executed as a script, not on import.
    TencentSpider().main()
