''' 
@文件：2.使用线程池获取百度招聘信息.py
@作者：Miss丶念
@时间：2025/3/21：20:30
'''

import pymysql
import requests
from dbutils.pooled_db import PooledDB
from concurrent.futures import ThreadPoolExecutor, as_completed


class BaiduJob:
    """Scrape 'python' job postings from Baidu's talent API with a thread pool
    and persist them into a MySQL table via a shared connection pool."""

    def __init__(self):
        # Endpoint and headers for the Baidu talent search API (POST form data).
        self.url = 'https://talent.baidu.com/httservice/getPostListNew'
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
            'Referer': 'https://talent.baidu.com/jobs/social-list?search=python'
        }
        # Thread-safe MySQL connection pool shared by all worker threads.
        self.pool = PooledDB(
            creator=pymysql,   # module used to create underlying connections
            maxconnections=6,  # max connections allowed (0/None = unlimited)
            mincached=2,       # idle connections created at startup (0 = none)
            maxcached=5,       # max idle connections kept (0/None = unlimited)
            maxshared=3,       # max connections shared between threads
            blocking=True,     # when exhausted, block until a connection frees up
            maxusage=None,     # max reuses per connection (None = unlimited)
            setsession=[],     # session setup commands, e.g. ["set time zone ..."]
            ping=0,            # never ping the server to validate a connection
            host='localhost',
            port=3306,
            user='root',
            password='root',
            database='py_spider',
            # utf8mb4 (real 4-byte UTF-8) so emoji or other supplementary-plane
            # characters in job descriptions do not break inserts; MySQL's
            # 'utf8' is a 3-byte subset.
            charset='utf8mb4'
        )

    def get_work_info(self, page_num):
        """Fetch one result page of job postings.

        :param page_num: 1-based page index sent as ``curPage``.
        :return: the decoded JSON response (dict).
        :raises requests.HTTPError: on a non-2xx response.
        """
        post_param = {
            'recruitType': 'SOCIAL',
            'pageSize': '10',
            'keyWord': 'python',
            'curPage': page_num,
            'projectType': ''
        }
        # timeout so a hung request cannot stall a worker thread forever
        response = requests.post(self.url, data=post_param,
                                 headers=self.headers, timeout=10)
        # Surface HTTP errors explicitly instead of failing later in .json()
        response.raise_for_status()
        return response.json()

    def parse_data(self, data):
        """Extract the fields of interest from one page payload and save each row."""
        works = data['data']['list']
        for work_info in works:
            # API returns an empty/None education for some postings.
            education = work_info['education'] if work_info['education'] else '无学历要求'
            name = work_info['name']
            service_condition = work_info['serviceCondition']
            self.save_work_info(education, name, service_condition)

    def create_table(self):
        """Create the target table if it does not exist yet."""
        with self.pool.connection() as conn:
            with conn.cursor() as cursor:
                sql = """
                    CREATE TABLE IF NOT EXISTS baidu_job (
                        id INT AUTO_INCREMENT PRIMARY KEY,
                        education VARCHAR(255),
                        name VARCHAR(255),
                        service_condition text
                    )
                """
                try:
                    cursor.execute(sql)
                    conn.commit()
                    print('创建表成功')
                except Exception as e:
                    print('创建表失败', e)

    def save_work_info(self, education, name, service_condition):
        """Insert one job row; commits on success, rolls back and logs on failure."""
        with self.pool.connection() as conn:
            with conn.cursor() as cursor:
                # Omit the AUTO_INCREMENT id column: explicitly inserting 0
                # only works under MySQL's default SQL mode and breaks when
                # NO_AUTO_VALUE_ON_ZERO is enabled.
                sql = """
                    INSERT INTO baidu_job (education, name, service_condition)
                    VALUES (%s, %s, %s)
                """
                try:
                    cursor.execute(sql, (education, name, service_condition))
                    conn.commit()
                    print('保存成功', education, name, service_condition)
                except Exception as e:
                    print('保存失败', e)
                    conn.rollback()

    def main(self):
        """Create the table, then fetch pages 1-10 and parse them concurrently."""
        self.create_table()
        with ThreadPoolExecutor(max_workers=5) as executor:
            fetch_futures = [executor.submit(self.get_work_info, page_num)
                             for page_num in range(1, 11)]
            # as_completed yields each fetch as soon as it finishes, so parsing
            # starts without waiting for every page (future.result() alone in
            # submission order would block the main thread on slow pages).
            parse_futures = [executor.submit(self.parse_data, future.result())
                             for future in as_completed(fetch_futures)]
            # Wait for parsing too and re-raise its exceptions; the original
            # dropped these futures, silently swallowing any parse failure.
            for future in as_completed(parse_futures):
                future.result()


if __name__ == '__main__':
    # Script entry point: build the scraper and run the full pipeline.
    BaiduJob().main()
