from requests_html import HTMLSession
import json
import time
import pandas as pd


class JobWebsite:
    """Client for the gaoxiaojob.com job-list API that flattens results to CSV.

    One ``HTMLSession`` is reused for all requests; each fetched page is
    appended to ``teacher_job_data1.csv`` via :meth:`save_page`.
    """

    API_URL = "http://www.gaoxiaojob.com/job/home-list"
    HEADERS = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                      '(KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.57'
    }
    # CSV header, in the exact order rows are assembled in save_page().
    COLUMNS = ['jobName', 'companyName', 'amount', 'province', 'city', 'companyTypeName', 'companyNatureName',
               'education', 'jobCategory', 'jobRecord', 'releaseTime', 'shortRefreshTime', 'maxWage', 'minWage',
               'welfareTagArr']

    def __init__(self):
        # Reusing one session keeps cookies and connections across page fetches.
        self.session = HTMLSession()

    @staticmethod
    def get_params(keyword: str, page_num: int) -> dict:
        """Build the query parameters for one page request.

        'companyType' restricts results to fixed API-specific company-type
        ids (presumably universities / public institutions — confirm against
        the site's filter UI).
        """
        return {
            'keyword': keyword,
            'companyType': '1,2,3,13,14',
            'currentPage': page_num,
        }

    def get_response(self, keyword: str, page_num: int) -> list:
        """Fetch one page of listings and return the raw record dicts.

        Returns an empty list (rather than None) when the payload has no
        'data'/'list' section, so callers can iterate unconditionally.
        """
        res = self.session.get(url=self.API_URL,
                               params=self.get_params(keyword, page_num),
                               headers=self.HEADERS)
        payload = res.json().get('data') or {}
        return payload.get('list') or []

    def save_page(self, page_data: list, judge: int) -> None:
        """Flatten one page of API records and write them to the CSV.

        ``judge == 1`` (re)creates the file with a header row; any other
        value appends without a header. Saving page-by-page keeps partial
        results if the scrape is interrupted.
        """
        rows = []
        for info in page_data:
            # areaName is 'province-city[-district]'; split_area pads, so
            # indexing the first two parts is always safe.
            province, city = self.split_area(info['areaName'])[:2]
            welfare = info['welfareTagArr']
            rows.append([
                info['jobName'], info['companyName'], info['amount'],
                province, city,
                info['companyTypeName'], info['companyNatureName'],
                info['education'], info['jobCategory'], info['jobRecord'],
                info['releaseTime'], info['shortRefreshTime'],
                info['maxWage'], info['minWage'],
                ','.join(welfare) if welfare else '无',
            ])
        # Passing columns= here (instead of assigning df.columns after the
        # fact) also works when page_data is empty.
        df = pd.DataFrame(rows, columns=self.COLUMNS)
        if judge == 1:
            df.to_csv("teacher_job_data1.csv", index=False)
        else:
            df.to_csv("teacher_job_data1.csv", index=False, header=False, mode='a')
        print(f"第{judge}页已存入csv...")

    @staticmethod
    def split_area(area_name: str) -> list:
        """Split 'province-city[-district]' into its parts.

        Always returns at least two elements, padding with '' when the
        city part is missing, so callers can index [0] and [1] safely.
        """
        parts = area_name.split('-')
        while len(parts) < 2:
            parts.append('')
        return parts


if __name__ == '__main__':
    provinces = ['上海', '北京', '河北', '陕西', '广东']
    end_page_nums = [81, 81, 53, 28, 81]
    site = JobWebsite()
    # save_page(..., 1) rewrites the CSV from scratch with a header, so the
    # page counter must run across ALL provinces. The original restarted the
    # counter at 1 for every province, which overwrote everything already
    # scraped for the previous provinces.
    page_counter = 0
    for province, end_page in zip(provinces, end_page_nums):
        for page in range(1, end_page):
            page_counter += 1
            data = site.get_response(province, page)
            site.save_page(data, page_counter)
            time.sleep(3)  # throttle requests to be polite to the server
