import liepindata
import json
from requests_html import HTMLSession
import time
import random
import pandas as pd


# Prepare the shared request headers and HTTP session used by crawl() below.
headers = liepindata.headers()  # project-supplied headers that mimic a real browser user
session = HTMLSession()  # one session reused across requests (keeps cookies / connection pool)

def crawl(城市,关键词,工作经验="",公司规模="",页码=""):
    """Scrape Liepin job-search results for every result page.

    Parameters
    ----------
    城市 : str
        City filter, passed through to ``liepindata.request_payload``.
    关键词 : str
        Search keyword.
    工作经验 : str, optional
        Work-experience filter.
    公司规模 : str, optional
        Company-size filter.
    页码 : str, optional
        Kept for backward compatibility; page numbers are generated
        internally from the API-reported total page count.

    Returns
    -------
    pandas.DataFrame
        One row per job card, concatenated across all result pages.
        Empty DataFrame when the API reports zero pages.
    """
    api_url = "https://api-c.liepin.com/api/com.liepin.searchfront4c.pc-search-job"
    page_frames = []

    # 1. Initial request only to discover the total number of result pages.
    payload = liepindata.request_payload(城市=城市, 关键词=关键词,
                                         工作经验=工作经验, 公司规模=公司规模)
    r = session.post(url=api_url,
                     data=json.dumps(payload),  # POST body as JSON
                     headers=headers)           # mimic a real browser user
    total_page = r.json()['data']['pagination']['totalPage']

    # 2. Fetch every page (the API's page index appears to be 0-based —
    #    the original code also passed str(i) starting at 0).
    for i in range(total_page):
        wait_time = random.randint(3, 10)

        payload = liepindata.request_payload(城市=城市, 关键词=关键词,
                                             工作经验=工作经验, 公司规模=公司规模,
                                             页码=str(i))
        r = session.post(url=api_url,
                         data=json.dumps(payload),
                         headers=headers)
        print("这是第%s页,接下来将先等待%s秒...然后继续抓取"%(i+1,wait_time))

        page_frames.append(pd.json_normalize(r.json()['data']['data']['jobCardList']))

        # Random delay between requests to avoid rate-limiting / bans.
        time.sleep(wait_time)

    # BUG FIX: the original concatenated and returned INSIDE the loop,
    # so only the first page was ever scraped. Concatenate once, after
    # all pages were collected; ignore_index avoids duplicate row labels.
    if not page_frames:
        return pd.DataFrame()
    return pd.concat(page_frames, ignore_index=True)