import liepin_data
import json
from requests_html import HTMLSession
import time
import random
import pandas as pd

# Prepare the shared request headers and HTTP session used by crawl().
# headers come from the project-local liepin_data module (presumably
# browser-like headers to mimic a real user — verify in liepin_data).
headers = liepin_data.headers()
session = HTMLSession()

def crawl(城市,关键词,工作经验="",公司规模="",页码=""):
    """Crawl all Liepin job-search result pages for the given filters.

    Issues one probe request to learn the total page count, then fetches
    every page, normalizing each page's job cards into a DataFrame and
    concatenating them at the end.

    Args:
        城市: city filter passed through to liepin_data.request_payload.
        关键词: search keyword.
        工作经验: optional work-experience filter.
        公司规模: optional company-size filter.
        页码: unused; kept for backward compatibility with existing callers.

    Returns:
        pandas.DataFrame with one row per job card across all pages.
        An empty DataFrame when the search yields zero pages.
    """
    api_url = 'https://api-c.liepin.com/api/com.liepin.searchfront4c.pc-search-job'
    crawl_liepin_data = []

    # 1. Probe request (no page number) to learn how many pages exist.
    payload = liepin_data.request_payload(城市=城市,关键词=关键词,工作经验=工作经验,公司规模=公司规模)
    r = session.post(
        url=api_url,
        data=json.dumps(payload),
        headers=headers,
    )
    totalPage = r.json()['data']['pagination']['totalPage']

    # 2. Fetch each page in turn.
    for i in range(totalPage):
        payload = liepin_data.request_payload(
            城市=城市,关键词=关键词,工作经验=工作经验,公司规模=公司规模,页码=str(i))
        r = session.post(
            url=api_url,
            data=json.dumps(payload),  # POST body for the search API
            headers=headers,           # mimic a real browser user
        )
        # Parse the response body once and reuse it (was parsed twice).
        body = r.json()
        crawl_liepin_data.append(pd.json_normalize(body['data']['data']['jobCardList']))

        # 3. Random pause between page fetches to avoid rate limiting;
        #    skipped after the final page since no request follows it.
        if i + 1 < totalPage:
            wait_time = random.randint(3, 10)
            print("这是第%s页，接下来将先等待%s秒...然后以继续抓取"%(i+1,wait_time))
            time.sleep(wait_time)

    # pd.concat raises ValueError on an empty list — return an empty
    # DataFrame instead when the search produced no pages.
    if not crawl_liepin_data:
        return pd.DataFrame()
    return pd.concat(crawl_liepin_data)
