import liepindata
import json
from requests_html import HTMLSession
import time
import random
import pandas as pd


# Logged-out (anonymous) cookie string used to mimic a real browser session.
# NOTE(review): these values are session/time-bound and will expire — refresh
# them from a browser's developer tools when requests start failing.
cookie_未登录 ="XSRF-TOKEN=zIARnRM0QQaLudJk_4OBXw; __gc_id=e936feed5bd343e5a73b304fa0d4ad5f; _ga=GA1.1.504776059.1697021947; __uuid=1697021948310.51; __tlog=1697021948354.48%7C00000000%7C00000000%7C00000000%7C00000000; Hm_lvt_a2647413544f5a04f00da7eee0d5e200=1697021951; acw_tc=2760828416970219810274366e53a098388abcfc886c89f5cbf2332344893f; Hm_lpvt_a2647413544f5a04f00da7eee0d5e200=1697021975; __session_seq=4; __uv_seq=4; __tlg_event_seq=24; _ga_54YTJKWN86=GS1.1.1697021946.1.1.1697023027.0.0.0"

# 准备headers和session
def create_headers(cookie):
    """Return the HTTP request headers for liepin API calls.

    Delegates to the project helper ``liepindata.headers``, embedding the
    given *cookie* string so the request looks like a real browser session.
    """
    headers = liepindata.headers(cookie)
    return headers

# Single shared HTML session, reused across every request in crawl().
session = HTMLSession()

def crawl(城市,关键词,工作经验="",公司规模="",页码="",登录cookie=cookie_未登录):
    """Crawl liepin.com job search results and return them as one DataFrame.

    Parameter names are kept in Chinese for compatibility with existing
    callers:
        城市: city filter passed through to ``liepindata.request_payload``.
        关键词: search keyword.
        工作经验: work-experience filter (optional).
        公司规模: company-size filter (optional).
        页码: unused here — kept only for backward compatibility; the
              function paginates by itself from page 0 to totalPage-1.
        登录cookie: cookie string sent with every request (defaults to the
              module-level anonymous cookie).

    Returns a pandas DataFrame concatenating the ``jobCardList`` records of
    every result page, or an empty DataFrame when there are no pages.
    """
    crawl_liepin_data = []
    print("登录cookie",登录cookie)

    # 1. Probe request (no page number) just to learn how many pages exist.
    payload = liepindata.request_payload(城市=城市,关键词=关键词,工作经验=工作经验,公司规模=公司规模)
    r = session.post(
                url = 'https://api-c.liepin.com/api/com.liepin.searchfront4c.pc-search-job',
                data = json.dumps(payload),        # JSON-encoded POST body
                headers = create_headers(登录cookie)  # mimic a real browser
    )
    # Parse the response once and reuse it (the original re-parsed it twice).
    body = r.json()
    print(body)
    totalPage = body['data']['pagination']['totalPage']

    # 2. Fetch every page in turn.
    for i in range(totalPage):
        # Random delay between pages so the traffic looks human.
        wait_time = random.randint(3,10)
        payload = liepindata.request_payload(城市=城市,关键词=关键词,工作经验=工作经验,公司规模=公司规模,页码=str(i))
        r = session.post(
                url = 'https://api-c.liepin.com/api/com.liepin.searchfront4c.pc-search-job',
                data = json.dumps(payload),
                headers = create_headers(登录cookie)
            )
        print("这是第%s页，接下来将先等待%s秒...然后以继续抓取"%(i+1,wait_time))
        # Parse once; flatten the nested job cards into tabular rows.
        page = r.json()
        df = pd.json_normalize(page['data']['data']['jobCardList'])
        crawl_liepin_data.append(df)
        time.sleep(wait_time)

    # Guard: pd.concat raises ValueError on an empty list (totalPage == 0).
    if not crawl_liepin_data:
        return pd.DataFrame()
    return pd.concat(crawl_liepin_data)
