import liepindata
import json
from requests_html import HTMLSession
import time
import random
import pandas as pd


# # Logged-out cookie (browser simulation) — kept for reference, not used
# cookie_未登录 = "XSRF-TOKEN=zIARnRM0QQaLudJk_4OBXw; __gc_id=e936feed5bd343e5a73b304fa0d4ad5f; _ga=GA1.1.504776059.1697021947; __uuid=1697021948310.51; __tlog=1697021948354.48%7C00000000%7C00000000%7C00000000%7C00000000; Hm_lvt_a2647413544f5a04f00da7eee0d5e200=1697021951; acw_tc=2760828416970219810274366e53a098388abcfc886c89f5cbf2332344893f; Hm_lpvt_a2647413544f5a04f00da7eee0d5e200=1697021975; __session_seq=4; __uv_seq=4; __tlg_event_seq=24; _ga_54YTJKWN86=GS1.1.1697021946.1.1.1697023027.0.0.0"

# Logged-in session cookie, captured from a browser; used as the default
# credential for every crawl request. NOTE(review): this expires — refresh
# it from the browser when requests start failing.
cookie = "inited_user=49eb67c114840477b15f334bfa8abf6c; __uuid=1685532036831.42; __gc_id=35a80e770a2f48fbac273c3799a10914; need_bind_tel=false; new_user=false; c_flag=18cb83a18db2f77c28d02584bc00c471; _ga=GA1.1.373058038.1685532080; XSRF-TOKEN=2n-tdpFFS_6XoTgmHyq26w; __tlog=1698838602934.94%7C00000000%7C00000000%7C00000000%7C00000000; Hm_lvt_a2647413544f5a04f00da7eee0d5e200=1697021981,1697626870,1698231325,1698838603; acw_tc=276077cb16988386028382754e29e9a39379cae2eb23acf240cea17d37e305; UniqueKey=e7e8f2a2a066997d7ecd4408bb1e9b04; liepin_login_valid=0; lt_auth=vehZM30Dxw6v5CXfjmpc5a5O2tL8AW3B83wPgRFS0t67Dfzj4P%2FhQA2GprYB%2FCoIqxghIvUzMLb3PeD9wXVJ6EQR%2FlGnlZ6utf6k0HsDUeFnHuyflMXuqsjQQ5wtrXo6ykpgn2si0HU%3D; access_system=C; user_roles=0; user_photo=5f8fa3bddfb13a7dee343d7608u.png; user_name=%E6%9E%97%E5%B9%BC%E8%BE%9E; inited_user=49eb67c114840477b15f334bfa8abf6c; Hm_lpvt_a2647413544f5a04f00da7eee0d5e200=1698838634; imId=bfac249da59fd5b7c2d4eeeb65574f2f; imId_0=bfac249da59fd5b7c2d4eeeb65574f2f; imClientId=bfac249da59fd5b78837b9416ddb1057; imClientId_0=bfac249da59fd5b78837b9416ddb1057; imApp_0=1; fe_im_socketSequence_new_0=1_1_1; __tlg_event_seq=12; fe_im_opened_pages=; fe_im_connectJson_0=%7B%220_e7e8f2a2a066997d7ecd4408bb1e9b04%22%3A%7B%22socketConnect%22%3A%222%22%2C%22connectDomain%22%3A%22liepin.com%22%7D%7D; _ga_54YTJKWN86=GS1.1.1698838602.10.1.1698838653.0.0.0; __session_seq=3; __uv_seq=3"

# 准备headers和session
def create_headers(cookie):
    """Return the request headers built by ``liepindata`` for this cookie.

    Thin wrapper so every request in this module constructs its headers
    the same way (cookie injected to look like a real logged-in browser).
    """
    request_headers = liepindata.headers(cookie)
    return request_headers
    
session = HTMLSession()  # single shared HTTP session, reused across all requests

def crawl(城市, 关键词, 工作经验="", 公司规模="", 学历="", 页码="", 登录cookie=cookie):
    """Crawl Liepin job-search results matching the given filters.

    Posts one probe request to learn the total page count, then fetches
    every page in turn (pausing a random 3-10 s between pages to look less
    like a bot) and returns all job cards as a single DataFrame.

    Args:
        城市: city filter understood by the Liepin search API.
        关键词: search keyword.
        工作经验: work-experience filter (optional).
        公司规模: company-size filter (optional).
        学历: education filter (optional).
        页码: accepted for interface compatibility; pagination here is
            driven by the ``totalPage`` value the API reports.
        登录cookie: logged-in cookie string sent with every request.

    Returns:
        pandas.DataFrame with one row per job card (original per-page
        indices preserved); empty DataFrame when zero pages are reported.
    """
    api_url = 'https://api-c.liepin.com/api/com.liepin.searchfront4c.pc-search-job'
    crawl_liepin_data = []
    print("登录cookie", 登录cookie)

    # 1. Probe request: discover how many result pages exist.
    payload = liepindata.request_payload(城市=城市, 关键词=关键词, 工作经验=工作经验,
                                         公司规模=公司规模, 学历=学历)
    r = session.post(
        url=api_url,
        data=json.dumps(payload),       # API expects a JSON body
        headers=create_headers(登录cookie),  # impersonate a real logged-in user
    )
    body = r.json()  # parse the response once and reuse it
    print(body)
    totalPage = body['data']['pagination']['totalPage']

    # 2. Fetch each page reported by the probe request.
    for i in range(totalPage):
        wait_time = random.randint(3, 10)
        payload = liepindata.request_payload(城市=城市, 关键词=关键词, 工作经验=工作经验,
                                             公司规模=公司规模, 学历=学历, 页码=str(i))
        r = session.post(
            url=api_url,
            data=json.dumps(payload),
            headers=create_headers(登录cookie),
        )
        print("这是第%s页，接下来将先等待%s秒...然后继续抓取" % (i + 1, wait_time))
        # Flatten the nested job-card records into a tabular frame.
        df = pd.json_normalize(r.json()['data']['data']['jobCardList'])
        crawl_liepin_data.append(df)
        # 3. Randomized pause between pages to avoid rate-limiting.
        time.sleep(wait_time)

    # pd.concat([]) raises ValueError, so guard the zero-page case.
    if not crawl_liepin_data:
        return pd.DataFrame()
    return pd.concat(crawl_liepin_data)