import liepindata
import json
from requests_html import HTMLSession
import time
import random
import pandas as pd


# Logged-out cookie string captured from a browser (simulates an anonymous visitor).
# NOTE(review): this constant is not referenced anywhere in this chunk — crawl()
# uses the logged-in Cookie embedded in `headerss` instead; confirm it is still
# needed before removing. Cookie values like these expire and are credentials;
# avoid committing live ones to version control.
cookie_未登录 = "XSRF-TOKEN=zIARnRM0QQaLudJk_4OBXw; __gc_id=e936feed5bd343e5a73b304fa0d4ad5f; _ga=GA1.1.504776059.1697021947; __uuid=1697021948310.51; __tlog=1697021948354.48%7C00000000%7C00000000%7C00000000%7C00000000; Hm_lvt_a2647413544f5a04f00da7eee0d5e200=1697021951; acw_tc=2760828416970219810274366e53a098388abcfc886c89f5cbf2332344893f; Hm_lpvt_a2647413544f5a04f00da7eee0d5e200=1697021975; __session_seq=4; __uv_seq=4; __tlg_event_seq=24; _ga_54YTJKWN86=GS1.1.1697021946.1.1.1697023027.0.0.0"

# Prepare headers (delegated to the liepindata helper module).
def create_headers(cookie):
    """Build request headers carrying *cookie* via liepindata.headers."""
    built_headers = liepindata.headers(cookie)
    return built_headers



# Request headers copied verbatim from a real logged-in browser session on
# liepin.com, used by crawl() to make the POST requests look like a browser.
# NOTE(review): the Cookie and X-Xsrf-Token values are live session credentials
# from a logged-in account — they expire and should not live in source control.
headerss = {
    "Accept": "application/json, text/plain, */*",
    "Accept-Encoding": "gzip, deflate, br",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Connection": "keep-alive",
    # NOTE(review): hard-coding Content-Length is fragile — the actual payload
    # length varies per request; verify the HTTP client recomputes/overrides it.
    "Content-Length": "319",
    "Content-Type": "application/json;charset=UTF-8;",
    "Cookie": "inited_user=21326f5719161b33e8af2ef1e6c39e9c; __gc_id=ca5daf4a0cf640f2b5fdc8cf0bc45cc1; _ga=GA1.1.836451569.1681903087; __uuid=1681903087304.37; need_bind_tel=false; c_flag=518a24009ce3958d4e818ccb2658f819; new_user=false; imId=b5e1273d5cb093f4bd01d94a8ce8c34f; imId_0=b5e1273d5cb093f4bd01d94a8ce8c34f; imClientId=b5e1273d5cb093f45dc8fe47cac96bc7; imClientId_0=b5e1273d5cb093f45dc8fe47cac96bc7; XSRF-TOKEN=4wZlEtCWTVOCS94-gBS-Pg; __tlog=1700659440222.41%7C00000000%7C00000000%7C00000000%7C00000000; acw_tc=2760828717006594407961455e94fbd5b1445252e574d9c44f44da522be9e9; Hm_lvt_a2647413544f5a04f00da7eee0d5e200=1700481245,1700642309,1700642500,1700659441; hpo_role-sec_project=sec_project_liepin; hpo_sec_tenant=0; UniqueKey=92e8ad0c82debad0a9dba665e9526a14; liepin_login_valid=0; lt_auth=6rpZOHUGylv95XDZjmJZ5vocitOuUWjNpigIhB9TgdPvDvPk4P%2FmQAOHqrUA%2FioIq098IfszMLf%2BNe77wHZL6UUT8FGnlZ6utf6k0HsCUeZlL8W2vezHg%2FXUQp0lkkAA8nJbpEIL%2BVzO; user_roles=0; user_photo=5f8fa3b979c7cc70efbf445908u.png; user_name=%E5%90%B4%E6%85%A7%E7%BA%AF; inited_user=21326f5719161b33e8af2ef1e6c39e9c; imApp_0=1; _ga_54YTJKWN86=GS1.1.1700659439.26.1.1700659546.0.0.0; __session_seq=4; __uv_seq=10; Hm_lpvt_a2647413544f5a04f00da7eee0d5e200=1700659547; fe_im_opened_pages=_1700659547837; fe_im_connectJson_0=%7B%220_92e8ad0c82debad0a9dba665e9526a14%22%3A%7B%22socketConnect%22%3A%221%22%2C%22connectDomain%22%3A%22liepin.com%22%7D%7D; fe_im_socketSequence_new_0=2_2_2; __tlg_event_seq=20",
    "Host": "api-c.liepin.com",
    "Origin": "https://www.liepin.com",
    "Referer": "https://www.liepin.com/",
    "Sec-Ch-Ua": "\"Google Chrome\";v=\"119\", \"Chromium\";v=\"119\", \"Not?A_Brand\";v=\"24\"",
    "Sec-Ch-Ua-Mobile": "?0",
    "Sec-Ch-Ua-Platform": "\"Windows\"",
    "Sec-Fetch-Dest": "empty",
    "Sec-Fetch-Mode": "cors",
    "Sec-Fetch-Site": "same-site",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
    "X-Client-Type": "web",
    "X-Fscp-Bi-Stat": "{\"location\": \"https://www.liepin.com/zhaopin/?inputFrom=head_navigation&scene=init&workYearCode=1&ckId=5zyezywpsw1hvvuq8e61scqquy4xxzwb\"}",
    "X-Fscp-Fe-Version": "",
    "X-Fscp-Std-Info": "{\"client_id\": \"40108\"}",
    "X-Fscp-Trace-Id": "2ce94bbe-d50a-41dc-8e7d-60637b17f537",
    "X-Fscp-Version": "1.1",
    "X-Requested-With": "XMLHttpRequest",
    "X-Xsrf-Token": "4wZlEtCWTVOCS94-gBS-Pg"
}
# One shared HTTP session reused by crawl() for every request (keeps
# connections and cookies across pages).
session = HTMLSession()

def crawl(城市, 关键词, 工作经验="", 公司规模="", 页码=""):
    """Scrape all result pages of a liepin.com job search into one DataFrame.

    Parameters (names kept in Chinese for caller compatibility):
        城市:     city filter passed to liepindata.request_payload.
        关键词:   search keyword.
        工作经验: work-experience filter (optional).
        公司规模: company-size filter (optional).
        页码:     accepted for backward compatibility but IGNORED — the
                  function always iterates every page itself.

    Returns:
        pandas.DataFrame with one row per job card across all pages
        (empty DataFrame when the search yields zero pages).
    """
    url = 'https://api-c.liepin.com/api/com.liepin.searchfront4c.pc-search-job'

    # 1. Probe request: discover how many result pages exist for this query.
    payload = liepindata.request_payload(城市=城市, 关键词=关键词,
                                         工作经验=工作经验, 公司规模=公司规模)
    r = session.post(
        url=url,
        data=json.dumps(payload),   # POST body as JSON text
        headers=headerss,           # browser-like headers (incl. login cookie)
    )
    totalPage = r.json()['data']['pagination']['totalPage']

    # 2. Fetch each page, waiting a random 3-10 s between requests to look
    #    less like a bot.
    crawl_liepin_data = []
    for i in range(totalPage):
        wait_time = random.randint(3, 10)
        payload = liepindata.request_payload(城市=城市, 关键词=关键词,
                                             工作经验=工作经验, 公司规模=公司规模,
                                             页码=str(i))
        r = session.post(
            url=url,
            data=json.dumps(payload),
            headers=headerss,
        )
        print("这是第%s页，接下来将先等待%s秒...然后以继续抓取" % (i + 1, wait_time))

        # Parse the response once (the original parsed it twice, discarding
        # the first result) and flatten the job cards into a DataFrame.
        df = pd.json_normalize(r.json()['data']['data']['jobCardList'])
        crawl_liepin_data.append(df)

        time.sleep(wait_time)

    # Bug fix: pd.concat([]) raises ValueError — return an empty frame when
    # the search produced no pages instead of crashing.
    if not crawl_liepin_data:
        return pd.DataFrame()
    return pd.concat(crawl_liepin_data)
