import liepindata
import json
from requests_html import HTMLSession
import time
import random
import pandas as pd


# Cookie string captured from a logged-out browser session, used to mimic a
# real browser when no login cookie is supplied.
# NOTE(review): these tokens expire — refresh this value when requests start
# failing or returning login walls.
cookie_未登录 = "XSRF-TOKEN=-Y-naG4MRduq7J7ounyV4w; Hm_lvt_a2647413544f5a04f00da7eee0d5e200=1697022119,1697626812,1698231299,1698835812; acw_tc=276077be16988384337067379e8d89f07772038374ae9a83b114588ee59fb4; access_system=C; user_roles=0; user_photo=5f8fa3a9dfb13a7dee343d4808u.png; user_name=%E6%9D%8E%E4%BC%9F%E9%B8%BF; new_user=false; inited_user=c0137a17cca1bb5b4c8e7779fb4f4c27; imId=441740984653098f9fc0ef4d544ec462; imId_0=441740984653098f9fc0ef4d544ec462; imClientId=441740984653098f9ae867bd6e15cbed; imClientId_0=441740984653098f9ae867bd6e15cbed; imApp_0=1; fe_im_socketSequence_new_0=3_3_3; fe_im_opened_pages=; fe_im_connectJson_0=%7B%220_46e08ae76ec43cda7ecd4408bb1e9b04%22%3A%7B%22socketConnect%22%3A%222%22%2C%22connectDomain%22%3A%22liepin.com%22%7D%7D; Hm_lpvt_a2647413544f5a04f00da7eee0d5e200=1698840161; __session_seq=23; __uv_seq=23; __tlg_event_seq=179; _ga_54YTJKWN86=GS1.1.1698838442.8.1.1698840204.0.0.0"


# Build the HTTP request headers carrying the given cookie.
def create_headers(cookie):
    """Return liepin request headers with *cookie* attached.

    Delegates entirely to ``liepindata.headers``; exists so every request
    in this module builds its headers the same way.
    """
    headers = liepindata.headers(cookie)
    return headers

    
    
session = HTMLSession()

def crawl(城市,关键词,工作经验="",公司规模="",页码="",登录cookie=cookie_未登录):
    """Crawl liepin.com job listings matching the given filters.

    Parameters
    ----------
    城市, 关键词, 工作经验, 公司规模, 页码 : str
        Filter values forwarded verbatim to ``liepindata.request_payload``.
        (页码 is unused for the initial probe request and kept for
        backward compatibility with existing callers.)
    登录cookie : str
        Cookie string sent with every request; defaults to the logged-out
        browser cookie defined at module level.

    Returns
    -------
    pandas.DataFrame
        Job cards from every result page concatenated into one frame;
        an empty frame when the search yields no pages.
    """
    crawl_liepin_data = []
    print("登录cookie",登录cookie)

    # 1. Probe request: only used to learn how many result pages exist.
    payload = liepindata.request_payload(城市=城市,关键词=关键词,工作经验=工作经验,公司规模=公司规模)
    r = session.post(
                    url = "https://api-c.liepin.com/api/com.liepin.searchfront4c.pc-search-job",
                     data = json.dumps(payload),      # POST请求带的参数
                     headers = create_headers(登录cookie)
    )        # 模拟真实用户
    print(r.json())
    totalPage = r.json()['data']['pagination']['totalPage']

    # 2. Fetch every page.
    #    BUG FIX: the original concatenated and returned INSIDE this loop,
    #    so only the first page was ever collected.
    for i in range(totalPage):
        wait_time = random.randint(3,10)

        # Rebuild the payload with the current page number.
        payload = liepindata.request_payload(城市=城市,关键词=关键词,工作经验=工作经验,公司规模=公司规模,页码=str(i))

        r = session.post(
                    url = "https://api-c.liepin.com/api/com.liepin.searchfront4c.pc-search-job",
                     data = json.dumps(payload),      # POST请求带的参数
                     headers = create_headers(登录cookie)
        )        # 模拟真实用户
        print("这是第%s页,接下来将先等待%s秒...然后继续抓取"%(i+1,wait_time))

        # Flatten this page's job cards into a DataFrame and collect it.
        df = pd.json_normalize(r.json()['data']['data']['jobCardList'])
        crawl_liepin_data.append(df)

        # Random pause between pages so the traffic looks human.
        time.sleep(wait_time)

    # 3. Combine all pages. Guard the empty case: pd.concat([]) raises
    #    ValueError, so return an empty frame when nothing was fetched.
    if not crawl_liepin_data:
        return pd.DataFrame()
    return pd.concat(crawl_liepin_data)