# 该项目为猎聘数据挖掘和分析项目
import requests
import json
import pandas as pd
import time


# Mapping from Chinese city name to Liepin's internal district code.
# These codes are sent as both "city" and "dq" in the pc-search-job API payload.
地区编码字典 = {
    '广州':'050020',
    '深圳':'050090'
}

def liepin_data(用户输入的地区, 用户输入职位):
    """Scrape Liepin job listings for a city/keyword and export them to Excel.

    Pages through Liepin's ``pc-search-job`` API, flattens every page of
    job cards into one DataFrame and writes it to
    ``<keyword>_liepin_<MMDD_HHMM>.xlsx`` in the working directory.

    Args:
        用户输入的地区: city name; must be a key of ``地区编码字典``.
        用户输入职位: search keyword (job title).

    Returns:
        The 3-tuple ``("当前数据已导出，数据量为：", row_count, "行")`` —
        tuple shape preserved for backward compatibility with callers.

    Raises:
        KeyError: if the city is not present in ``地区编码字典``.
        requests.HTTPError: if any API request returns a non-2xx status.
    """
    url = "https://apic.liepin.com/api/com.liepin.searchfront4c.pc-search-job"

    payload = {
        "data": {
            "mainSearchPcConditionForm": {
                "city": 地区编码字典[用户输入的地区],
                "dq": 地区编码字典[用户输入的地区],
                "pubTime": "",
                "currentPage": 0,
                "pageSize": 40,
                "key": 用户输入职位,
                "suggestTag": "",
                "workYearCode": "0",
                "compId": "",
                "compName": "",
                "compTag": "",
                "industry": "",
                "salary": "",
                "jobKind": "",
                "compScale": "",
                "compKind": "",
                "compStage": "",
                "eduLevel": ""
            },
            "passThroughForm": {
                "scene": "input",
                "skId": "",
                "fkId": "",
                "ckId": "h2c8pxojavrmo1w785z7ueih2ybfpux8",
                "suggest": None
            }
        }
    }

    # Browser-captured headers. NOTE: the hard-coded 'Content-Length: 412'
    # from the original capture was removed — the body length varies with
    # the keyword/city, so a fixed value was simply wrong; requests
    # computes the correct length automatically.
    headers = {
        'Accept': 'application/json, text/plain, */*',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
        'Content-Type': 'application/json;charset=UTF-8;',
        'Cookie':'__gc_id=a7a8074d044b4d7f878d465462b9f47e; __uuid=1679623973977.58; need_bind_tel=false; _ga=GA1.1.1765007085.1682058856; new_user=false; access_system=C; c_flag=e3ad47dc0137ea28d99d002eec8317a5; imId=0a4a6b7509c52663bbbf42f1dd8ed06d; imId_0=0a4a6b7509c52663bbbf42f1dd8ed06d; imClientId=0a4a6b7509c526636c850e2750d0a23b; imClientId_0=0a4a6b7509c526636c850e2750d0a23b; __tlog=1687483014350.88%7C00000000%7C00000000%7Cs_00_t00%7Cs_00_t00; XSRF-TOKEN=OlK3CRiMRC6rNeepZ9NU1Q; UniqueKey=40719fae50dd806441344312b1f2ca2b; liepin_login_valid=0; lt_auth=sugLPyFQnVj57STf3GNb4akZht%2BgV2nL9HsP0RhRhdG6XKaw4P%2FmQgqGrbMA%2BioIqx8mI6gzMLb2Muz2ynNN4kYQ%2B1GnlZ6utf6k1X4eTuZnHuyflMXuqsjQQJgirXo6ykpgn2si0HU%3D; inited_user=cf1617b489506e1c05ec3579b9442ddd; user_roles=0; user_photo=5f8fa3bc8dbe6273dcf85e5e08u.png; user_name=%E9%83%91%E7%A7%8B%E7%8F%8D; Hm_lvt_a2647413544f5a04f00da7eee0d5e200=1686277854,1687440496,1687444840,1687483080; imApp_0=1; Hm_lpvt_a2647413544f5a04f00da7eee0d5e200=1687483095; fe_im_opened_pages=_1687483095523; fe_im_connectJson_0=%7B%220_40719fae50dd806441344312b1f2ca2b%22%3A%7B%22socketConnect%22%3A%221%22%2C%22connectDomain%22%3A%22liepin.com%22%7D%7D; fe_im_socketSequence_new_0=2_2_2; __session_seq=8; __uv_seq=8; _ga_54YTJKWN86=GS1.1.1687483094.11.1.1687483129.0.0.0',
        'Host': 'apic.liepin.com',
        'Origin': 'https://www.liepin.com',
        'Pragma': 'no-cache',
        'Referer': 'https://www.liepin.com/',
        'sec-ch-ua': '"Google Chrome";v="111", "Not(A:Brand";v="8", "Chromium";v="111"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'Sec-Fetch-Dest': 'empty',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Site': 'same-site',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36',
        'X-Client-Type': 'web',
        'X-Fscp-Bi-Stat': '{"location": "https://www.liepin.com/zhaopin/?inputFrom=www_index&workYearCode=0&key=%E4%BA%A7%E5%93%81%E7%BB%8F%E7%90%86&scene=input&ckId=htihov8m2frxgy6ywo2wsg2gncnydzlb&dq="}',
        'X-Fscp-Fe-Version': '',
        'X-Fscp-Std-Info': '{"client_id": "40108"}',
        'X-Fscp-Trace-Id': 'd33575dc-bfcf-4269-bbb2-0058a4fa3e5a',
        'X-Fscp-Version': '1.1',
        'X-Requested-With': 'XMLHttpRequest',
        'X-XSRF-TOKEN': 'OlK3CRiMRC6rNeepZ9NU1Q'
    }

    # 1. First request: discover how many result pages exist.
    #    json=payload lets requests serialize the body and keeps it in
    #    sync with any payload mutation below.
    r = requests.post(url, json=payload, headers=headers)
    r.raise_for_status()  # fail loudly on 4xx/5xx instead of a confusing KeyError
    page = r.json()['data']['pagination']['totalPage']

    # 2. Page through all results. Page 0 is fetched again here so the
    #    collection loop stays uniform (one extra cheap request).
    response_df = []
    for i in range(page):
        payload['data']['mainSearchPcConditionForm']['currentPage'] = i
        r = requests.post(url, json=payload, headers=headers)
        r.raise_for_status()
        df = pd.json_normalize(r.json()['data']['data']['jobCardList'])
        response_df.append(df)
        time.sleep(0.5)  # small throttle: be polite / reduce anti-bot risk

    # 3. Assemble and export. Guard the zero-page case: pd.concat([])
    #    raises ValueError, so fall back to an empty DataFrame.
    df = pd.concat(response_df) if response_df else pd.DataFrame()
    key = payload['data']['mainSearchPcConditionForm']['key']
    # One strftime call instead of four time.localtime() reads (which could
    # straddle a minute boundary); zero-padded fields avoid ambiguous names
    # like "111_..." (Jan 11 vs Nov 1).
    output_time = time.strftime('%m%d_%H%M')
    df.to_excel(key + '_liepin_' + output_time + '.xlsx')
    # 4. Return value (original tuple form preserved for compatibility).
    return "当前数据已导出，数据量为：", len(df), "行"

def liepin_dq(用户输入的地区, 数据文件='产品经理_liepin_624_1129.xlsx'):
    """Analyze the regional distribution of Liepin jobs and render a map.

    Reads a job-listing Excel file previously exported by ``liepin_data``,
    counts postings per district (the part after '-' in ``job.dq``) and
    builds a pyecharts Map chart.

    Args:
        用户输入的地区: region name, used as the series name and chart title.
        数据文件: path of the Excel file to analyze. Defaults to the file
            name the original code hard-coded, so existing calls behave
            the same.

    Returns:
        A configured ``pyecharts.charts.Map`` instance (not yet rendered).
    """
    # pyecharts was used here but never imported anywhere in the module
    # (NameError on every call). Import locally so the rest of the module
    # stays importable even without pyecharts installed.
    from pyecharts.charts import Map
    from pyecharts import options as opts

    df = pd.read_excel(数据文件)
    df_PM_gz = df[['job.labels','job.refreshTime','job.title','job.salary','job.dq','job.topJob','job.requireWorkYears','job.requireEduLevel','comp.compStage','comp.compName','comp.compIndustry','comp.compScale']]

    # Build (district, count) pairs in a single pass so names and counts
    # can never fall out of alignment. The original built the two lists
    # independently (names filtered by '-', counts sliced with [1:]),
    # which mis-pairs them whenever the entry without '-' is not exactly
    # the first one in value_counts().
    统计 = [
        (地区名.split('-')[1], int(数量))
        for 地区名, 数量 in df_PM_gz['job.dq'].value_counts().items()
        if '-' in 地区名
    ]

    c = (
        Map()
        .add(用户输入的地区, [list(对) for 对 in 统计])
        .set_global_opts(
            title_opts=opts.TitleOpts(title='Map-' + 用户输入的地区 + '地图'),
            visualmap_opts=opts.VisualMapOpts(),
        )
    )
    return c





