#!/usr/bin/env python
# coding: utf-8

# # 数据准备-liepin-PM

# ## 请求页面准备
# > 1. 找到页面的数据API接口
# > 2. 提供正确的用户请求酬载（payload）
# > 3. 准备请求的headers，增加cookie信息（用户登录之后的cookie），保证数据的合理性
# 

# In[2]:


import requests
import json

# liepin.com PC job-search API endpoint (discovered via browser DevTools).
url = "https://apic.liepin.com/api/com.liepin.searchfront4c.pc-search-job"
# Request payload mirroring the browser's search form.
# NOTE(review): field meanings inferred from the site's form, not documented —
# "050020" presumably is the Guangzhou region code; confirm against the site.
payload = {
    "data": {
        "mainSearchPcConditionForm": {
            "city": "050020",            # region code for the search
            "dq": "050020",              # district/region filter (same code)
            "pubTime": "",
            "currentPage": 0,            # 0-based page index; mutated by the paging loop below
            "pageSize": 40,              # jobs per page
            "key": "产品经理",            # search keyword ("Product Manager")
            "suggestTag": "",
            "workYearCode": "0",         # "0" = any work-experience level
            "compId": "",
            "compName": "",
            "compTag": "",
            "industry": "",
            "salary": "",
            "jobKind": "",
            "compScale": "",
            "compKind": "",
            "compStage": "",
            "eduLevel": ""
        },
        "passThroughForm": {
            "scene": "input",
            "skId": "",
            "fkId": "",
            # session-bound tracking id captured from the browser; may go stale
            "ckId": "h2c8pxojavrmo1w785z7ueih2ybfpux8",
            "suggest": None
        }
    }
}



# HTTP headers copied from a real logged-in browser session so the API treats
# this script as the browser.
# NOTE(review): the Cookie and X-XSRF-TOKEN values are session-bound and will
# expire; refresh them from DevTools when requests start failing.
headers = {
    'Accept': 'application/json, text/plain, */*',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    # 'Content-Length' deliberately omitted: requests computes it from the
    # actual body. The previous hard-coded '412' could mismatch the real
    # payload size and cause the server to truncate or reject the request.
    'Content-Type': 'application/json;charset=UTF-8;',
    'Cookie': 'inited_user=daf7251f92024e8969feb28b0e9ad34c; XSRF-TOKEN=ioYE87NtQmKVBO3G0Y0HCg; __gc_id=37822670cebb460693955108ff86d124; __uuid=1679636647293.55; __tlog=1679636647295.49%7C00000000%7C00000000%7C00000000%7C00000000; _ga=GA1.1.1687403970.1681896657; acw_tc=276077d816837105124564397e9c8b7118940ba709aa4ba34680cbb786f8bc; Hm_lvt_a2647413544f5a04f00da7eee0d5e200=1683710513; UniqueKey=95507c72a8d5ae141a667e00ad0d9493; liepin_login_valid=0; lt_auth=s%2Bpfa3QGxlzxtXfR3zQN4vociI39UWvIpX8EhE0Ahoe%2BCqG04PngSwOGq7EExAMhxB8mc8ULN7j9Mun%2BzHBP6UIbwGqnl4CyvOW92GECS%2B1cN8W2vezHl8zRQpQcl0AC8nFbtkIL%2BQ%3D%3D; access_system=C; user_roles=0; user_photo=5f8fa3a679c7cc70efbf444e08u.png; user_name=%E8%AE%B8%E6%99%BA%E8%B6%85; need_bind_tel=false; new_user=false; c_flag=fa43f4d55f3df63a96a7b4f194e214d4; inited_user=daf7251f92024e8969feb28b0e9ad34c; imId=c5f9b89f8466dffe6882ca1e5431db9c; imId_0=c5f9b89f8466dffe6882ca1e5431db9c; imClientId=c5f9b89f8466dffeb1921abcfab3aed0; imClientId_0=c5f9b89f8466dffeb1921abcfab3aed0; imApp_0=1; Hm_lpvt_a2647413544f5a04f00da7eee0d5e200=1683710561; __session_seq=24; __uv_seq=4; fe_im_socketSequence_new_0=2_2_2; fe_im_opened_pages=; fe_im_connectJson_0=%7B%220_95507c72a8d5ae141a667e00ad0d9493%22%3A%7B%22socketConnect%22%3A%222%22%2C%22connectDomain%22%3A%22liepin.com%22%7D%7D; _ga_54YTJKWN86=GS1.1.1683710511.3.1.1683710585.0.0.0',
    'Host': 'apic.liepin.com',
    'Origin': 'https://www.liepin.com',
    'Pragma': 'no-cache',
    'Referer': 'https://www.liepin.com/',
    'sec-ch-ua': '"Google Chrome";v="111", "Not(A:Brand";v="8", "Chromium";v="111"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"macOS"',
    'Sec-Fetch-Dest': 'empty',
    'Sec-Fetch-Mode': 'cors',
    'Sec-Fetch-Site': 'same-site',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
    'X-Client-Type': 'web',
    'X-Fscp-Bi-Stat': '{"location": "https://www.liepin.com/zhaopin/?inputFrom=www_index&workYearCode=0&key=%E4%BA%A7%E5%93%81%E7%BB%8F%E7%90%86&scene=input&ckId=htihov8m2frxgy6ywo2wsg2gncnydzlb&dq="}',
    'X-Fscp-Fe-Version': '',
    'X-Fscp-Std-Info': '{"client_id": "40108"}',
    'X-Fscp-Trace-Id': '296c6ffc-320c-4ab2-ab90-29e44a2664d4',
    'X-Fscp-Version': '1.1',
    'X-Requested-With': 'XMLHttpRequest',
    'X-XSRF-TOKEN': 'ioYE87NtQmKVBO3G0Y0HCg'
}

# send a POST request with headers; json= serializes the payload and sets the
# correct Content-Length automatically
r = requests.post(url, json=payload, headers=headers)
r.raise_for_status()  # fail fast on HTTP errors instead of parsing an error page

# extract the JSON data from the response
response_data = r.json()

# print the raw response — useful for eyeballing the JSON structure
print(response_data)


# ## 翻页获取数据

# In[3]:



import pandas as pd

# Page through the search results: 21 pages x pageSize (40) ≈ the first 840
# postings. Each page's job cards are flattened into a DataFrame.
response_df = []
for page in range(21):
    payload['data']['mainSearchPcConditionForm']['currentPage'] = page

    # send a POST request with headers (requests serializes the payload
    # and computes Content-Length itself)
    r = requests.post(url, json=payload, headers=headers)
    r.raise_for_status()  # surface HTTP failures immediately

    # extract the JSON data from the response
    response_data = r.json()
    print(response_data)

    # flatten the nested job-card records into a flat, dotted-column DataFrame
    df = pd.json_normalize(response_data['data']['data']['jobCardList'])
    response_df.append(df)


# In[4]:


response_df


# ## 数据整理成为表格
# > 1. pandas 中的concat方法

# In[5]:


# Stitch the per-page DataFrames into one table. ignore_index=True rebuilds a
# unique 0..N-1 index; without it every page contributes duplicate labels
# 0..39, which breaks label-based lookups such as df.loc[0].
df = pd.concat(response_df, ignore_index=True)
df


# In[6]:


df.to_excel('liepin_PM_0510.xlsx')


# # 数据分析
# 
# > 1. Pandas/Numpy
# > 2. Pyecharts(bokeh、matplotlib、seaborn、echarts、Tableau)/更考虑用户的体验

# In[7]:


# Reload the snapshot from disk so the analysis can restart without
# re-scraping. index_col=0 consumes the index that to_excel saved, instead of
# letting it leak back in as an extra 'Unnamed: 0' column.
df = pd.read_excel('liepin_PM_0510.xlsx', index_col=0)
df


# In[8]:


# Column names, non-null counts and dtypes — quick data-quality overview.
df.info()


# ## 筛选存在数据分析价值的列

# In[27]:


# Keep only the columns with analytical value for the Guangzhou PM study.
analysis_columns = [
    'job.labels', 'job.refreshTime', 'job.title', 'job.salary', 'job.dq',
    'job.topJob', 'job.requireWorkYears', 'job.requireEduLevel',
    'comp.compStage', 'comp.compName', 'comp.compIndustry', 'comp.compScale',
]
df_PM_gz = df[analysis_columns]
df_PM_gz


# ## 广州的PM地区分布

# In[32]:


df_PM_gz['job.dq'].value_counts()


# In[44]:


# Build the two parallel lists the map visualization needs. value_counts() is
# computed once and reused (the original recomputed it for each list).
# NOTE(review): [1:] skips the first (most frequent) entry, presumably the
# city-wide "广州" bucket whose value has no '-district' suffix — confirm; an
# entry without '-' would make split('-')[1] raise IndexError.
_dq_counts = df_PM_gz['job.dq'].value_counts()

# district names: take the part after the '-' in each "city-district" string
广州地区 = [dq.split('-')[1] for dq in _dq_counts.index.tolist()[1:]]
广州地区


# In[40]:


# matching posting counts, in the same (descending) order as 广州地区
广州_岗位个数 = _dq_counts.values.tolist()[1:]
广州_岗位个数


# In[45]:


# 可视化：以可视化工具数据形态符合的数据进行输入

# Visualization: feed the (district, count) pairs to a pyecharts Map.

from pyecharts import options as opts
from pyecharts.charts import Map
from pyecharts.faker import Faker

# Pair each district with its job count for the map's data series.
map_data = [list(pair) for pair in zip(广州地区, 广州_岗位个数)]

c = Map()
c.add("商家A", map_data, "广州")
c.set_global_opts(
    title_opts=opts.TitleOpts(title="Map-广州地图"),
    visualmap_opts=opts.VisualMapOpts(),
)
c.render_notebook()


# ## 职位分布
# 
# * 知识点：dataframe字符串处理
# > 1. [pandas.series.str](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.html)

# In[65]:


# Titles containing a fullwidth parenthesis usually carry a qualifier,
# e.g. "产品经理（电商）"; split on it to inspect the qualifier part.
has_qualifier = df_PM_gz['job.title'].str.contains('（')
df_PM_gz['job.title'][has_qualifier].str.split('（')


# In[47]:


# Frequency of each distinct job title.
df_PM_gz['job.title'].value_counts()


# In[ ]:




