#!/usr/bin/env python
# coding: utf-8

# # 数据准备-liepin-PM

# ## 请求页面准备
# > 1. 找到页面的数据API接口
# > 2. 提供正确的用户请求酬载（payload）
# > 3. 准备请求的headers，增加cookie信息（用户登录之后的cookie），保证数据的合理性
# 

# In[1]:


# Search keyword supplied interactively (e.g. 产品经理 / "product manager");
# it is inserted into the API payload below and reused in output file names.
用户输入职位 = input("请输入你要查询的职位：")


# In[2]:


import requests
import json

# Job-search API endpoint behind liepin.com's search page.
url = "https://apic.liepin.com/api/com.liepin.searchfront4c.pc-search-job"

# Query payload reverse-engineered from the site's own XHR request.
payload = {
    "data": {
        "mainSearchPcConditionForm": {
            "city": "050020",      # region code — presumably Guangzhou; TODO confirm
            "dq": "050020",
            "pubTime": "",
            "currentPage": 0,      # 0-based; rewritten per page by the loop below
            "pageSize": 40,        # results per page; the page count math relies on this
            "key": 用户输入职位,    # user-supplied search keyword
            "suggestTag": "",
            "workYearCode": "0",
            "compId": "",
            "compName": "",
            "compTag": "",
            "industry": "",
            "salary": "",
            "jobKind": "",
            "compScale": "",
            "compKind": "",
            "compStage": "",
            "eduLevel": ""
        },
        "passThroughForm": {
            "scene": "input",
            "skId": "",
            "fkId": "",
            "ckId": "h2c8pxojavrmo1w785z7ueih2ybfpux8",
            "suggest": None
        }
    }
}

# Headers copied from a logged-in browser session.
# FIX: the hard-coded 'Content-Length: 412' header was removed — the real body
# length varies with the search keyword, and a stale fixed value can corrupt or
# stall the request; requests computes the correct length automatically.
# NOTE(review): the Cookie and X-XSRF-TOKEN values are session credentials that
# expire — refresh them from the browser when requests start failing.
headers = {
    'Accept': 'application/json, text/plain, */*',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Content-Type': 'application/json;charset=UTF-8;',
    'Cookie':'inited_user=daf7251f92024e8969feb28b0e9ad34c; __gc_id=0baa2ddaa7774d8fba2b9c2c3d8ba166; __uuid=1670205465393.76; XSRF-TOKEN=XMz5EHIASaeNsiKARaDj0g; _ga=GA1.1.1094788215.1685529645; __tlog=1685529652687.84%7C00000000%7C00000000%7C00000000%7C00000000; Hm_lvt_a2647413544f5a04f00da7eee0d5e200=1685529653; acw_tc=276077ce16855296532255003e6b8997d19c6391bf98b9112b34575fce6981; access_system=C; user_roles=0; user_photo=5f8fa3a679c7cc70efbf444e08u.png; user_name=%E8%AE%B8%E6%99%BA%E8%B6%85; need_bind_tel=false; new_user=false; c_flag=fa43f4d55f3df63a96a7b4f194e214d4; inited_user=daf7251f92024e8969feb28b0e9ad34c; imId=c5f9b89f8466dffe6882ca1e5431db9c; imId_0=c5f9b89f8466dffe6882ca1e5431db9c; imClientId=c5f9b89f8466dffeb1921abcfab3aed0; imClientId_0=c5f9b89f8466dffeb1921abcfab3aed0; imApp_0=1; UniqueKey=95507c72a8d5ae141a667e00ad0d9493; liepin_login_valid=0; lt_auth=v%2BcIPHQGxlzxtXfR3zQN4vociI39UWvIpX8EhE0Ahoe%2BCqG04PngSwOGq7EExAMhkRggdMULN7j7MOv%2FyndL7kUVwGqnl4CyvOW92GECSuBcN8W2vezHl8zRQpQcl0AC8nFbtkIL%2BQ%3D%3D; __session_seq=11; __uv_seq=11; Hm_lpvt_a2647413544f5a04f00da7eee0d5e200=1685531395; fe_im_socketSequence_new_0=8_7_7; fe_im_connectJson_0=%7B%220_95507c72a8d5ae141a667e00ad0d9493%22%3A%7B%22socketConnect%22%3A%222%22%2C%22connectDomain%22%3A%22liepin.com%22%7D%7D; fe_im_opened_pages=; _ga_54YTJKWN86=GS1.1.1685529644.1.1.1685531410.0.0.0',
    'Host': 'apic.liepin.com',
    'Origin': 'https://www.liepin.com',
    'Pragma': 'no-cache',
    'Referer': 'https://www.liepin.com/',
    'sec-ch-ua': '"Google Chrome";v="111", "Not(A:Brand";v="8", "Chromium";v="111"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"macOS"',
    'Sec-Fetch-Dest': 'empty',
    'Sec-Fetch-Mode': 'cors',
    'Sec-Fetch-Site': 'same-site',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36',
    'X-Client-Type': 'web',
    'X-Fscp-Bi-Stat': '{"location": "https://www.liepin.com/zhaopin/?inputFrom=www_index&workYearCode=0&key=%E4%BA%A7%E5%93%81%E7%BB%8F%E7%90%86&scene=input&ckId=htihov8m2frxgy6ywo2wsg2gncnydzlb&dq="}',
    'X-Fscp-Fe-Version': '',
    'X-Fscp-Std-Info': '{"client_id": "40108"}',
    'X-Fscp-Trace-Id': 'fea335b6-f4a4-42fd-9bd8-6fe41ffec413',
    'X-Fscp-Version': '1.1',
    'X-Requested-With': 'XMLHttpRequest',
    'X-XSRF-TOKEN': 'XMz5EHIASaeNsiKARaDj0g'
}

# POST the query; json= serializes the payload and lets requests fill in the
# correct Content-Length. A timeout prevents hanging forever on a dead server,
# and raise_for_status fails loudly instead of parsing an error page as JSON.
r = requests.post(url, json=payload, headers=headers, timeout=30)
r.raise_for_status()

# Decode the JSON response body.
response_data = r.json()

# Inspect the raw response.
print(response_data)


# In[3]:


import math


# In[4]:


# Number of result pages = ceil(total job count / page size of 40).
page = math.ceil(response_data['data']['pagination']['totalCounts']/40)
page


# In[5]:


# Worked example: 601 results / 40 per page = 15.025 ...
601/40


# In[6]:


# ... which rounds up to 16 pages.
math.ceil(601/40)


# ## 翻页获取数据

# In[7]:


import pandas as pd

# Walk every result page, re-posting the same query with an updated page
# index, and collect one flattened DataFrame per page.
response_df = []
for page_index in range(page):  # `page` = total number of result pages
    payload['data']['mainSearchPcConditionForm']['currentPage'] = page_index

    # POST this page's query and decode the JSON body.
    r = requests.post(url, data=json.dumps(payload), headers=headers)
    response_data = r.json()
    print(response_data)

    # Flatten the nested job-card records into a tabular frame.
    response_df.append(pd.json_normalize(response_data['data']['data']['jobCardList']))


# In[8]:


response_df


# ## 数据整理成为表格
# > 1. pandas 中的concat方法

# In[9]:


# Stack the per-page frames into one table.
# NOTE(review): row indices repeat across pages; pass ignore_index=True if a
# unique index is ever needed.
df = pd.concat(response_df)
df


# In[10]:


# The search keyword, reused below for output file names.
key = payload['data']['mainSearchPcConditionForm']['key']
key


# ## 数据存储

# In[11]:


import time


# In[12]:


# Current local time as a struct_time (exploration).
time.localtime()


# In[13]:


# Timestamp for output file names, zero-padded "MMDD_HHMM".
# FIX: strftime replaces the old unpadded string concatenation, whose output
# was ambiguous (e.g. "111_123" could mean Jan 11 or Nov 1) and sorted badly.
output_time = time.strftime('%m%d_%H%M')
output_time


# In[14]:


# Export the scraped table, named by search keyword and timestamp
# (e.g. "产品经理_liepin_61_1430.xlsx").
df.to_excel( key +'_liepin_'+output_time+'.xlsx')


# # 数据分析
# 
# > 1. Pandas/Numpy
# > 2. Pyecharts(bokeh、matplotlib、seaborn、echarts、Tableau)/更考虑用户的体验

# In[15]:


import pandas as pd


# In[16]:


# Reload the scraped data from the exported workbook so the analysis part can
# run without re-scraping.
# NOTE(review): the exported row index comes back as an extra "Unnamed: 0"
# column — confirm whether it should be dropped (index_col=0).
df = pd.read_excel(key+'_liepin_'+output_time+'.xlsx')
df


# In[17]:


# Column dtypes and non-null counts.
df.info()


# ## 筛选存在数据分析价值的列

# In[18]:


# Keep only the columns with analysis value.
analysis_columns = [
    'job.labels',
    'job.refreshTime',
    'job.title',
    'job.salary',
    'job.dq',
    'job.topJob',
    'job.requireWorkYears',
    'job.requireEduLevel',
    'comp.compStage',
    'comp.compName',
    'comp.compIndustry',
    'comp.compScale',
]
df_PM_gz = df[analysis_columns]
df_PM_gz


# ## Distribution of PM postings across Guangzhou districts

# In[19]:


# Count postings per district value ("广州-天河区", ...).
df_PM_gz['job.dq'].value_counts()


# In[20]:


# District names: take the part after "city-district".
# NOTE(review): [1:] skips the first (most frequent) entry — presumably the
# city-level "广州" value that has no "-" separator; confirm against the data.
广州地区 = [
    full_dq.split('-')[1]
    for full_dq in df_PM_gz['job.dq'].value_counts().index.tolist()[1:]
]
广州地区


# In[21]:


# Matching job counts, aligned with 广州地区 by the same [1:] slice.
广州_岗位个数 = df_PM_gz['job.dq'].value_counts().values.tolist()[1:]
广州_岗位个数


# In[22]:


# Visualisation: render the per-district posting counts on a Guangzhou map.
# The chart library expects [name, value] pairs as input.

from pyecharts import options as opts
from pyecharts.charts import Map
from pyecharts.faker import Faker

district_pairs = [list(pair) for pair in zip(广州地区, 广州_岗位个数)]

chart = Map()
chart.add("广州", district_pairs, "广州")
chart.set_global_opts(
    title_opts=opts.TitleOpts(title="Map-广州地图"),
    visualmap_opts=opts.VisualMapOpts(),
)
# render() writes the HTML file and returns its path.
c = chart.render( key+"_dq_map_地区分布_"+output_time+".html")


# ## 职位分布
# 
# * 知识点：dataframe字符串处理
# > 1. [pandas.series.str](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.html)

# In[23]:


# Raw job titles.
df_PM_gz['job.title']


# In[24]:


# Exploration: titles containing a full-width "（", truncated at the bracket.
# (The result is not merged back; the cleaned counts below redo the split on
# the full column instead.)
df_PM_gz['job.title'][   df_PM_gz['job.title'].str.contains('（')   ].str.split('（').apply(lambda x:x[0])


# In[25]:


# Cleaned counts: strip everything after a full-width bracket, a slash, or an
# ASCII bracket, then count each normalised title.
df_job_title = df_PM_gz['job.title'].apply(lambda x:x.split('（')[0].split('/')[0].split('(')[0]).value_counts()
df_job_title


# In[26]:


df_job_title.index.tolist()


# In[27]:


len(df_job_title.index.tolist())


# In[28]:


df_job_title.values.tolist()


# In[29]:


# The raw, uncleaned title counts, for comparison with df_job_title.
df_PM_gz['job.title'].value_counts()


# In[30]:


# Pair each cleaned title with its count for the word cloud.
# NOTE(review): the [1:] slice drops the single most frequent title, mirroring
# the original range(1, len) loop — confirm that is intentional.
title_names = df_job_title.index.tolist()
title_counts = df_job_title.values.tolist()
PM_title_words = list(zip(title_names, title_counts))[1:]
PM_title_words


# In[31]:


from pyecharts import options as opts
from pyecharts.charts import WordCloud
from pyecharts.globals import SymbolType

# Word cloud of cleaned job titles, sized by posting count.
title_cloud = WordCloud()
title_cloud.add("", PM_title_words, word_size_range=[20, 100], shape=SymbolType.DIAMOND)
title_cloud.set_global_opts(title_opts=opts.TitleOpts(title="WordCloud-shape-diamond"))
# render() writes the HTML file and returns its path.
c = title_cloud.render( key +"_wordcloud_map_岗位名称_"+ output_time+".html")


# ## job.labels
# 
# * 目标：统计labels的数量并做词云图

# In[32]:


# The labels column holds stringified Python lists (they round-tripped
# through Excel as text); inspect the raw values first.
df_PM_gz['job.labels']


# In[33]:


df_PM_gz['job.labels'].values


# In[34]:


# Parse the stringified lists back into real lists.
# FIX: ast.literal_eval only accepts Python literals, unlike eval(), which
# would execute arbitrary code embedded in scraped data.
import ast
df_PM_gz['job.labels'].apply(ast.literal_eval).tolist()


# In[35]:


# Flatten the list-of-lists of labels into one flat list.
PM_labels_list = [label
                  for labels in df_PM_gz['job.labels'].apply(ast.literal_eval).tolist()
                  for label in labels]
PM_labels_list


# In[36]:


# Count each label once.
# FIX: Counter is a single O(n) pass; the old list.count-per-unique-element
# approach was O(n^2).
from collections import Counter

PM_labels_words = list(Counter(PM_labels_list).items())
PM_labels_words


# In[37]:


# Word cloud of job labels, sized by occurrence count.
from pyecharts import options as opts
from pyecharts.charts import WordCloud
from pyecharts.globals import SymbolType

labels_cloud = WordCloud()
labels_cloud.add("", PM_labels_words, word_size_range=[20, 100], shape=SymbolType.DIAMOND)
labels_cloud.set_global_opts(title_opts=opts.TitleOpts(title="WordCloud-shape-diamond"))
# render() writes the HTML file and returns its path.
c = labels_cloud.render( key +"_wordcloud_map_职位标签_"+ output_time+".html")


# ## 薪资-（平均薪资）

# In[38]:


# columns 重命名
# Rename the flattened API columns ("job.*" / "comp.*") to readable
# Chinese analysis names.
df_PM_gz = df_PM_gz.rename(columns={
    'job.labels':'职位标签',
    'job.refreshTime':'职位更新时间',
    'job.title':'职位',
    'job.salary':'薪资',
    'job.dq':'地区',
    'job.topJob':'是否top职位',
    'job.requireWorkYears':'工作年限',
    'job.requireEduLevel':'学历',
    'comp.compStage':'公司融资情况',
    'comp.compName':'公司名称',
    'comp.compIndustry':'行业',
    'comp.compScale':'规模'
})
df_PM_gz


# In[39]:


# Keep only rows with a numeric monthly salary range: drop "面议" (negotiable)
# and per-day "元/天" quotes.
# FIX: .copy() detaches the result from df_PM_gz so the later 平均薪资 column
# assignment cannot trigger SettingWithCopyWarning or silently write to a view.
非薪资面议 = df_PM_gz [ ~df_PM_gz['薪资'].str.contains("面议|元/天")].copy()
非薪资面议


# In[40]:


# Split each salary string into parts:
# "15-25k·14薪" -> ['15-25k', '14'];  "15-25k" -> ['15-25k'].
非薪资面议_detail = 非薪资面议['薪资'].apply(lambda x:x.split('薪')[0].split('·')).tolist()
非薪资面议_detail


# In[41]:


# Worked example: midpoint of 10-15k over 13 pay months, spread across 12.
(10+15)/2*13/12


# In[42]:


def _平均月薪(detail):
    """Average monthly salary (in k) for one parsed salary entry.

    `detail` is ['<low>-<high>k'] or ['<low>-<high>k', '<months>'].  With a
    months component, the annual total (midpoint * months) is spread over 12
    months and rounded to one decimal place.
    """
    low, high = detail[0].split('-')
    midpoint = (int(low) + int(high.split('k')[0])) / 2
    if len(detail) == 1:
        return midpoint
    return round(midpoint * int(detail[1]) / 12, 1)


# FIX: the original one-line double-branch comprehension repeated the midpoint
# arithmetic twice and was unreadable; the helper keeps the math unchanged.
平均薪资 = [_平均月薪(i) for i in 非薪资面议_detail]
平均薪资


# In[43]:


len(平均薪资)


# In[44]:


# Attach the computed averages as a new column (list order matches row order).
# NOTE(review): 非薪资面议 is a boolean-filtered slice of df_PM_gz, so this
# assignment can raise pandas' SettingWithCopyWarning — confirm the column
# actually lands on the frame used below.
非薪资面议['平均薪资']=平均薪资


# In[45]:


非薪资面议


# In[46]:


# Median of the per-posting average salary, grouped by district.
分地区_平均薪资 = 非薪资面议.groupby('地区')[['平均薪资']].median()
分地区_平均薪资


# In[47]:


# One rounded median per district (each .values row is a one-element list).
分地区_平均薪资_values =  [round(row[0], 1) for row in 分地区_平均薪资.values.tolist()]
分地区_平均薪资_values


# In[48]:


# The district labels, in the same order as the values above.
分地区_平均薪资_index = 分地区_平均薪资.index.tolist()
分地区_平均薪资_index


# In[49]:


from pyecharts import options as opts
from pyecharts.charts import Bar
from pyecharts.faker import Faker


# Bar chart of median salary per district; the [1:] slice skips the first
# (city-level) entry, matching the district lists built earlier.
district_labels = [dq.split('-')[1] for dq in 分地区_平均薪资_index[1:]]

bar = Bar()
bar.add_xaxis(district_labels)
bar.add_yaxis("地区",分地区_平均薪资_values[1:])
bar.set_global_opts(
    title_opts=opts.TitleOpts(title="PM-分地区-中位数薪资"),
    brush_opts=opts.BrushOpts(),
)
# render() writes the HTML file and returns its path.
c = bar.render( key + "_bar_with_brush_地区薪资中位数_"+output_time+'.html')
# c.render_notebook()


# In[50]:


# Mean average-salary grouped by required years of experience.
df_year_salary = 非薪资面议.groupby('工作年限')[['平均薪资']].mean()
df_year_salary


# In[51]:


# Mean average-salary grouped by experience AND education level.
df_year_edulevel =  非薪资面议.groupby(['工作年限','学历'])[['平均薪资']].mean()
df_year_edulevel


# In[52]:


# Mean average-salary grouped by company industry.
df_industry = 非薪资面议.groupby('行业')[['平均薪资']].mean()
df_industry


# In[53]:


# Export each summary table to its own sheet of one workbook.
with pd.ExcelWriter(key+'_'+output_time+'_.xlsx') as writer:  
    df_year_salary.to_excel(writer, sheet_name='分工作年限平均薪资')
    df_year_edulevel.to_excel(writer, sheet_name='分学历平均薪资')
    df_industry.to_excel(writer, sheet_name='分行业平均薪资')


# In[ ]:





# In[ ]:





# In[ ]:





# In[ ]:




