#!/usr/bin/env python
# coding: utf-8

# # 数据准备-liepin-PM

# ## 请求页面准备
# > 1. 找到页面的数据API接口
# > 2. 提供正确的用户请求酬载（payload）
# > 3. 准备请求的headers，增加cookie信息（用户登录之后的cookie），保证数据的合理性
# 

# In[ ]:


# Prompt the user for the job keyword to search for.
用户输入职位 = input("请输入你要查询的职位：")


# In[75]:


# Mapping from city name to liepin's internal city/district code.
城市编码 = {
    '北京': '010',
    '上海': '020',
    '广州': '050020',
    '深圳': '050090',
    '香港': '320',
}


# In[77]:


# Prompt the user for the city to search in (must be a key of 城市编码).
用户输入城市 = input("请输入你要查询的城市：")


# In[78]:


import requests
import json

# liepin's PC search API endpoint.
url = "https://api-c.liepin.com/api/com.liepin.searchfront4c.pc-search-job"

# Request payload: the main search form plus pass-through tracking fields
# copied from the browser's network panel.
payload = {
    "data": {
        "mainSearchPcConditionForm": {
            "city": 城市编码[用户输入城市],
            "dq": 城市编码[用户输入城市],
            "pubTime": "",
            "currentPage": 0,
            "pageSize": 40,
            "key": 用户输入职位,
            "suggestTag": "",
            "workYearCode": "0",
            "compId": "",
            "compName": "",
            "compTag": "",
            "industry": "",
            "salary": "",
            "jobKind": "",
            "compScale": "",
            "compKind": "",
            "compStage": "",
            "eduLevel": ""
        },
        "passThroughForm": {
            "scene": "input",
            "skId": "",
            "fkId": "",
            "ckId": "h2c8pxojavrmo1w785z7ueih2ybfpux8",
            "suggest": None
        }
    }
}

# Request headers copied from a logged-in browser session.
# NOTE(review): the Cookie below is a personal session token; it expires and
# should be loaded from configuration rather than committed to source.
# The original headers also hard-coded 'Content-Length: 399' (wrong whenever
# the user-supplied key changes the body size) and 'Host: apic.liepin.com'
# (which does not match the URL host 'api-c.liepin.com'); both are removed so
# requests computes them correctly.
headers = {
    'Accept': 'application/json, text/plain, */*',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Content-Type': 'application/json;charset=UTF-8;',
    'Cookie':'inited_user=937bf2abf05c0a4a0822049e5236f8b8; XSRF-TOKEN=VnIEspU2RLCvt1ELhpvrFg; __gc_id=3d60283fdf5448d6a5b23c1476f8db03; __uuid=1687336218332.97; acw_tc=276077d416873362192927982ec0da50e1cff2a01c4cef279ab5f0537cd2d7; __tlog=1687336218424.35%7C00000000%7C00000000%7Cs_o_009%7Cs_o_009; UniqueKey=879111b4b92f3b4e6bc56763ae71c6f2; liepin_login_valid=0; lt_auth=6elcPXYMzw7953iMiGtZsPkb24quAz2b9X4L1BxV0tXvXvWz4P%2FmQAOHqrcE%2BCoIqx19I6ozMLb2Muv9z3FO4koT%2FVGnlZ6utf6k1X0eTudiHuyflMXuqsjQQ5wtrXo6ykpgn2si0HU%3D; access_system=C; user_roles=0; user_photo=5f8fa3a6f6d1ab58476f322808u.png; user_name=%E8%82%96%E5%AE%8F%E6%B4%B2; need_bind_tel=false; new_user=false; c_flag=6d4c6ba33bf56f287e429fe20bb15815; inited_user=937bf2abf05c0a4a0822049e5236f8b8; imId=b5e1273d5cb093f4aeee0209628fef64; imId_0=b5e1273d5cb093f4aeee0209628fef64; imClientId=b5e1273d5cb093f46446a6d1ef9dc5d9; imClientId_0=b5e1273d5cb093f46446a6d1ef9dc5d9; imApp_0=1; __session_seq=3; __uv_seq=3; fe_im_socketSequence_new_0=2_2_2; fe_im_opened_pages=; fe_im_connectJson_0=%7B%220_879111b4b92f3b4e6bc56763ae71c6f2%22%3A%7B%22socketConnect%22%3A%222%22%2C%22connectDomain%22%3A%22liepin.com%22%7D%7D' ,
    'Origin': 'https://www.liepin.com',
    'Pragma': 'no-cache',
    'Referer': 'https://www.liepin.com/',
    'sec-ch-ua': '"Google Chrome";v="111", "Not(A:Brand";v="8", "Chromium";v="111"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
    'Sec-Fetch-Dest': 'empty',
    'Sec-Fetch-Mode': 'cors',
    'Sec-Fetch-Site': 'same-site',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36',
    'X-Client-Type': 'web',
    'X-Fscp-Bi-Stat': '{"location": "https://www.liepin.com/zhaopin/?inputFrom=www_index&workYearCode=0&key=%E4%BA%A7%E5%93%81%E7%BB%8F%E7%90%86&scene=input&ckId=htihov8m2frxgy6ywo2wsg2gncnydzlb&dq="}',
    'X-Fscp-Fe-Version': '',
    'X-Fscp-Std-Info': '{"client_id": "40108"}',
    'X-Fscp-Trace-Id': 'efb01166-77db-44f8-8596-3aadd07423a6',
    'X-Fscp-Version': '1.1',
    'X-Requested-With': 'XMLHttpRequest',
    'X-XSRF-TOKEN': 'VnIEspU2RLCvt1ELhpvrFg'
}

# Send the search request.  `json=` lets requests serialize the payload and
# compute a correct Content-Length for the actual body.
r = requests.post(url, json=payload, headers=headers)
r.raise_for_status()  # fail fast on HTTP errors instead of parsing an error page

# Decode the JSON body of the response.
response_data = r.json()

# Show the raw response so the user can sanity-check the cookie/session.
print(response_data)


# In[79]:


# Total number of result pages reported by the API; drives the pagination
# loop below.  Raises KeyError if the response shape changes (e.g. when the
# session cookie has expired and an error payload is returned instead).
page = response_data['data']['pagination']['totalPage']
page


# ## 翻页获取数据

# In[80]:


import time

import pandas as pd

response_df = []  # one DataFrame of job cards per result page
for i in range(page):  # `page` holds the total page count from the first response
    # Re-use the payload from the first request, only advancing the page index.
    payload['data']['mainSearchPcConditionForm']['currentPage'] = i
    r = requests.post(url, data=json.dumps(payload), headers=headers)
    r.raise_for_status()  # stop on HTTP errors instead of normalizing an error body

    response_data = r.json()
    print(response_data)
    # Flatten the nested job-card records into a tabular DataFrame.
    df = pd.json_normalize(response_data['data']['data']['jobCardList'])
    response_df.append(df)
    time.sleep(1)  # polite delay between pages to avoid tripping rate limits


# In[81]:


# Inspect the list of per-page DataFrames collected above.
response_df


# ## Assemble the data into one table
# > 1. pandas `concat`

# In[82]:


# Stack all pages into a single DataFrame (row indexes repeat per page).
df = pd.concat(response_df)
df


# In[83]:


# The search keyword, reused below to build output file names.
key = payload['data']['mainSearchPcConditionForm']['key']
key


# ## 数据存储

# In[84]:


import time


# In[85]:


time.localtime()


# In[86]:


# Timestamp suffix "MD_HM" (no zero padding) used in output file names.
_now = time.localtime()
output_time = f"{_now.tm_mon}{_now.tm_mday}_{_now.tm_hour}{_now.tm_min}"
output_time


# In[87]:


# Export the raw results, named after the search keyword and timestamp.
df.to_excel(key + '_liepin_' + output_time + '.xlsx')


# # 数据分析
# 
# > 1. Pandas/Numpy
# > 2. Pyecharts(bokeh、matplotlib、seaborn、echarts、Tableau)/更考虑用户的体验

# In[ ]:


import pandas as pd


# In[ ]:


# Reload the exported spreadsheet so the analysis can restart from disk.
df = pd.read_excel(key + '_liepin_' + output_time + '.xlsx')
df


# In[ ]:


df.info()


# ## Keep only the columns worth analysing

# In[ ]:


# Job attributes and company attributes with analytical value.
_columns_of_interest = [
    'job.labels', 'job.refreshTime', 'job.title', 'job.salary', 'job.dq',
    'job.topJob', 'job.requireWorkYears', 'job.requireEduLevel',
    'comp.compStage', 'comp.compName', 'comp.compIndustry', 'comp.compScale',
]
df_PM_gz = df[_columns_of_interest]
df_PM_gz


# ## 地区分布

# In[ ]:


# Postings per district.  Compute value_counts() once and reuse it; the
# original re-evaluated it inside every list comprehension (accidentally
# O(n^2) work and repeated counting passes).
_dq_counts = df_PM_gz['job.dq'].value_counts()
_dq_counts


# In[ ]:


_dq_counts.index.tolist()


# In[ ]:


# District names: keep entries of the form "city-district" and take the part
# after the dash (entries without '-' are city-level and are skipped).
地区 = [name.split('-')[1] for name in _dq_counts.index.tolist() if '-' in name]
地区


# In[ ]:


# Posting counts aligned with 地区 (same filter, same order).
岗位个数 = [int(count) for name, count in _dq_counts.items() if '-' in name]
岗位个数


# In[ ]:


# 可视化：以可视化工具数据形态符合的数据进行输入

from pyecharts import options as opts
from pyecharts.charts import Map
from pyecharts.faker import Faker

c = (
    Map()
    .add(用户输入城市, [list(z) for z in zip(地区, 岗位个数)], 用户输入城市)
    .set_global_opts(
        title_opts=opts.TitleOpts(title="Map-"+用户输入城市+"地图"), visualmap_opts=opts.VisualMapOpts()
    )
    .render( key+"_dq_"+用户输入城市+"_地区分布_"+output_time+".html")
)


# ## 职位分布
# 
# * 知识点：dataframe字符串处理
# > 1. [pandas.series.str](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.html)

# In[ ]:


df_PM_gz['job.title']


# In[ ]:


# Preview: titles containing a full-width '（', with the parenthesised suffix
# stripped (the result would still need merging back into the frame).
_has_paren = df_PM_gz['job.title'].str.contains('（')
df_PM_gz['job.title'][_has_paren].str.split('（').apply(lambda parts: parts[0])


# In[ ]:


def _strip_title(title):
    # Keep only the part before the first '（', '/' or '(' separator.
    return title.split('（')[0].split('/')[0].split('(')[0]


# Frequency of cleaned job titles.
df_job_title = df_PM_gz['job.title'].apply(_strip_title).value_counts()
df_job_title


# In[ ]:


df_job_title.index.tolist()


# In[ ]:


len(df_job_title.index.tolist())


# In[ ]:


df_job_title.values.tolist()


# In[ ]:


# Raw (uncleaned) title counts, for comparison with df_job_title.
df_PM_gz['job.title'].value_counts()


# In[ ]:


# (title, count) pairs for the word cloud.  zip() pairs the two lists in one
# pass instead of re-calling index.tolist()/values.tolist() on every loop
# iteration (the original was accidentally O(n^2)).  The [1:] slice keeps the
# original behaviour of skipping the most frequent title.
PM_title_words = list(zip(df_job_title.index.tolist(), df_job_title.values.tolist()))[1:]
PM_title_words


# In[ ]:


from pyecharts import options as opts
from pyecharts.charts import WordCloud
from pyecharts.globals import SymbolType

# Word cloud of cleaned job-title frequencies.
_title_cloud = WordCloud()
_title_cloud.add("", PM_title_words, word_size_range=[20, 100], shape=SymbolType.DIAMOND)
_title_cloud.set_global_opts(title_opts=opts.TitleOpts(title="WordCloud-shape-diamond"))
c = _title_cloud.render(key + "_wordcloud_map_岗位名称_" + output_time + ".html")


# ## job.labels
# 
# * 目标：统计labels的数量并做词云图

# In[ ]:


df_PM_gz['job.labels']


# In[ ]:


df_PM_gz['job.labels'].values


# In[ ]:


import ast

# Each job.labels cell is the string form of a Python list, e.g.
# "['B端产品', '3年经验']" — presumably written that way by the Excel
# round-trip.  Parse it with ast.literal_eval instead of eval(): the data
# originates from an external service, and eval() would execute arbitrary
# code embedded in it.  Parse once and reuse the result below.
_labels_lists = df_PM_gz['job.labels'].apply(ast.literal_eval).tolist()
_labels_lists


# In[ ]:


# Flatten the per-posting label lists into one flat list of labels.
PM_labels_list = [label for labels in _labels_lists for label in labels]
PM_labels_list


# In[ ]:


# Build (label, count) pairs for the word cloud.  Counter counts in a single
# O(n) pass; the original `list.count` inside a comprehension over set() was
# O(n^2).
from collections import Counter

PM_labels_words = list(Counter(PM_labels_list).items())
PM_labels_words


# In[ ]:


# Word-cloud visualisation of the label frequencies.
from pyecharts import options as opts
from pyecharts.charts import WordCloud
from pyecharts.globals import SymbolType

_label_cloud = WordCloud()
_label_cloud.add("", PM_labels_words, word_size_range=[20, 100], shape=SymbolType.DIAMOND)
_label_cloud.set_global_opts(title_opts=opts.TitleOpts(title="WordCloud-shape-diamond"))
c = _label_cloud.render(key + "_wordcloud_map_职位标签_" + output_time + ".html")


# ## 薪资-（平均薪资）

# In[ ]:


# Rename the API's dotted column names to readable Chinese labels.
df_PM_gz = df_PM_gz.rename(columns={
    'job.labels': '职位标签',
    'job.refreshTime': '职位更新时间',
    'job.title': '职位',
    'job.salary': '薪资',
    'job.dq': '地区',
    'job.topJob': '是否top职位',
    'job.requireWorkYears': '工作年限',
    'job.requireEduLevel': '学历',
    'comp.compStage': '公司融资情况',
    'comp.compName': '公司名称',
    'comp.compIndustry': '行业',
    'comp.compScale': '规模'
})
df_PM_gz


# In[ ]:


# Keep only rows with a concrete salary range: drop "面议" (negotiable) and
# day-rate ("元/天") postings.  na=True treats missing salaries as matching,
# so they are dropped too instead of crashing the boolean mask; .copy() makes
# the subset an independent frame so adding the 平均薪资 column later does
# not trigger SettingWithCopyWarning.
非薪资面议 = df_PM_gz[~df_PM_gz['薪资'].str.contains("面议|元/天", na=True)].copy()
非薪资面议


# In[ ]:


# Split each salary string into its components.  Observed formats:
#   "10-15k"       -> ['10-15k']
#   "10-15k·13薪"  -> ['10-15k', '13']   (13 monthly salaries per year)
非薪资面议_detail = 非薪资面议['薪资'].apply(lambda x: x.split('薪')[0].split('·')).tolist()
非薪资面议_detail


# In[ ]:


# Worked example: midpoint of 10-15k, rescaled from 13 salaries to 12 months.
(10+15)/2*13/12


# In[ ]:


def _monthly_average(parts):
    """Return the average monthly salary (in k) from split salary parts.

    parts[0] is a "low-highk" range; when parts[1] exists it is the number of
    monthly salaries per year, and the midpoint is rescaled to a 12-month
    basis and rounded to one decimal.
    """
    low, high = parts[0].split('-')
    midpoint = (int(low) + int(high.split('k')[0])) / 2
    if len(parts) == 1:
        return midpoint
    return round(midpoint * int(parts[1]) / 12, 1)


# Average monthly salary per posting, aligned with 非薪资面议's rows.
平均薪资 = [_monthly_average(parts) for parts in 非薪资面议_detail]
平均薪资


# In[ ]:


len(平均薪资)


# In[ ]:


# Attach the computed averages as a new column.
# NOTE(review): 非薪资面议 comes from boolean indexing on df_PM_gz, so pandas
# may emit SettingWithCopyWarning here — confirm the assignment lands on the
# filtered frame and not a discarded view.
非薪资面议['平均薪资']=平均薪资


# In[ ]:


非薪资面议


# In[ ]:


# Median of the per-posting average salary, grouped by district.
分地区_平均薪资 = 非薪资面议.groupby('地区').agg({'平均薪资': 'median'})
分地区_平均薪资


# In[ ]:


# Per-district medians rounded to one decimal, in groupby (index) order.
分地区_平均薪资_values = [round(row[0], 1) for row in 分地区_平均薪资.values.tolist()]
分地区_平均薪资_values


# In[ ]:


# District labels matching the values above.
分地区_平均薪资_index = 分地区_平均薪资.index.tolist()
分地区_平均薪资_index


# In[ ]:


from pyecharts import options as opts
from pyecharts.charts import Bar
from pyecharts.faker import Faker


# Bar chart of median salary per district.  The [1:] slices keep the original
# behaviour of dropping the first (non "city-district") entry.
_bar = Bar()
_bar.add_xaxis([name.split('-')[1] for name in 分地区_平均薪资_index[1:]])
_bar.add_yaxis("地区", 分地区_平均薪资_values[1:])
_bar.set_global_opts(
    title_opts=opts.TitleOpts(title="PM-分地区-中位数薪资"),
    brush_opts=opts.BrushOpts(),
)
c = _bar.render(key + "_bar_with_brush_地区薪资中位数_" + output_time + '.html')
# c.render_notebook()


# In[ ]:


# Mean average-salary by years of experience.
df_year_salary = 非薪资面议.groupby('工作年限').agg({'平均薪资': 'mean'})
df_year_salary


# In[ ]:


# Mean average-salary by years of experience and education level.
df_year_edulevel = 非薪资面议.groupby(['工作年限', '学历']).agg({'平均薪资': 'mean'})
df_year_edulevel


# In[ ]:


# Mean average-salary by industry.
df_industry = 非薪资面议.groupby('行业').agg({'平均薪资': 'mean'})
df_industry


# In[ ]:


# Write the three aggregate tables into one workbook, one sheet each.
with pd.ExcelWriter(key + '_' + output_time + '_.xlsx') as writer:
    df_year_salary.to_excel(writer, sheet_name='分工作年限平均薪资')
    df_year_edulevel.to_excel(writer, sheet_name='分学历平均薪资')
    df_industry.to_excel(writer, sheet_name='分行业平均薪资')


# In[ ]:





# In[ ]:





# In[ ]:





# In[ ]:




