#!/usr/bin/env python
# coding: utf-8

# ## 51job作业要求
# > * 1.url模板可以自动生成关键词 （构建参数模板）
# - 2.分类部分（任选3个构建参数模板）希望有工作职能
# - 3.页面内容（额外：如果批量抓取时网站变慢或报错，可以尝试 time.sleep + random.randint(15,30)）
# - 4.尝试翻页(登录会有更多数据)
# - 5.详细页内容

# ## 准备
# - 网络请求使用 requests、requests_html、urllib

# In[90]:


import json
import re
import urllib.parse

import pandas as pd
import requests
import requests_html
from requests_html import HTMLSession


# In[91]:


# Fetch the 51job search landing page; it embeds (in a <script> tag) the
# URL of a JS file that defines every search-filter parameter.
url = 'https://search.51job.com/list/030200,000000,0000,00,9,99,%2B,2,1.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare='
session = HTMLSession()
res = session.get(url)
res


# In[92]:


# The 3rd-from-last <script> holds the filter-template URL as its 4th
# double-quoted token; the URL is protocol-relative, so prepend "http:".
script_html = res.html.xpath('//script')[-3].html
search_footer = "http:" + script_html.split('"')[3]
search_footer


# In[93]:


session.get(search_footer).html.html


# In[94]:


# Download the filter template; 51job serves it GBK-encoded, so decode
# with errors ignored (first expression is for notebook display only).
resp = requests.get(search_footer)
resp.content.decode("gbk", "ignore")
js参数模板 = resp.content.decode("gbk", "ignore")
js参数模板


# ## 公司性质

# In[95]:


# --- Company type (公司性质) codes ---------------------------------------
# Slice the cotype fragment out of the downloaded JS template.
cottype_01 = js参数模板.split("search_cottype=")[1].split(",window.d_search_workyear")[0]
cottype_01


# In[96]:


# Strip the JS object-literal punctuation, leaving "code=label" pairs.
cottype_02 = cottype_01
for _junk in ('k:"', '}', '{', '[', ']', '"'):
    cottype_02 = cottype_02.replace(_junk, '')
cottype_03 = cottype_02.replace(',v:', '=')
cottype = cottype_03.split(',')
cottype


# In[97]:


# Human-readable labels, in the same order the JS template lists them.
cottype_str = ["所有","国企","外资（欧美）","外资（非欧美）","上市公司","合资","民营公司","外企代表处","政府机关","事业单位","非营利组织","创业公司"]
cottype_str


# In[98]:


# Map label -> numeric code used by the "cotype" query parameter.
cottype_参数构建 = {label: cottype[idx].split('=')[0] for idx, label in enumerate(cottype_str)}
cottype_参数构建


# ## 工作年限

# In[99]:


# --- Work experience (工作年限) codes ------------------------------------
# Same extraction pattern as for cottype above.
workyear_01 = js参数模板.split("search_workyear=")[1].split(",window.d_search_providesalary")[0]
workyear_02 = workyear_01
for _junk in ('k:"', '}', '{', '[', ']', '"'):
    workyear_02 = workyear_02.replace(_junk, '')
workyear_03 = workyear_02.replace(',v:', '=')
workyear = workyear_03.split(',')


# In[100]:


workyear_str = ["所有","在校生/应届生","1-3年","3-5年","5-10年","10年以上","无需经验"]
# Map label -> numeric code used by the "workyear" query parameter.
workyear_参数构建 = {workyear_str[i]: entry.split('=')[0] for i, entry in enumerate(workyear)}
workyear_参数构建


# ## 学历要求

# In[101]:


# --- Education level (学历要求) codes ------------------------------------
# Same extraction pattern as for cottype above.
degreefrom_01 = js参数模板.split("search_degreefrom=")[1].split(",window.d_search_jobterm")[0]
degreefrom_02 = degreefrom_01
for _junk in ('k:"', '}', '{', '[', ']', '"'):
    degreefrom_02 = degreefrom_02.replace(_junk, '')
degreefrom_03 = degreefrom_02.replace(',v:', '=')
degreefrom = degreefrom_03.split(',')


# In[102]:


degreefrom_str = ["所有","初中及以下","高中/中技/中专","大专","本科","硕士","博士","无学历要求"]
# Map label -> numeric code used by the "degreefrom" query parameter.
degreefrom_参数构建 = {degreefrom_str[i]: entry.split('=')[0] for i, entry in enumerate(degreefrom)}
degreefrom_参数构建


# ## 构建参数模板

# ### 解析url

# In[103]:


# Decompose the search URL into its six urlparse components so the query
# string (index 4) can be swapped out and the URL rebuilt later.
参数模版 = urllib.parse.urlparse(url)
参数模版_list = list(参数模版)
print("参数模版_list=",参数模版_list,'\n')


# In[104]:


# Query string -> {param: value}; empty values such as "line=" are kept.
参数模版_dict = {}
for pair in 参数模版.query.split("&"):
    parts = pair.split("=")
    参数模版_dict[parts[0]] = parts[1]
参数模版_dict


# ### 生成参数模板-·公司性质·工作年限·学历要求

# In[105]:


def url_参数模板生成(cotype,workyear,degreefrom):
    """Build a 51job search URL for the given filter labels.

    The labels are the Chinese strings from cottype_str / workyear_str /
    degreefrom_str. Mutates the module-level 参数模版_dict and
    参数模版_list in place, then reassembles the URL with urlunparse.
    """
    参数模版_dict["cotype"] = cottype_参数构建[cotype]
    参数模版_dict["workyear"] = workyear_参数构建[workyear]
    参数模版_dict["degreefrom"] = degreefrom_参数构建[degreefrom]
    # Rebuild "k1=v1&k2=v2&..." in the dict's insertion order.
    参数模版_list[4] = "&".join(k + "=" + v for k, v in 参数模版_dict.items())
    return urllib.parse.urlunparse(参数模版_list)


# ## 调用参数

# In[106]:


# Example: any company type, fresh graduate, bachelor degree.
url_参数模板生成('所有','在校生/应届生','本科')


# In[107]:


# Reference: the equivalent hand-written URL (cotype=05, degreefrom=04).
url_01 = "https://search.51job.com/list/030200,000000,0000,00,9,99,%2B,2,1.html?lang=c&postchannel=0000&workyear=99&cotype=05&degreefrom=04&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare="
url_01


# ## 加入关键词·翻页

# ### 关键词

# In[109]:


# The "%2B" placeholder in the URL path is where the percent-encoded
# search keyword goes; substitute "运营" there.
cotype = "所有"
workyear = "在校生/应届生"
degreefrom = "本科"
keyword = urllib.parse.quote("运营")
url = url_参数模板生成(cotype, workyear, degreefrom)
url_end = url.replace('%2B', keyword)
url_end


# ### 翻页

# In[392]:


# Build the first nine result-page URLs; the page number is the last
# path component before ".html" (keyword 运营 already percent-encoded).
pages = [
    "https://search.51job.com/list/030200,000000,0000,00,9,99,%E8%BF%90%E8%90%A5,2,{page}.html?lang=c&postchannel=0000&workyear=01&cotype=99&degreefrom=04&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare=".format(page = p)
    for p in range(1, 10)
]
pages


# In[393]:


# Fetch every page (a fresh HTMLSession per request, as the original
# loop did) and keep the raw responses for parsing below.
page_all = [HTMLSession().get(page_url) for page_url in pages]
page_all


# ## 第一页详情内容

# ### 法一

# In[358]:


# Re-fetch the filtered first page (url_end carries the keyword and the
# chosen filter codes) for the detail-extraction examples below.
session = HTMLSession()
res = session.get(url_end)
res


# In[359]:


# Pull the inline search-result payload: the 4th-from-last <script>
# assigns it to window.__SEARCH_RESULT__.
example_01  = res.html.xpath('//script')[-4].html.split("__SEARCH_RESULT__ =")[1].split("</script>")[0]
# FIX: the original `eval(__SEARCH_RESULT__)` referenced an undefined
# Python name and raised NameError, ignoring the extracted text. Parse
# the captured JSON instead; json.loads also avoids eval() on
# untrusted remote content.
results = json.loads(example_01)
results


# In[360]:


# Number of job postings in this page's payload (notebook display).
len(results['engine_search_result'])
# Tabulate the postings for inspection/export.
zhaopin_content = pd.DataFrame(results['engine_search_result'])
zhaopin_content


# ### 法二 正则公式

# In[375]:


# Method 2: regex-capture the JSON assigned to window.__SEARCH_RESULT__
# (re.S lets "." span newlines). findall returns a list of captures;
# join them into a single string for the JSON parser.
example_02 = re.findall('window.__SEARCH_RESULT__ = (.*?)</script>', res.text, re.S)
string = ''.join(example_02)


# In[376]:


# Parse the captured JSON text (stdlib json) and keep only the list of
# postings under "engine_search_result".
info_dict = json.loads(string)
dit_py = info_dict['engine_search_result']
dit_py


# In[413]:


# Tabulate the postings (same shape as method 1's zhaopin_content).
pd.DataFrame(dit_py)


# ## 翻页导出

# In[402]:


# Extract and parse the search-result payload from every fetched page.
content_01 = []
for pg in page_all:
    content = re.findall('window.__SEARCH_RESULT__ = (.*?)</script>', pg.text, re.S)
    string = ''.join(content)
    # FIX: the original `eval(__SEARCH_RESULT__)` referenced an
    # undefined Python name (NameError) and never used the captured
    # text. Parse the regex capture with json.loads instead.
    content_01.append(json.loads(string))
content_01


# In[411]:


# Copy the first nine page payloads into a separate list.
content_02 = [content_01[pg] for pg in range(9)]
content_02


# In[ ]:




