import csv
import json
from urllib.parse import urlencode

import requests
import time


def saveHtml(file_name, file_content):
    """Save *file_content* (bytes) to disk as '<file_name>.html'.

    Slashes in *file_name* are replaced with underscores so the name is a
    valid filename rather than a path.
    """
    with open(file_name.replace('/', '_') + '.html', 'wb') as f:
        f.write(file_content)
def GetData(url, writer):
    """Fetch one search-result page from *url*, keep a raw HTML/JSON dump,
    and append one CSV row per job posting via *writer*.

    The response is expected to be JSON with the shape
    data -> results -> [job dicts]  (assumption based on the keys read
    below -- confirm against the live API).
    """
    response = requests.get(url)
    data = response.content
    saveHtml('zlzp', data)  # keep the raw response on disk for debugging
    jsondata = json.loads(data)
    dataList = jsondata['data']['results']
    for dic in dataList:
        jobName = dic['jobName']                # job title
        company = dic['company']['name']        # company name
        salary = dic['salary']                  # salary
        city = dic['city']['display']           # city
        jobtype = dic['jobType']['display']     # industry / job type
        eduLevel = dic['eduLevel']['name']      # education requirement
        workingExp = dic['workingExp']['name']  # work-experience requirement
        print(jobName, company, salary, city, jobtype, eduLevel, workingExp)
        writer.writerow([jobName, company, salary, city, jobtype, eduLevel, workingExp])
# Query parameters for the Zhaopin search API.
param = {
    'start': 0,
    'pageSize': 60,
    'cityId': 489,
    'workExperience': -1,
    'education': -1,
    'companyType': -1,
    'employmentType': -1,
    'jobWelfareTag': -1,
    'kw': 'BI工程师',  # search keyword -- change to crawl other job titles
    'kt': 3,
    # NOTE(review): 'jl' 681 here vs cityId 489 above -- confirm which the API uses
    'lastUrlQuery': {"p": 1, "pageSize": "60", "jl": "681", "kw": "python", "kt": "3"},
}

pages = range(1, 31)  # crawl pages 1-30

# `with` guarantees the CSV file is closed even if a request fails mid-crawl.
with open('test.csv', 'w', newline='') as out_f:
    writer = csv.writer(out_f)
    writer.writerow(['jobName', 'company', 'salary', 'city',
                     'jobtype', 'eduLevel', 'workingExp'])
    for p in pages:  # paginate automatically
        param['start'] = (p - 1) * 60
        param['lastUrlQuery']['p'] = p
        url = 'https://fe-api.zhaopin.com/c/i/sou?' + urlencode(param)
        GetData(url, writer)
        time.sleep(3)  # pause 3 s between pages to avoid an IP ban
        print(p)