import requests
from bs4 import BeautifulSoup
import csv
import time
"""Scrape Python job postings from liepin.com search pages into a CSV.

For each listing page this script walks every job card, follows the job's
detail page for the requirement text, and appends one row per job to the
output CSV. It stops when the pager's "next" link is exhausted.
"""

HEADERS = {'user-agent': 'Mozilla/5.0'}
BASE_URL = 'https://www.liepin.com'
OUT_PATH = '爬虫\\招聘1.csv'
START_URL = 'https://www.liepin.com/zhaopin/?compkind=&dqs=&pubTime=&pageSize=40&salary=&compTag=&sortFlag=&degradeFlag=0&compIds=&subIndustry=&jobKind=&industries=&compscale=&key=python&siTag=I-7rQ0e90mv8a37po7dV3Q%7EfA9rXquZc5IkJpXC-Ycixw&d_sfrom=search_fp&d_ckId=2885e594ba58993d0eb1010ed763aa11&d_curPage=0&d_pageSize=40&d_headId=2885e594ba58993d0eb1010ed763aa11&curPage=9'


def _fetch_soup(url):
    """GET *url* and return its parsed HTML body."""
    resp = requests.get(url, headers=HEADERS)
    return BeautifulSoup(resp.text, 'html.parser')


def _save_row(row):
    """Append one record to the output CSV (file is created on first write)."""
    with open(OUT_PATH, 'a+', encoding='utf8', newline='') as f:
        csv.writer(f).writerow(row)


def _scrape_page(soup):
    """Scrape every job card on one listing-page soup and write CSV rows."""
    for content in soup.find_all(class_='job-info'):
        link = content.h3.a if content.h3 is not None else None
        if link is None or content.p is None:
            continue  # defensive: skip malformed cards instead of crashing

        job_info = [link.get_text(strip=True)]
        job_url = link['href']
        if 'http' not in job_url:
            # BUG FIX: original prefixed bare 'https://liepin.com' here while
            # the pager used 'https://www.liepin.com' — use one canonical host.
            job_url = BASE_URL + job_url
        # The card's title attribute packs several fields separated by '_'.
        job_info.extend(content.p['title'].split('_'))
        job_info.append(job_url)

        detail = _fetch_soup(job_url)
        # BUG FIX: the original searched class 'company-name' on the *listing*
        # soup, so every row on a page got the first company of that page.
        # Prefer the job's own detail page; fall back to the listing lookup.
        company = detail.find(class_='company-name') or soup.find(class_='company-name')
        if company is not None and company.a is not None:
            job_info.append(company.a.get_text(strip=True))
            job_info.append(company.a['href'])
        else:
            job_info.extend(['', ''])  # keep the column count stable

        requirement = detail.find(class_='content content-word')
        job_info.append(requirement.get_text(strip=True) if requirement else '')

        _save_row(job_info)
        time.sleep(10)  # be polite: throttle detail-page requests


def main():
    """Crawl listing pages starting at START_URL until the pager runs out."""
    url = START_URL
    count = 1
    while True:
        soup = _fetch_soup(url)
        print('正在抓取第%d页' % count)
        count += 1
        _scrape_page(soup)

        pager = soup.find(class_='pagerbar')
        links = pager.find_all('a') if pager is not None else []
        # The 8th <a> (index 7) in the pager is "next page"; on the last page
        # its href is the sentinel 'javascript:;'. Missing pager ends the crawl.
        next_page = links[7]['href'] if len(links) > 7 else 'javascript:;'
        if next_page == 'javascript:;':
            print('抓取完毕')
            break
        url = BASE_URL + next_page


if __name__ == '__main__':
    main()