import requests as req
import time
from bs4 import BeautifulSoup
import re

def download(url):
    """Fetch *url* and return the response body as text, or None on failure.

    Returns None when *url* is None, when the request fails at the network
    level (connection error, timeout), or when the server answers with a
    non-200 status code.
    """
    # Check before sleeping: no point throttling for a request we won't make.
    if url is None:
        return None
    # Throttle so we do not hammer the target site.
    time.sleep(1)
    # Browser-like headers so the site does not reject us as a bot.
    header = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
        'Accept-Encoding': 'none',
        'Accept-Language': 'en-US,en;q=0.8',
        'Connection': 'keep-alive'}
    try:
        # timeout keeps the crawler from hanging forever on a dead host.
        response = req.get(url, headers=header, timeout=10)
    except req.RequestException:
        # Network-level failure: honour the None-on-failure contract
        # instead of crashing the whole crawl.
        return None
    if response.status_code == 200:
        return response.text
    return None

def get_list(page_no=2):
    """Scrape one result page of the software-development job search and
    hand the collected detail-page links to get_detail().

    page_no: which result page to fetch (default 2, matching the original
    hard-coded URL, so existing callers are unaffected).
    """
    url = ("https://www.job001.cn/jobs?keyType=0&keyWord=%E8%BD%AF%E4%BB%B6%E5%BC%80%E5%8F%91"
           "&jobTypeId=&jobType=%E8%81%8C%E4%BD%8D%E7%B1%BB%E5%9E%8B&industry="
           "&industryname=%E8%A1%8C%E4%B8%9A%E7%B1%BB%E5%9E%8B&workId=&workPlace=&salary="
           "&salaryType=&entType=&experience=&education=&entSize=&benefits=&reftime="
           "&workTypeId=&sortField=&pageNo=" + str(page_no) + "&curItem=")
    res = download(url)
    if res is None:
        # Download failed (network error or non-200); nothing to parse.
        return
    soup = BeautifulSoup(res, "lxml")
    all_links = []
    for link in soup.findAll(class_='jobNameCon'):
        href = link.get('href')
        # Guard against elements without an href attribute (get() -> None).
        if href and '-' in href:
            all_links.append('https://www.job001.cn' + href)
    get_detail(all_links)

def re_space(text):
    """Strip every whitespace character (spaces, tabs, newlines) from *text*."""
    # Parameter renamed from `str`, which shadowed the builtin.
    # re.sub accepts the pattern string directly and the re module caches
    # compiled patterns, so compiling on every call was redundant.
    return re.sub(r"\s+", "", text)

def get_detail(all_links):
    """Download each job-detail page in *all_links* and print its data.

    Best-effort: pages that fail to download or that are missing the
    expected markup are skipped instead of aborting the whole crawl
    (the original code raised AttributeError/IndexError on such pages).
    """
    for link in all_links:
        print(link)
        res = download(link)
        if res is None:
            continue
        soup = BeautifulSoup(res, 'lxml')
        title_tag = soup.find('h1')
        salary_tag = soup.find(class_='name_Salary')
        info_tag = soup.find(class_='jobsInfo')
        # Skip pages whose layout does not match what we expect.
        if title_tag is None or salary_tag is None or info_tag is None:
            continue
        title = title_tag.get_text().strip()
        # '(面议)' marks a negotiable salary; drop it from the figure.
        salary = re_space(salary_tag.get_text().strip().replace('(面议)', ''))
        jobinfo = re_space(info_tag.get_text().strip())
        # Expected format "city·experience·job type"; pad so a page with
        # fewer fields cannot raise IndexError.
        job_info_arr = jobinfo.split('·')
        job_info_arr += [''] * (3 - len(job_info_arr))
        city = job_info_arr[0]
        exprise = job_info_arr[1]
        job_type = job_info_arr[2]
        # Welfare/benefit labels.
        wlist = []
        for w in soup.findAll(class_='welfare_label'):
            # First child node is the label text; truncation dots removed.
            winfo = re_space(w.contents[0].strip().replace('...', ''))
            if winfo:
                wlist.append(winfo)
        wstr = ','.join(wlist)
        print(wstr)
        job_content = soup.findAll(class_='jobs_content')
        # Job duties (c1) and qualifications (c2); guard missing sections.
        c1 = job_content[0].get_text() if len(job_content) > 0 else ''
        c2 = job_content[1].get_text() if len(job_content) > 1 else ''

# Entry point: crawl one listing page and print details of every job found.
if __name__ == '__main__':
    get_list()
