import re
import requests
from lxml import html
from urllib import parse
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import csv
# Crawler goal: collect location distribution, salary level and job
# requirements for postings matching a search keyword on 51job.com.
# Search keyword and request headers.
key = '大数据'  # "big data"
# NOTE(review): the keyword is percent-encoded TWICE — presumably 51job
# decodes once server-side before searching. Intentional; do not "fix".
key = parse.quote(parse.quote(key))
# Minimal headers so 51job serves the real page instead of blocking the bot.
headers = {'Host': 'search.51job.com',
           'Upgrade-Insecure-Requests': '1',
           'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'}

# Render the search page in a headless browser and read the total page count.
def getPageNum(url):
    """Return the total number of result pages for a 51job search URL.

    Args:
        url: full search-result URL (first page of the keyword search).

    Returns:
        The page count as a string of digits; '1' when the page-count
        marker cannot be found (layout change / blocked request), instead
        of crashing with AttributeError on a failed re.search.
    """
    chrome_options = Options()
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--disable-gpu')
    # 'chrome_options=' is deprecated in selenium; 'options=' is the
    # supported keyword.
    driver = webdriver.Chrome(options=chrome_options)
    try:
        driver.get(url)
        driver.implicitly_wait(2)  # give the JS-rendered content time to load
        page = driver.page_source
    finally:
        # quit() shuts down the whole browser process; close() only closes
        # the window and can leak chromedriver, especially on exceptions.
        driver.quit()
    match = re.search(r'<span class="td">共 (\d+) 页</span>', page, re.S)
    return match.group(1) if match else '1'


# Render one search-result page in a headless browser and collect all
# job-detail links on it.
def get_links(page):
    """Return the job-detail URLs found on result page *page*.

    Args:
        page: 1-based page number of the search results.

    Returns:
        List of href strings scraped from the rendered page (may be empty
        if the page has no matching anchors).
    """
    # 040000 = region code; `key` is the module-level double-quoted keyword.
    url = 'https://search.51job.com/list/040000,000000,0000,00,9,99,' + key + ',2,' + str(page) + '.html'
    print(url)
    chrome_options = Options()
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--disable-gpu')
    # 'options=' replaces the deprecated 'chrome_options=' keyword.
    driver = webdriver.Chrome(options=chrome_options)
    try:
        driver.get(url)
        driver.implicitly_wait(2)  # give the JS-rendered content time to load
        source = driver.page_source
    finally:
        # Always tear the browser down, even if driver.get raises;
        # quit() (not close()) ends the chromedriver process.
        driver.quit()
    links = re.findall(
        r'<div class="e">.*?<a href="(.*?)" target=".*?" class="el">.*?</div>',
        source, re.S)
    return links

# Fetch one job-detail page and extract its fields as a CSV row.
def get_content(link):
    """Scrape a single 51job job-detail page.

    Args:
        link: URL of the job-detail page.

    Returns:
        A list of 11 strings [company, job, education, labels, salary,
        company type, company scale, business scope, work experience,
        area, requirements], or None when the request fails or the page
        does not match the expected layout.
    """
    # Handle the request failure here so one slow/dead link does not
    # abort the whole crawl (the original let timeouts propagate).
    try:
        r1 = requests.get(link, headers=headers, timeout=2)
    except requests.RequestException as e:
        print(e)
        return None
    r1.encoding = 'gbk'  # 51job detail pages are GBK-encoded
    page = html.fromstring(r1.text)
    try:
        job = page.xpath('//div[@class="tHeader tHjob"]//h1/text()')[0]
        company = page.xpath('//p[@class="cname"]/a/text()')[0]
        label = page.xpath('//div[@class="t1"]/span/text()')
        education = page.xpath('//div[@class="cn"]/p[2]/text()')[2]
        salary = page.xpath('/html/body/div[3]/div[2]/div[2]/div/div[1]/strong/text()')[0]
        area = page.xpath('//div[@class="cn"]/p[2]/text()')[0]
        companytype = page.xpath('/html/body/div[3]/div[2]/div[4]/div[1]/div[2]/p[1]/text()')[0]
        companyScale = page.xpath('/html/body/div[3]/div[2]/div[4]/div[1]/div[2]/p[2]/text()')[0]
        scope = page.xpath('/html/body/div[3]/div[2]/div[4]/div[1]/div[2]/p[3]/a[1]/text()')[0]
        workyear = page.xpath('//div[@class="cn"]/p[2]/text()')[1]
        require = re.findall(r'(?:工作|任职)要求(.*)<div class="mt10">', r1.text, re.S)
    except IndexError as e:
        # Page layout differs (ad, expired posting, ...) — skip this link.
        print(e)
        return None
    return [str(company), str(job), str(education), str(label), str(salary),
            str(companytype), str(companyScale), str(scope), str(workyear),
            str(area), str(require)]


if __name__ == '__main__':
    # First-page URL for the '大数据' search (keyword double-encoded to
    # match the module-level `key`); used only to read the total page count.
    url = "https://search.51job.com/list/040000,000000,0000,00,9,99,%25E5%25A4%25A7%25E6%2595%25B0%25E6%258D%25AE,2,1.html"
    pageNum = getPageNum(url)

    # Write the CSV header row.  newline='' stops the csv module from
    # emitting an extra blank line between rows on Windows.
    with open("51job_bigdata.csv", 'w', encoding="utf-8", newline='') as f:
        writer = csv.writer(f)
        title = ["公司", "职位", "学历", "福利", "工资", "公司类型", "公司规模", "经营范围", "工作经验", "地区", "任职要求"]
        writer.writerow(title)

    # Crawl every result page.  range() is exclusive at the top, so +1 is
    # needed to include the last page (the original skipped it).
    for i in range(1, int(pageNum) + 1):
        print('正在爬取第{}页信息'.format(i))
        links = get_links(i)  # every job-detail link on page i
        items = []            # rows collected for this page
        for link in links:
            item = get_content(link)  # one CSV row per posting
            if item is not None:
                items.append(item)
        # Append this page's rows before moving on, so a crash mid-crawl
        # still leaves the completed pages on disk.
        with open("51job_bigdata.csv", 'a+', encoding="utf-8", newline='') as f:
            csv.writer(f).writerows(items)