# -*- coding: utf-8 -*-
import requests
import csv
from bs4 import BeautifulSoup
import time

'''Scrape job postings from liepin.com (猎聘网).'''

class lie_pin_wang:
    """Scraper for job postings on liepin.com.

    Fetches search-result pages for a keyword, extracts each posting's
    link / title / salary / description, and appends one row per posting
    to 招聘信息.csv in the current directory.
    """

    def __init__(self, name, num):
        """
        name: search keyword to scrape.
        num:  number of result pages to fetch.
        """
        # Cookie + User-Agent copied from a logged-in browser session.
        # NOTE(review): the cookie will eventually expire, at which point
        # the site may serve a login page instead of results.
        self.headers = {'Cookie': '__uuid=1585485337525.17; need_bind_tel=false; new_user=false; c_flag=cc566f6dc32331ba3fa8b0853b4b5763; gr_user_id=07b263de-707e-4fea-aa86-9f87265ab91f; bad1b2d9162fab1f80dde1897f7a2972_gr_last_sent_cs1=774bd4ecf9b701951a667e00ad0d9493; imClientId=045b977d455760f670c5cdd4c6549f7f; imId=045b977d455760f6a77c18111a6bff96; imClientId_0=045b977d455760f670c5cdd4c6549f7f; imId_0=045b977d455760f6a77c18111a6bff96; grwng_uid=68143ccd-8fd9-4b9d-8dc8-5dab2448bcde; __s_bid=4f0f6f385d6a2d0ebbdc0bbaf6599b22486b; UniqueKey=774bd4ecf9b701951a667e00ad0d9493; lt_auth=uL1cOyEFmVqs7XPc22EKtf5JjtihVD7P%2FXUKjUgBhN7qU%2F3g4PnkRQ%2BBrrcCxAMhmhlzfsULNLn7Pen8wXpD7UQVwGmklICxv%2Fuk1XgeTuZsHuyflMXuqs7QQpUsrXg6ykpgn2si; inited_user=2e592f7d8e89bcf2170274e776c8ca59; user_roles=0; user_photo=5d5513d34ebeb1284dfc774b07u.png; user_name=%E5%BC%A0%E4%BA%9A%E5%8D%97; bad1b2d9162fab1f80dde1897f7a2972_gr_session_id=8efabd69-991e-4e8e-b2c9-e234353b9882; bad1b2d9162fab1f80dde1897f7a2972_gr_last_sent_sid_with_cs1=8efabd69-991e-4e8e-b2c9-e234353b9882; bad1b2d9162fab1f80dde1897f7a2972_gr_session_id_8efabd69-991e-4e8e-b2c9-e234353b9882=true; _fecdn_=1; __tlog=1595812146533.76%7C00000000%7CR000000058%7C00000000%7C00000000; __session_seq=9; __uv_seq=9; bad1b2d9162fab1f80dde1897f7a2972_gr_cs1=774bd4ecf9b701951a667e00ad0d9493; Hm_lvt_a2647413544f5a04f00da7eee0d5e200=1595812147,1595812905,1595813143,1595813316; Hm_lpvt_a2647413544f5a04f00da7eee0d5e200=1595813316', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'}
        # Running row number written as the first CSV column.
        self.bianHao = 0
        self.name = name
        self.num = num

    def wang_ye(self):
        """Fetch self.num search-result pages for self.name.

        Returns a list of the pages' HTML texts; pages that fail to
        download are reported and skipped.
        """
        pages = []
        for i in range(self.num):
            url = f'https://www.liepin.com/zhaopin/?compkind=&dqs=&pubTime=&pageSize=40&salary=&compTag=&sortFlag=&degradeFlag=0&compIds=&subIndustry=&jobKind=&industries=&compscale=&key={self.name}&siTag=I-7rQ0e90mv8a37po7dV3Q%7EfA9rXquZc5IkJpXC-Ycixw&d_sfrom=search_fp_bar&d_ckId=e8c68a9a47231f1bf53b7855741c091b&d_curPage=1&d_pageSize=40&d_headId=e8c68a9a47231f1bf53b7855741c091b&curPage={i}'
            try:
                # timeout so a stalled connection cannot hang the scraper;
                # catch only requests errors instead of a bare except.
                resp = requests.get(url, headers=self.headers, timeout=10)
                pages.append(resp.text)
                print(f'抓取第{i+1}页成功,保存成功。')
            except requests.RequestException:
                print(f'抓取第{i+1}页失败！')
            time.sleep(1)  # throttle: be polite to the server
        return pages

    def ti_qu(self, r0):
        """Parse each result page in r0 and write one CSV row per posting.

        r0: list of HTML texts as returned by wang_ye().
        """
        for page in r0:
            soup = BeautifulSoup(page, 'lxml')
            # Each job card on the results page carries class "job-info".
            for job in soup.find_all(class_="job-info"):
                self.bianHao += 1
                print(self.bianHao)
                # NOTE(review): the original read job.h3.deng, but <deng> is
                # not an HTML element and always yields None; the markup nests
                # <a> inside <h3>, so .a is presumably what was intended.
                link_tag = job.h3.a
                # Detail-page link and job title come from the same <a>.
                xiangQing_url = link_tag['href']
                zhiWei = link_tag.text.strip()
                # Salary and location summary is in the <p title="..."> attr.
                xinZi = job.p['title']
                xx = [f'{self.bianHao}', xiangQing_url, zhiWei, xinZi]
                try:
                    # Fetch the job's detail page (absolute link).
                    detail = requests.get(xiangQing_url, headers=self.headers, timeout=10)
                except requests.RequestException:
                    # The href was relative (no scheme) — prefix the site root.
                    xiangQing_url = f'https://www.liepin.com/{xiangQing_url}'
                    detail = requests.get(xiangQing_url, headers=self.headers, timeout=10)
                    print('简化链接！')
                soup1 = BeautifulSoup(detail.content, 'lxml')
                content = soup1.find(class_="content content-word")
                # Tolerate detail pages without a description block instead
                # of crashing on None.
                miaoShu = content.get_text(strip=True) if content else ''
                xx.append(miaoShu)
                self.xie_ru(xx)
                time.sleep(1)  # throttle detail-page requests

    def chuang_jian(self):
        """Create 招聘信息.csv (overwriting any previous run) with a header row."""
        headers = ['0', '链接', '招聘岗位', '薪资', '职位描述']
        try:
            # newline='' is required by the csv module to avoid blank
            # rows on Windows.
            with open('招聘信息.csv', 'w', encoding='utf8', newline='') as f:
                csv.writer(f).writerow(headers)
                print('创建文件成功！')
        except OSError:
            print('创建文件失败！')

    def xie_ru(self, xx):
        """Append one row (list of field strings) to 招聘信息.csv."""
        try:
            with open('招聘信息.csv', 'a', encoding='utf8', newline='') as f:
                csv.writer(f).writerow(xx)
        except OSError:
            print('写入文件内容失败！')

    def run(self):
        """Create the output file, then scrape and extract all pages."""
        self.chuang_jian()
        self.ti_qu(self.wang_ye())


if __name__ == '__main__':
    # Scrape the first 3 result pages for the keyword "python".
    spider = lie_pin_wang('python', 3)
    spider.run()