import re
import time
import requests
import urllib3
from bs4 import BeautifulSoup
import xlwt
# Suppress InsecureRequestWarning: requests.get() below is called with
# verify=False (TLS certificate checks disabled), which otherwise warns on
# every request.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# url='https://www.lagou.com/wn/jobs?gj=%E5%9C%A8%E6%A0%A1%2F%E5%BA%94%E5%B1%8A&gm=150-500%E4%BA%BA&kd=%E5%89%8D%E7%AB%AF&fromSearch=true&city=%E7%A6%8F%E5%B7%9E&pn=1'
# Single hand-built search URL (front-end jobs, Guangzhou, page 1).
# NOTE(review): unused by the code below, which builds its own URLs in
# urlList() — presumably kept for manual testing; confirm before removing.
url = 'https://www.lagou.com/wn/jobs?pn=1&gj=3%E5%B9%B4%E5%8F%8A%E4%BB%A5%E4%B8%8B&gm=150-500%E4%BA%BA&kd=%E5%89%8D%E7%AB%AF&fromSearch=true&city=%E5%B9%BF%E5%B7%9E'
# Browser-like User-Agent so the site serves the normal HTML page.
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36',
   }
# NOTE(review): despite the name, `date` is the COOKIE jar passed to
# requests.get(cookies=date, ...) in getUrls(). The values are hard-coded
# session tokens copied from a browser and will expire — refresh them when
# requests start failing.
date = {
        '__lg_stoken__':'d68685c3b049a61aa2b859d085106a80c6f2e695613c2d7e5ae7073c19008bdf69fb5e4a6eece2622e02627ebe2859a5222371409d168a503f115ef6c239a46b2348f92696a3',
        'X_HTTP_TOKEN':' 42daf4b72327b2810802347461bf5e71415983ed09',
        'JSESSIONID':'ABAAAECABFAACEA8440B880856F1D2E85C1AEE70BE1EA44',
        'WEBTJ-ID':'20220316200124-17f929aac8f5f0-0946ee9b10d426-113f645d-3686400-17f929aac90f74',
        'sajssdk_2015_cross_new_user':'1',
        'sensorsdata2015session':'%7B%7D',
        'sensorsdata2015jssdkcross':'%7B%22distinct_id%22%3A%2217f929aafd7cc8-0a64961186fcef-113f645d-3686400-17f929aafd816e7%22%2C%22first_id%22%3A%22%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24os%22%3A%22MacOS%22%2C%22%24browser%22%3A%22Chrome%22%2C%22%24browser_version%22%3A%2299.0.4844.51%22%7D%2C%22%24device_id%22%3A%2217f929aafd7cc8-0a64961186fcef-113f645d-3686400-17f929aafd816e7%22%7D'
}
# Accumulator filled by urlList(): one search-results URL per page.
urlLists = []
def urlList():
    """Populate the module-level ``urlLists`` with paginated search URLs.

    Bug fix: the original used ``range(1, 7)``, which yields only six URLs
    even though the comment promised seven and ``__main__`` indexed
    ``urlLists[6]`` (an IndexError). The upper bound is now 8 so pages
    1 through 7 are collected.
    """
    for page in range(1, 8):  # pages 1..7 — seven URLs in total
        u = 'https://www.lagou.com/wn/jobs?pn='+str(page)+'&gm=150-500%E4%BA%BA&cl=false&fromSearch=true&labelWords=sug&suginput=%E5%90%8E%E7%AB%AF&kd=%E5%90%8E%E7%AB%AF%E5%BC%80%E5%8F%91%E5%B7%A5%E7%A8%8B%E5%B8%88&city=%E4%B8%8A%E6%B5%B7'
        urlLists.append(u)
def getUrls(urls):
    """Fetch one lagou.com search-results page and return it parsed.

    Parameters
    ----------
    urls : str
        Fully-qualified search URL (one entry of ``urlLists``).

    Returns
    -------
    BeautifulSoup
        The page parsed with the lxml parser.
    """
    # verify=False matches the warning suppression at the top of the file.
    # timeout added: without it a stalled connection hangs the scraper forever.
    result = requests.get(urls, cookies=date, headers=headers,
                          verify=False, timeout=30)
    result.encoding = 'utf-8'
    return BeautifulSoup(result.text, 'lxml')

# Coarse cleaning pass: pull every job-card header node out of the page.
def cleaDateFirst(soup):
    """Return all elements of *soup* whose CSS class is 'item-top__1Z3Zo'."""
    return soup.findAll(class_='item-top__1Z3Zo')
def clearFinal(soup):
    """Extract the four data columns from a parsed results page.

    Parameters
    ----------
    soup : BeautifulSoup
        A page as returned by ``getUrls``.

    Returns
    -------
    tuple[list, list, list, list]
        (position titles, salary strings, education requirements,
        company names), index-aligned per job card.

    Bug fix: the original appended the entire ``re.findall`` result (a
    list) for positions and education, so the spreadsheet cells came out
    as ``"['前端工程师']"``. The first match is now appended as a plain
    string ('' when nothing matched).
    """
    # Job title: the text sits inside the card's <a> before an HTML comment.
    position = []
    for node in soup.findAll(class_='p-top__1F7CL'):
        found = re.findall(r'<a>(.*)<!', str(node.a))
        position.append(found[0] if found else '')
    # Salary string, e.g. "15k-25k".
    money = [node.string for node in soup.findAll(class_='money__3Lkgq')]
    # Education requirement: the text between the trailing </span> and </div>.
    education = []
    for node in soup.findAll(class_='p-bom__JlNur'):
        found = re.findall(r'</span>(.*)</div>', str(node))
        education.append(found[0] if found else '')
    # Company name.
    companyname = [str(node.a.string)
                   for node in soup.findAll(class_='company-name__2-SjF')]
    return position, money, education, companyname
if __name__ == '__main__':
    # Original bugs fixed here:
    #  - urlList() was commented out, so urlLists was empty and
    #    urlLists[6] raised IndexError;
    #  - cleaDateFirst() was called with no argument (TypeError) and its
    #    result was discarded — the call is removed;
    #  - the output file was named .xlsx although xlwt writes the legacy
    #    binary .xls format, producing a file Excel refuses to open;
    #  - the local variable `date` shadowed the module-level cookie dict
    #    that getUrls() needs on every request.
    urlList()
    for index, pageUrl in enumerate(urlLists):
        name = str(index) + 'la.xls'
        soup = getUrls(pageUrl)
        columns = clearFinal(soup)
        book = xlwt.Workbook(encoding='utf-8', style_compression=0)
        sheet = book.add_sheet('sheet', cell_overwrite_ok=True)
        # One spreadsheet column per extracted field, one row per job card.
        for col in range(len(columns)):
            for row in range(len(columns[col])):
                sheet.write(row, col, str(columns[col][row]))
        book.save(name)
        time.sleep(5)  # be polite: pause between page fetches
