# -*- coding:utf-8 -*-

import requests,json,re,time,datetime,socket

from urllib import request

import pandas as pd

from pandas import DataFrame,Series

from bs4 import BeautifulSoup



def lagou_spider_keyword(keyword):
    """Scrape lagou.com job postings matching *keyword* and export them to 'lagou.xls'.

    Workflow:
      1. Query the positionAjax.json search API to learn the total result count.
      2. Fetch the first 4 result pages (original behaviour kept) and flatten
         each position record into one DataFrame row.
      3. Follow every position's detail page to extract the full job
         description ('jd') and the resume-handling statistics.
      4. Concatenate all pages and write the result to an Excel file.

    Parameters
    ----------
    keyword : str
        Search term, e.g. '数据挖掘'.

    Side effects
    ------------
    Network requests to www.lagou.com, progress printed to stdout, and
    'lagou.xls' written in the current working directory.
    """
    # Percent-encode the keyword for the query string. request.quote is
    # re-exported from urllib.parse and replaces the old manual
    # bytes-repr-to-%XX string surgery, which only worked by accident.
    keywordindex = request.quote(keyword)

    headers = {
        "Accept": "application/json, text/javascript, */*; q=0.01",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
        "Content-Length": "19",
        "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
        "Cookie": "_ga=GA1.2.754895021.1542433960; user_trace_token=20181117135239-fde4320a-ea2c-11e8-892b-5254005c3644; LGUID=20181117135239-fde435ce-ea2c-11e8-892b-5254005c3644; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%22167203edb74166-00ff6f92a0c4db-4313362-2073600-167203edb75bf9%22%2C%22%24device_id%22%3A%22167203edb74166-00ff6f92a0c4db-4313362-2073600-167203edb75bf9%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24latest_referrer_host%22%3A%22%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%7D%7D; index_location_city=%E6%B7%B1%E5%9C%B3; _gid=GA1.2.1351489001.1543843989; ab_test_random_num=0; hasDeliver=0; showExpriedIndex=1; showExpriedCompanyHome=1; showExpriedMyPublish=1; JSESSIONID=ABAAABAAAGGABCB51FB03810D687C0794FD1359B2019DD1; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1543843989,1543844009,1543844821,1543892728; _putrc=B037EE072AD5FE30123F89F2B170EADC; login=true; unick=%E6%8B%89%E5%8B%BE%E7%94%A8%E6%88%B77173; gate_login_token=ade552a80d425d69ae9d91246076ede8e4bacd281eb7258223a8b69dda15981a; TG-TRACK-CODE=index_navigation; X_HTTP_TOKEN=784caa92b730af22c71c7e97b90c6558; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1543895582; LGSID=20181204141216-8ca51493-f78b-11e8-8cb7-5254005c3644; PRE_UTM=; PRE_HOST=; PRE_SITE=; PRE_LAND=https%3A%2F%2Fwww.lagou.com%2Fjobs%2F5342034.html; LGRID=20181204141216-8ca516df-f78b-11e8-8cb7-5254005c3644; SEARCH_ID=987d0426249349e691e1fd396c88ab2f",
        "Host": "www.lagou.com",
        "Origin": "https://www.lagou.com",
        "Referer": "https://www.lagou.com/jobs/list_?city=%E6%B7%B1%E5%9C%B3&cl=false&fromSearch=true&labelWords=&suginput=",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36",
        "X-Anit-Forge-Code": "0",
        "X-Anit-Forge-Token": "None",
        "X-Requested-With": "XMLHttpRequest"
    }

    # Headers for the position *detail* pages (plain HTML, different Referer).
    headers_de = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        #"Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Cache-Control": "max-age=0",
        "Connection": "keep-alive",
        "Cookie": "_ga=GA1.2.754895021.1542433960; user_trace_token=20181117135239-fde4320a-ea2c-11e8-892b-5254005c3644; LGUID=20181117135239-fde435ce-ea2c-11e8-892b-5254005c3644; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%22167203edb74166-00ff6f92a0c4db-4313362-2073600-167203edb75bf9%22%2C%22%24device_id%22%3A%22167203edb74166-00ff6f92a0c4db-4313362-2073600-167203edb75bf9%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24latest_referrer_host%22%3A%22%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%7D%7D; index_location_city=%E6%B7%B1%E5%9C%B3; _gid=GA1.2.1351489001.1543843989; ab_test_random_num=0; hasDeliver=0; showExpriedIndex=1; showExpriedCompanyHome=1; showExpriedMyPublish=1; JSESSIONID=ABAAABAAAGGABCB51FB03810D687C0794FD1359B2019DD1; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1543843989,1543844009,1543844821,1543892728; _putrc=B037EE072AD5FE30123F89F2B170EADC; login=true; unick=%E6%8B%89%E5%8B%BE%E7%94%A8%E6%88%B77173; TG-TRACK-CODE=index_navigation; X_HTTP_TOKEN=784caa92b730af22c71c7e97b90c6558; LGSID=20181204141216-8ca51493-f78b-11e8-8cb7-5254005c3644; PRE_UTM=; PRE_HOST=; PRE_SITE=; PRE_LAND=https%3A%2F%2Fwww.lagou.com%2Fjobs%2F5342034.html; SEARCH_ID=987d0426249349e691e1fd396c88ab2f; _gat=1; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1543905329; LGRID=20181204143528-ca5f0016-f78e-11e8-89f5-525400f775ce; gate_login_token=4c7a96fe6766658fc7868e6c485a4f4255bd458f77393b390737a653e5693218",
        "Host": "www.lagou.com",
        "Referer": "https://www.lagou.com/jobs/5342034.html",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36"
    }

    # Probe request: only used to read the total number of search results.
    # NOTE(review): "totalCount" looks like the number of *positions*, not
    # pages, yet the progress message below treats it as pages -- confirm.
    probe_url = ('http://www.lagou.com/jobs/positionAjax.json?px=default'
                 '&first=true&kd=' + keywordindex + '&pn=1')
    req = request.Request(probe_url, headers=headers)
    with request.urlopen(req) as f:
        data = f.read()
    urlcount = int(json.loads(str(data, encoding='utf-8', errors='ignore'))
                   ["content"]["positionResult"]["totalCount"])

    # Regexes hoisted out of the per-row loop (raw strings: '\d' in a plain
    # string literal is an invalid escape on modern Python).
    salary_min_re = re.compile(r'^(\d*?)k')
    salary_max_re = re.compile(r'-(\d*?)k$')
    handle_perc_re = re.compile(r'>(\w*%)<')
    handle_day_re = re.compile(r'>(\w*)<')

    # One DataFrame per successfully scraped result page. Using a list fixes
    # the original NameError when the very first page failed ('totaldata'
    # was never bound before the concat on later iterations).
    pages = []

    # Original behaviour kept: only the first 4 result pages are fetched
    # (the full range would be over all 'urlcount' results).
    for i in range(0, 4):
        # The site expects first=true only on the first page request.
        first_flag = 'true' if i == 0 else 'false'
        url = ('http://www.lagou.com/jobs/positionAjax.json?px=default&first='
               + first_flag + '&kd=' + keywordindex + '&pn=' + str(i + 1))

        reqs = request.Request(url, headers=headers)
        with request.urlopen(reqs) as f:
            data = f.read()

        try:
            jsondata = json.loads(str(data, encoding='utf-8', errors='ignore'))["content"]['positionResult']["result"]

            # Flatten every position record into a single-row DataFrame and
            # combine them with one concat (the original concatenated
            # incrementally, which is quadratic).
            rows = []
            for record in jsondata:
                # Join the company-label list into one '-'-separated string
                # and drop the original list column.
                record['companyLabelList2'] = '-'.join(record.pop('companyLabelList'))
                rows.append(DataFrame(Series(data=record)).T)
            rdata = pd.concat(rows)
            rdata.index = range(1, len(rdata) + 1)  # 1-based labels

            rdata['keyword'] = keyword
            rdata['salarymin'] = 0
            rdata['salarymax'] = 0
            rdata['url'] = ''
            rdata['jd'] = ''            # full job-description text
            rdata['handle_perc'] = ''   # share of resumes processed within 7 days
            rdata['handle_day'] = ''    # average days to process a resume

            for klen in range(len(rdata['salary'])):
                row = klen + 1  # index labels run 1..n
                salary = rdata['salary'].iloc[klen]

                # Salary strings look like '10k-15k'; the upper bound may be
                # absent (e.g. '8k以上'), in which case it is left empty.
                rdata.loc[row, 'salarymin'] = salary_min_re.search(salary).group(1)
                m = salary_max_re.search(salary)
                rdata.loc[row, 'salarymax'] = m.group(1) if m is not None else ''

                # Build the detail-page URL and fetch it for the JD text.
                rdata.loc[row, 'url'] = 'http://www.lagou.com/jobs/%s.html' % rdata.loc[row, 'positionId']
                print(rdata.loc[row, 'url'])
                req_rdate = request.Request(rdata.loc[row, 'url'], headers=headers_de)
                with request.urlopen(req_rdate) as fd:
                    soup_url = BeautifulSoup(fd.read(), 'lxml')

                # .strings yields every text fragment inside the JD container.
                strings_url = soup_url.find('dd', class_='job_bt').strings
                rdata.loc[row, 'jd'] = ''.join(strings_url).replace(' ', '')

                # Resume-handling stats live in <span class="data"> elements:
                # [0] processing rate (may be missing), [1] processing days.
                temp = soup_url.find_all('span', class_='data')
                m = handle_perc_re.search(str(temp[0]))
                rdata.loc[row, 'handle_perc'] = m.group(1) if m is not None else ''
                rdata.loc[row, 'handle_day'] = handle_day_re.search(str(temp[1])).group(1).replace('天', '')

            pages.append(rdata)

        except Exception as e:
            # Best-effort scraping: report the failed page and move on.
            print(e)
            continue

        print('正在抓取搜索页面第%d页,时间是%s，还剩下%d页' % (i + 1, datetime.datetime.now(), urlcount - i - 1))

    if not pages:
        # Every page failed -- nothing to export.
        print('no data scraped for keyword %s' % keyword)
        return

    # Merge all pages, relabel 1..n, and export to Excel.
    totaldata = pd.concat(pages)
    totaldata.index = range(1, len(totaldata) + 1)
    totaldata.to_excel('lagou.xls', sheet_name='sheet1')





if __name__ == '__main__':
    # Prompt for a search term interactively (any keyword works).
    search_term = input("请输入搜索词(回车进入下一步): ")
    lagou_spider_keyword(search_term)
