# -*- coding:utf-8 -*-

import requests, json, re, time, datetime, socket

from urllib import request

import pandas as pd

from pandas import DataFrame, Series

from bs4 import BeautifulSoup

try:
    from Queue import Queue
except:
    from queue import Queue

import gevent
from gevent import monkey
from gevent.pool import Pool

monkey.patch_all()


class LagouSpider(object):
    """Spider that scrapes job postings from lagou.com for one search keyword.

    Workflow: query the positionAjax.json search API page by page, flatten
    each page of JSON results into a pandas DataFrame, scrape every job's
    detail HTML page for its description and recruiter-reply statistics,
    then save the combined table to an Excel file.
    """

    def __init__(self, keyword):
        # Headers for the JSON search API (positionAjax.json).  The Cookie /
        # token values are a captured browser session; Lagou rejects requests
        # without them.
        self.headers = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Connection": "keep-alive",
            "Content-Length": "19",
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            "Cookie": "_ga=GA1.2.754895021.1542433960; user_trace_token=20181117135239-fde4320a-ea2c-11e8-892b-5254005c3644; LGUID=20181117135239-fde435ce-ea2c-11e8-892b-5254005c3644; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%22167203edb74166-00ff6f92a0c4db-4313362-2073600-167203edb75bf9%22%2C%22%24device_id%22%3A%22167203edb74166-00ff6f92a0c4db-4313362-2073600-167203edb75bf9%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24latest_referrer_host%22%3A%22%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%7D%7D; index_location_city=%E6%B7%B1%E5%9C%B3; _gid=GA1.2.1351489001.1543843989; ab_test_random_num=0; hasDeliver=0; showExpriedIndex=1; showExpriedCompanyHome=1; showExpriedMyPublish=1; JSESSIONID=ABAAABAAAGGABCB51FB03810D687C0794FD1359B2019DD1; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1543843989,1543844009,1543844821,1543892728; _putrc=B037EE072AD5FE30123F89F2B170EADC; login=true; unick=%E6%8B%89%E5%8B%BE%E7%94%A8%E6%88%B77173; gate_login_token=ade552a80d425d69ae9d91246076ede8e4bacd281eb7258223a8b69dda15981a; TG-TRACK-CODE=index_navigation; X_HTTP_TOKEN=784caa92b730af22c71c7e97b90c6558; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1543895582; LGSID=20181204141216-8ca51493-f78b-11e8-8cb7-5254005c3644; PRE_UTM=; PRE_HOST=; PRE_SITE=; PRE_LAND=https%3A%2F%2Fwww.lagou.com%2Fjobs%2F5342034.html; LGRID=20181204141216-8ca516df-f78b-11e8-8cb7-5254005c3644; SEARCH_ID=987d0426249349e691e1fd396c88ab2f",
            "Host": "www.lagou.com",
            "Origin": "https://www.lagou.com",
            "Referer": "https://www.lagou.com/jobs/list_?city=%E6%B7%B1%E5%9C%B3&cl=false&fromSearch=true&labelWords=&suginput=",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36",
            "X-Anit-Forge-Code": "0",
            "X-Anit-Forge-Token": "None",
            "X-Requested-With": "XMLHttpRequest"
        }
        # Browser-style headers for the job-detail HTML pages (/jobs/<id>.html).
        self.headers_de = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            # "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "max-age=0",
            "Connection": "keep-alive",
            "Cookie": "_ga=GA1.2.754895021.1542433960; user_trace_token=20181117135239-fde4320a-ea2c-11e8-892b-5254005c3644; LGUID=20181117135239-fde435ce-ea2c-11e8-892b-5254005c3644; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%22167203edb74166-00ff6f92a0c4db-4313362-2073600-167203edb75bf9%22%2C%22%24device_id%22%3A%22167203edb74166-00ff6f92a0c4db-4313362-2073600-167203edb75bf9%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24latest_referrer_host%22%3A%22%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%7D%7D; index_location_city=%E6%B7%B1%E5%9C%B3; _gid=GA1.2.1351489001.1543843989; ab_test_random_num=0; hasDeliver=0; showExpriedIndex=1; showExpriedCompanyHome=1; showExpriedMyPublish=1; JSESSIONID=ABAAABAAAGGABCB51FB03810D687C0794FD1359B2019DD1; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1543843989,1543844009,1543844821,1543892728; _putrc=B037EE072AD5FE30123F89F2B170EADC; login=true; unick=%E6%8B%89%E5%8B%BE%E7%94%A8%E6%88%B77173; TG-TRACK-CODE=index_navigation; X_HTTP_TOKEN=784caa92b730af22c71c7e97b90c6558; LGSID=20181204141216-8ca51493-f78b-11e8-8cb7-5254005c3644; PRE_UTM=; PRE_HOST=; PRE_SITE=; PRE_LAND=https%3A%2F%2Fwww.lagou.com%2Fjobs%2F5342034.html; SEARCH_ID=987d0426249349e691e1fd396c88ab2f; _gat=1; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1543905329; LGRID=20181204143528-ca5f0016-f78e-11e8-89f5-525400f775ce; gate_login_token=4c7a96fe6766658fc7868e6c485a4f4255bd458f77393b390737a653e5693218",
            "Host": "www.lagou.com",
            "Referer": "https://www.lagou.com/jobs/5342034.html",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36"
        }
        self.keyword = keyword

        self.queue = Queue()  # NOTE(review): unused; likely intended for gevent workers

        self.i = 0            # starting page offset for the first request
        self.type = 'true'    # value of the API's 'first' flag; 'true' only on page 1
        self.url = 'http://www.lagou.com/jobs/positionAjax.json?px=default&first='

        self.rdata = None     # DataFrame holding the current page's rows

    def send_request(self, url):
        """Fetch *url* and return the raw response body as bytes.

        Detail pages (URL contains '.html') are fetched with the browser-style
        headers; everything else uses the AJAX headers.
        """
        headers = self.headers_de if '.html' in url else self.headers
        req = request.Request(url, headers=headers)
        # Context manager ensures the response object is closed (the original
        # leaked it).
        with request.urlopen(req) as resp:
            return resp.read()

    def _keywordindex(self, keyword):
        """Percent-encode *keyword* as lowercase UTF-8 escapes.

        E.g. '测试' -> '%e6%b5%8b%e8%af%95'; plain ASCII passes through
        unchanged.  Implemented via the bytes repr so the hex digits stay
        lowercase, matching what the site expects.
        """
        keywordbyte = keyword.encode('utf-8')
        # str(b'\xe6...') gives "b'\\xe6...'"; turn '\x' escapes into '%'.
        keywordindex = str(keywordbyte).replace(r'\x', '%').replace(r"'", "")
        keywordindex = re.sub(r'^b', '', keywordindex)
        return keywordindex

    def parse_response(self, response):
        """Parse the search-API JSON *response* (bytes) and return the total
        number of matching positions as an int."""
        payload = json.loads(str(response, encoding='utf-8', errors='ignore'))
        urlcount = int(payload["content"]["positionResult"]["totalCount"])
        print('本次搜索页面共计%d' % urlcount)
        return urlcount

    def _jsondata_rdata(self, jsondata, t):
        """Turn the t-th job dict of *jsondata* into a one-row DataFrame and
        append it to self.rdata (self.rdata is restarted when t == 0).

        Returns the updated self.rdata.
        """
        # Flatten the company label list into a single '-'-joined string.
        jsondata[t]['companyLabelList2'] = '-'.join(jsondata[t]['companyLabelList'])
        jsondata[t].pop('companyLabelList')

        # Build each row as a Series, transpose to a 1-row frame, then concat.
        row = DataFrame(Series(data=jsondata[t])).T
        if t == 0:
            self.rdata = row
        else:
            self.rdata = pd.concat([self.rdata, row])
        return self.rdata

    def parse_second(self, data_url, rdata, klen):
        """Parse one job-detail HTML page (*data_url*, bytes) and fill in the
        'jd', 'handle_perc' and 'handle_day' cells of row klen+1 in *rdata*."""
        soup_url = BeautifulSoup(data_url, 'lxml')  # html5lib also works

        # .strings yields every text fragment under the job-description node.
        strings_url = soup_url.find('dd', class_='job_bt').strings
        # .loc replaces the long-removed .ix; index labels are 1..n here.
        rdata.loc[klen + 1, 'jd'] = ''.join(strings_url).replace(' ', '')

        # Recruiter reply-rate statistics are in <span class="data"> elements.
        temp = soup_url.find_all('span', class_='data')
        perc = re.search(r'>(\w*%)<', str(temp[0]))
        rdata.loc[klen + 1, 'handle_perc'] = perc.group(1) if perc is not None else ''
        rdata.loc[klen + 1, 'handle_day'] = re.search(r'>(\w*)<', str(temp[1])).group(1).replace('天', '')

        return rdata

    def _rdata_handler(self, rdata):
        """Post-process one page of results.

        Re-indexes *rdata* from 1, splits the salary string into min/max,
        builds each job's detail-page URL, and scrapes that page for the
        description columns.  Returns the enriched DataFrame.
        """
        # Re-index so row labels run 1..n (used by .loc below).
        rdata.index = range(1, len(rdata) + 1)

        # Bug fix: the original read the module-global 'keyword' here.
        rdata['keyword'] = self.keyword
        rdata['salarymin'] = 0
        rdata['salarymax'] = 0
        rdata['url'] = ''
        rdata['jd'] = ''           # job description
        rdata['handle_perc'] = ''  # share of resumes processed within 7 days
        rdata['handle_day'] = ''   # average days to process a resume

        for klen in range(len(rdata['salary'])):
            salary = rdata['salary'].iloc[klen]
            rdata.loc[klen + 1, 'salarymin'] = re.search(r'^(\d*?)k', salary).group(1)

            # If the upper bound is missing (e.g. '8k以上'), leave it empty.
            upper = re.search(r'-(\d*?)k$', salary)
            rdata.loc[klen + 1, 'salarymax'] = upper.group(1) if upper is not None else ''

            # Build the detail-page URL so the JD can be scraped next.
            rdata.loc[klen + 1, 'url'] = 'http://www.lagou.com/jobs/%s.html' % rdata.loc[klen + 1, 'positionId']
            print(rdata.loc[klen + 1, 'url'])

            # Second-level fetch: pull the job description into this row.
            data_url = self.send_request(rdata.loc[klen + 1, 'url'])
            self.parse_second(data_url, rdata, klen)

        return rdata

    def save_data(self, data):
        """Write the accumulated DataFrame to an Excel file."""
        data.to_excel('lagou.xls', sheet_name='sheet1')

    def main(self):
        """Entry point: crawl the search pages, enrich every row, save to Excel."""
        start_url = self.url + self.type + '&kd=' + self._keywordindex(self.keyword) + '&pn=' + str(self.i + 1)
        response = self.send_request(start_url)
        urlcount = self.parse_response(response)

        totaldata = None  # accumulates rows across all pages
        for i in range(0, 3):  # capped at 3 pages; use urlcount for everything
            # The API expects first=true only on the very first page.
            self.type = 'true' if i == 0 else 'false'
            restart_url = self.url + self.type + '&kd=' + self._keywordindex(self.keyword) + '&pn=' + str(i + 1)
            data = self.send_request(restart_url)

            # Decode the JSON payload and build this page's DataFrame.
            try:
                jsondata = json.loads(str(data, encoding='utf-8', errors='ignore'))["content"]['positionResult']["result"]

                for t in range(len(jsondata)):
                    self.rdata = self._jsondata_rdata(jsondata, t)

                self.rdata = self._rdata_handler(self.rdata)
            except Exception as e:
                # Anti-scraping responses / schema changes land here; skip page.
                print(e)
                continue

            # totaldata is the union of all pages; rdata is just this page.
            # Guarding on None (not i == 0) avoids a NameError when the first
            # page fails but a later one succeeds.
            if totaldata is None:
                totaldata = self.rdata
            else:
                totaldata = pd.concat([totaldata, self.rdata])

            totaldata.index = range(1, len(totaldata) + 1)

            print('正在抓取搜索页面第%d页,时间是%s，还剩下%d页' % (i + 1, datetime.datetime.now(), urlcount - i - 1))

        if totaldata is not None:
            self.save_data(totaldata)




if __name__ == "__main__":
    keyword = input("请输入搜索词(回车进入下一步): ")

    spider = LagouSpider(keyword)
    # time.clock() was removed in Python 3.8; perf_counter() is the modern
    # monotonic wall-clock timer for measuring elapsed time.
    start = time.perf_counter()
    spider.main()
    end = time.perf_counter()

    # Report the elapsed duration (the original printed the raw counter value).
    print("[INFO]: using time {}".format(end - start))

