#encoding:utf-8

"""
    前面所学xpath/bs4都是HTML/XML文档的处理解析器
    这里学习JSON文档的处理解析器JsonPath
    JSON与XML都是一种数据交换格式,用于数据交互的场景
    json简单讲就是js中的对象(类似字典)和数组(类似列表)
    
    在获取了请求响应对象并将其json化后,可通过jsonpath来获取json数据中的值:jsonpath通过匹配对应的键路径(如"$..result")来取值,免去逐层取字典键的麻烦。

"""

import json

import requests

from jsonpath import jsonpath


class LagouSpider(object):
    """Crawl Lagou job postings via the positionAjax JSON endpoint.

    Prompts for a city and a keyword, then pages through the search
    results, collecting selected fields of each posting into
    ``self.item_list`` and dumping them to ``lagou.json``.
    """

    def __init__(self):
        # Headers copied from a real browser session.  Referer and
        # User-Agent are part of Lagou's anti-scraping checks; the Cookie
        # carries a session-bound JSESSIONID and will eventually expire.
        self.headers = {
                "Accept": "application/json, text/javascript, */*; q=0.01",
                "Accept-Encoding": "gzip, deflate, br",
                "Accept-Language": "zh-CN,zh;q=0.9",
                "Connection": "keep-alive",
                "Content-Length": "26",
                "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
                "Cookie": "JSESSIONID=ABAAABAAADEAAFI0DD07E26EB9FB6F8E52B9CDBC703B9DE; _ga=GA1.2.754895021.1542433960; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1542433960; _gid=GA1.2.1536395690.1542433960; user_trace_token=20181117135239-fde4320a-ea2c-11e8-892b-5254005c3644; LGUID=20181117135239-fde435ce-ea2c-11e8-892b-5254005c3644; index_location_city=%E5%85%A8%E5%9B%BD; X_HTTP_TOKEN=784caa92b730af22c71c7e97b90c6558; sajssdk_2015_cross_new_user=1; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%22167203edb74166-00ff6f92a0c4db-4313362-2073600-167203edb75bf9%22%2C%22%24device_id%22%3A%22167203edb74166-00ff6f92a0c4db-4313362-2073600-167203edb75bf9%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24latest_referrer_host%22%3A%22%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%7D%7D; TG-TRACK-CODE=search_code; SEARCH_ID=f98b8ef78b5b47cf966c7f2e098b8d1a; LGRID=20181117150411-fc29f4a8-ea36-11e8-892c-5254005c3644; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1542438252",
                "Host": "www.lagou.com",
                "Origin": "https://www.lagou.com",
                # Referer is checked by Lagou as an anti-scraping measure
                # (it must look like we came from the search page).
                "Referer": "https://www.lagou.com/jobs/list_python?",
                # User-Agent is also commonly checked for anti-scraping.
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36",
                "X-Anit-Forge-Code": "0",
                "X-Anit-Forge-Token": "None",
                "X-Requested-With": "XMLHttpRequest"
                }

        self.post_url = "https://www.lagou.com/jobs/positionAjax.json?"
        self.params = {
                "needAddtionalResult": "false",
                "city": raw_input("请输入需要抓取的城市: ")
                }
        # Current page number; incremented by main() after each page.
        self.page = 1
        self.form_data = {
                "first": "false",
                "pn": self.page,
                "kd": raw_input("请输出需要爬取的职位: ")
                }
        # Accumulates one dict per job posting across all pages.
        self.item_list = []

    def send_request(self, url):
        """POST the search form for the current page and return the response."""
        print("[INFO]正在发送请求: {} {}".format(self.post_url, self.page))
        # BUGFIX: keep the page number in the form data in sync with
        # self.page -- previously "pn" was frozen at 1, so every request
        # fetched the first page and main() looped forever.
        self.form_data["pn"] = self.page
        response = requests.request("post", url, headers=self.headers, params=self.params, data=self.form_data)
        return response

    def parse_response(self, response):
        """Extract job items from one page of results.

        Appends one dict per posting to self.item_list.  Returns True
        when there are no more results (signals main() to stop paging);
        returns None otherwise.
        """
        json_obj = response.json()
        # jsonpath() returns a list of matches, or False when nothing
        # matched -- guard before indexing to avoid a TypeError on the
        # last (empty) page.  The match is equivalent to the direct
        # lookup json_obj["content"]["positionResult"]["result"].
        matches = jsonpath(json_obj, "$..result")
        if not matches or not matches[0]:
            return True

        for result in matches[0]:
            # Each result dict describes one job posting; keep only the
            # fields we care about.
            item = {
                "createTime": result["createTime"],            # posting time
                "city": result["city"],                        # city
                "positionName": result["positionName"],        # job title
                "salary": result["salary"],                    # salary range
                "companySize": result["companySize"],          # company size
                "district": result["district"],                # district
                "companyFullName": result["companyFullName"],  # company name
            }
            self.item_list.append(item)

    def save_data(self):
        """Dump all collected items to lagou.json (overwritten on each call)."""
        # Use a context manager so the file handle is always closed,
        # even if json.dump raises.
        with open('lagou.json', 'w') as f:
            json.dump(self.item_list, f)

    def main(self):
        """Fetch pages until an empty result set, saving after each page."""
        while True:
            response = self.send_request(self.post_url)

            # parse_response returns True once a page yields no results.
            if self.parse_response(response):
                break
            self.page += 1

            self.save_data()


if __name__ == "__main__":
    lagou = LagouSpider()
    lagou.main()

