#!/usr/bin/env python
# -*- coding:UTF-8 -*-

'''
Created on 2018年6月10日

@author: Administrator
'''

import requests
from bs4 import BeautifulSoup
import json
import time


# 网址：
# https://www.lagou.com/jobs/list_python?labelWords=&fromSearch=true&suginput=


# def main():
#     headers = {
#         "User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36",
#         "Host":"www.lagou.com",
#         "Referer":"https://www.lagou.com/"
#                
#                }
#     result = requests.get("https://www.lagou.com/jobs/list_python?labelWords=&fromSearch=true&suginput=", headers=headers)
#     print result.content

def main():
    headers = {
        "User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36",
        "Host":"www.lagou.com",
        "Referer":"https://www.lagou.com/jobs/list_python?labelWords=&fromSearch=true&suginput=",
        "X-Anit-Forge-Code":"0",
        "X-Anit-Forge-Token":None,
        "X-Requested-With":"XMLHttpRequest"
               }
    
    positionList = []
    positionDetailList = [] # 职位详情
    for i in range(1,6):
        formData = {
            "first":"true",
            "pn":i,
            "kd":"python"
                    }
        result = requests.post("https://www.lagou.com/jobs/positionAjax.json?city=%E6%88%90%E9%83%BD&needAddtionalResult=false", 
                           headers=headers,data=formData)
#     print result.content
#     print result.json()
        resultJson = result.json()
        print resultJson
        pagePositions = resultJson["content"]["positionResult"]["result"]
        for position in pagePositions:
            positionId = position["positionId"]
            # 拿到这个positionId，然后爬取这个职位的详情
            positionDetail = crawl_detail(positionId)
            positionDetailList.extend(positionDetail)
        positionList.extend(pagePositions)
        time.sleep(10)
        
#     positionLine = json.dumps(positionList, ensure_ascii=False);
#     print positionLine
    
    print positionDetailList
    
#     with open("positionJson.json","w") as fp:
#         fp.write(positionLine.encode("utf-8")) #写入数据，并制定编码
    
    #列表转字符串
    positionDetailLine = [str(i.encode("utf-8")) for i in positionDetailList]
    positionDetailLine = ''.join(positionDetailLine)
    with open("positionDetails.txt","w") as fp:
        fp.write(positionDetailLine) #写入数据，并制定编码

def crawl_detail(id):
    '爬取详情页面'
    url = "https://www.lagou.com/jobs/%s.html" %id
    headers ={
        "Host":"www.lagou.com",
        "Referer":"https://www.lagou.com/jobs/list_python?labelWords=&fromSearch=true&suginput=",
        "Upgrade-Insecure-Requests":"1",
        "User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36",
        "Cookie":"user_trace_token=20171206225246-1f3422cc-da95-11e7-87b6-525400f775ce; LGUID=20171206225246-1f342571-da95-11e7-87b6-525400f775ce; index_location_city=%E6%88%90%E9%83%BD; JSESSIONID=ABAAABAACEBACDG66577FF94C9BEDB6546B89A572203CE3; PRE_UTM=; PRE_HOST=www.baidu.com; PRE_SITE=https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3DK7FpZO6R7QdFns8EWPGZiTzUjTMsMcq5KY895YObwbK%26wd%3D%26eqid%3Ddbd3344000002270000000045b247c6a; PRE_LAND=https%3A%2F%2Fwww.lagou.com%2F; SEARCH_ID=2b40de3184ba4c9887f14bc04cde7b76; _gid=GA1.2.901533388.1529117664; _ga=GA1.2.1598130728.1512571953; LGSID=20180616105650-eae47308-7110-11e8-a51c-525400f775ce; LGRID=20180616110308-cbd8a7ee-7111-11e8-9643-5254005c3644; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1528630000,1528630014,1529117664; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1529118042; TG-TRACK-CODE=search_code",
        "Connection":"keep-alive",
        "Cache-Control":"max-age=0",
        "Accept-Language":"zh-CN,zh;q=0.9",
        "Accept-Encoding":"gzip, deflate, br",
        "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8"
              }
    result = requests.get(url,headers=headers)
#     print result.content
    soup = BeautifulSoup(result.content,'lxml')
    job_bt = soup.find('dd', attrs={'class':'job_bt'})
#     print job_bt
    print job_bt.text
    return job_bt.text
    
    
  
# Run the crawl only when executed as a script, not on import.
if __name__ == "__main__":
    main()

    




