# -*- coding:utf-8 -*-
import os
import time
import random
import re
import requests
import pandas as pd
from mod_Proxies import list_UserAgents
from mod_Proxies import get_Proxies
import sys
# Python-2-only hack: force the process default encoding to UTF-8 so the
# Chinese strings below round-trip without UnicodeDecodeError. `reload` is
# not a builtin in Python 3 (and setdefaultencoding no longer exists), so
# guard the calls to keep the module importable under both versions.
if sys.version_info[0] == 2:
	reload(sys)
	sys.setdefaultencoding('utf-8')


# Pick one User-Agent at random (module import time) from the project-provided pool.
UserAgent = random.choice(list_UserAgents.UserAgent)

# Request headers mimicking a browser XHR POST to Lagou's search endpoint.
headers = {
	'Accept': 'application/json, text/javascript, */*; q=0.01',
	'Accept-Encoding': 'gzip, deflate, br',
	'Accept-Language': 'zh-CN,zh;q=0.9',
	'Cache-Control': 'no-cache',
	'Connection': 'keep-alive',
	# NOTE(review): hard-coded Content-Length may disagree with the actual
	# form body that `requests` encodes — confirm it is really needed.
	'Content-Length': '25',
	'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
	# 'Cookie': 'user_trace_token=20180118150844-6bdeb2fa-fc1e-11e7-a854-525400f775ce; LGUID=20180118150844-6bdeb67e-fc1e-11e7-a854-525400f775ce; index_location_city=%E5%85%A8%E5%9B%BD; TG-TRACK-CODE=index_search; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1514212709,1514900095,1516259322,1516340995; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1516347901; JSESSIONID=ABAAABAABEEAAJA70AD042495453200E319666EC9BCD985; _gid=GA1.2.1853430185.1516259322; _gat=1; _ga=GA1.2.1670913001.1516259322; LGSID=20180119154501-a7c518ca-fcec-11e7-a52b-5254005c3644; PRE_UTM=; PRE_HOST=; PRE_SITE=https%3A%2F%2Fwww.lagou.com%2F; PRE_LAND=https%3A%2F%2Fwww.lagou.com%2Fjobs%2Flist_python%3FlabelWords%3D%26fromSearch%3Dtrue%26suginput%3D; LGRID=20180119154501-a7c51a1f-fcec-11e7-a52b-5254005c3644; SEARCH_ID=b9a2588e1df94a008622f10c471e4dac',
	'Host': 'www.lagou.com',
	'Origin': 'https://www.lagou.com',
	'Pragma': 'no-cache',
	# Referer must be the search-results page or the endpoint rejects the request.
	'Referer': 'https://www.lagou.com/jobs/list_python?labelWords=&fromSearch=true&suginput=',
	'User-Agent': UserAgent,
	'X-Anit-Forge-Code': '0',
	'X-Anit-Forge-Token': None,
	'X-Requested-With': 'XMLHttpRequest',
}

# AJAX endpoint returning job postings as JSON (social recruiting, not school jobs).
start_url = 'https://www.lagou.com/jobs/positionAjax.json?needAddtionalResult=false&isSchoolJob=0'

proxy_list = get_Proxies.temp_list()  # Fetch the proxy pool once at import time.


def get_proxy():
	"""Pick a random entry from the module-level proxy_list and return a
	requests-style proxies mapping for plain HTTP traffic."""
	entry = list(random.choice(proxy_list))
	return {'http': 'http://' + str(entry[0])}


def get_more_data(kd, pg, reg):
	"""Scrape pages 1..pg of Lagou's position search and append rows to a CSV.

	kd  -- search keyword, posted as the 'kd' form field
	pg  -- number of result pages to fetch (inclusive)
	reg -- regex whose capture groups become the CSV columns

	Side effects: deletes and recreates ./data_Python.csv; performs one
	proxied HTTP POST per page with randomized delays between requests.
	"""
	file_name = r'./data_Python.csv'
	# Start from a clean slate: drop any output left over from a previous run.
	if os.path.exists(file_name):
		os.remove(file_name)
		print('已删除原有的存储文件')

	# The pattern is loop-invariant — compile it once, not on every page.
	pattern = re.compile(reg)

	i = 0
	for pn in range(1, pg + 1):
		form = {
			'first': 'true',
			'pn': pn,
			'kd': kd,
		}
		# Random pause so the request cadence looks less like a bot.
		time.sleep(random.randint(5, 9))
		proxies = get_proxy()
		print('正在使用代理：{0}'.format(proxies.get('http')))
		res_doc = requests.request('POST', start_url, data=form, headers=headers, proxies=proxies)
		time.sleep(2)

		# Each regex match is a tuple of capture groups -> one CSV row.
		data = pd.DataFrame(re.findall(pattern, res_doc.text))
		print(data)

		i += 1
		print('正在追加存储文件<{0}>...'.format(i))
		# mode='a+' appends this page's rows below the previous pages'.
		data.to_csv(file_name, header=False, index=False, mode='a+')


if __name__ == '__main__':
	# Crawl 31 pages of "Python" postings; the capture groups below pull out
	# education, work experience, city, salary, position name, company short
	# name and company full name from the JSON response body.
	keyword = 'Python'
	page_count = 31
	row_pattern = '{"companyId":.*?,"positionId":.*?,"industryField":".*?","education":"(.*?)","workYear":"(.*?)","city":"(.*?)","positionAdvantage":".*?","createTime":".*?","salary":"(.*?)","positionName":"(.*?)","companySize":".*?","companyShortName":"(.*?)","companyLogo":".*?","financeStage":".*?","jobNature":".*?","approve":.*?,"companyLabelList":.*?,"publisherId":.*?,"score":.*?,"district":".*?","positionLables":.*?,"industryLables":.*?,"businessZones":.*?,"hitags":.*?,"resumeProcessRate":.*?,"imState":".*?","lastLogin":.*?,"explain":.*?,"plus":.*?,"pcShow":.*?,"appShow":.*?,"deliver":.*?,"gradeDescription":.*?,"promotionScoreExplain":.*?,"firstType":".*?","secondType":".*?","isSchoolJob":.*?,"subwayline":.*?,"stationname":.*?,"linestaion":.*?,"formatCreateTime":".*?","longitude":".*?","latitude":".*?","companyFullName":"(.*?)","adWord":.*?}'
	get_more_data(kd=keyword, pg=page_count, reg=row_pattern)
