﻿#coding:utf-8
#author:肥肥兔
#2015.9.30

"""
以语言为索引查找相应职位，并把相应的职位连接写进link.txt
"""
import msvcrt
import os
import re
import socket
import time
import urllib.error   as error
import urllib.request as request

timeout = 10
socket.setdefaulttimeout(timeout)  # global timeout (seconds) for every urllib request
address = 'result'  # output directory for the crawl results

DIRECTION = ['java','python','C','PHP','Html5','Android','ios']  # job-search keywords to crawl

# Request headers copied from a real browser session.
# BUG FIX: 'User-Agent' used to be one fused string literal with no ':' or
# trailing comma, so implicit string concatenation merged it with the
# 'Accept-Encoding' key — no real User-Agent was ever sent and the bogus
# header name would be rejected by http.client.
# NOTE(review): the Cookie is a captured session and will eventually expire.
HEADER = {
			'Host': 'www.liepin.com',
			'Connection': 'keep-alive',
			'Cache-Control':'max-age=0',
			'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
			'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.132 Safari/537.36',
			# NOTE(review): responses are .decode()d directly elsewhere; if the
			# server honours gzip, decoding will fail — consider 'identity'.
			'Accept-Encoding': 'gzip, deflate, sdch',
			'Accept-Language':' zh-CN,zh;q=0.8',
            'Cookie': ' __uuid=1439382635919.93; _uuid=21E402D7FFA8440B4CABE711E287AB5F; pgv_pvi=7795941376; pgv_si=s5240832; JSESSIONID=6C5A8DCCB14DE2B61DFA3AAEA8B78ABE; __tlog=1443604862924.50%7CR000000035%7CR000000035%7C00000000%7C00000000; __session_seq=47; __uv_seq=52; _mscid=00000000; rand=2c9595ee6d8c6a53ee62e5e000f2eb21; Hm_lvt_a2647413544f5a04f00da7eee0d5e200=1443265020,1443517956,1443587751,1443604863; Hm_lpvt_a2647413544f5a04f00da7eee0d5e200=1443622539; _lpcdn=s%2C1%7Ccore.pc%2C1%7Ca.pc%2C1%7Cwww.pc%2C1%7Cjob.pc%2C1%7Ccompany.pc%2C1%7Cit.pc%2C1%7Cc.pc%2C1%7Clpt.pc%2C1%7Ch.pc%2C1%7Carticle.pc%2C1%7Cats.pc%2C1%7Ccampus.pc%2C1%7Ccity.pc%2C1%7Cclt.pc%2C1%7Cevent.pc%2C1%7Cphone.pc%2C1%7Csns.pc%2C1%7Crts.pc%2C1%7Cmsk.pc%2C1%7Cacs.pc%2C1'
		}


def del_label(content):
	"""
	Strip HTML tags from *content* and return the plain text.

	Typically used on text that still carries decorative markup after
	the first extraction pass.
	"""
	# '<[^>]+>' matches a '<', then one or more non-'>' characters, then '>'.
	return re.sub('<[^>]+>', "", content)

def get_html(url,data=None,header=None,method ="GET"):
	"""
	Fetch *url* and return the raw response body as bytes.

	If *header* is given, a Request carrying those headers is built;
	if *data* is also given it is encoded and sent as the request body.
	Returns False on HTTPError/URLError (after printing the reason).
	"""
	try:
		if data is None and header is None:
			# BUG FIX: this branch referenced an undefined ``opener`` and
			# fell through without returning the content (returned None).
			return request.urlopen(url).read()
		elif data is None:
			req = request.Request(url,headers=header,method=method)
			return request.urlopen(req).read()
		else:
			req = request.Request(url,data.encode(),header,method=method)
			return request.urlopen(req).read()
	except error.HTTPError as e:
		print(str(e.code),str(e.reason))
		return False
	except error.URLError as e:
		print(str(e.reason))
		return False

def to_catch_a(max_num,d):
	"""
	Crawl result pages 0..max_num for keyword *d* and collect the job
	hyperlinks of each page.

	Returns a list with one link-list per page (via get_a, defined
	elsewhere in this file), or False if any page fails to download.
	"""
	print("开始爬取"+d+"所有超链接……")
	a_link = []
	query = "imscid=R000000035&key="+d+"&industries=040%2C420%2C010%2C030&dqs=050&curPage="
	for page in range(0,max_num+1):
		# BUG FIX: the original concatenated the int ``page`` directly
		# onto the string (TypeError); convert explicitly.
		url = "http://www.liepin.com/zhaopin/?" + query + str(page)
		data = query + str(page)
		raw = get_html(url,data,HEADER)
		# BUG FIX: check for failure BEFORE decoding — get_html returns
		# False on error (False has no .decode()), and the old
		# ``str(...) != False`` test could never be false.
		if raw is False:
			return False
		a_link.append(get_a(raw.decode()))  # get_a: hyperlink extractor defined elsewhere
	return a_link


def main():
	"""
	Entry point: for every keyword in DIRECTION, fetch page 0 of the
	search results, work out the last page number, crawl every page via
	to_catch_a() and append the links to result/link.txt (one section
	per keyword, via write_txt defined elsewhere in this file).
	"""
	link_path = address+"\\"+'link.txt'
	for d in DIRECTION:
		query = "imscid=R000000035&key="+d+"&industries=040%2C420%2C010%2C030&dqs=050&curPage=0"
		url = "http://www.liepin.com/zhaopin/?" + query
		raw = get_html(url,query,HEADER)
		# BUG FIX: test the raw result before decoding — the original
		# decoded first (AttributeError when get_html returns False) and
		# its str()-wrapped ``!= False`` check was dead code.
		if raw is False:
			print(d +"  登陆失败")
		else:
			content = raw.decode()
			# The last-page number sits just before title="末页"; the site
			# reports one less than the real page count, hence the +1.
			# BUG FIX: extract the digits with a regex instead of the
			# fragile [-14:-12]/[-13:-12] slicing, and handle "no match"
			# instead of crashing with an uncaught IndexError.
			tail = re.findall('.{0,15}title="末页"',content)
			digits = re.findall(r'\d+', tail[0]) if tail else []
			if not digits:
				print(d +"  登陆失败")
			else:
				max_num = int(digits[-1]) + 1
				a_link = to_catch_a(max_num,d)
				if a_link is False:
					print(d + "  超链接失败")
				else:
					print(d + "  超链接完成")
					write_txt(link_path,d+'\n')
					for page_links in a_link:
						for target_url in page_links:
							write_txt(link_path,target_url+'\n')
					write_txt(link_path,'---------------\n')
		time.sleep(5)  # throttle requests so the crawler is less conspicuous
	return



if __name__ == '__main__':
	# Create the output folder for the results if it does not exist yet.
	print("开始爬取……")
	if not os.path.exists(address):  # idiomatic: avoid ``== False``
		os.makedirs(address)
	main()
	print('爬取结束，按任意键结束')
	msvcrt.getch()  # Windows-only: pause so the console window stays open