#coding:utf-8
#author:肥肥兔
#2015.9.30

"""
Read link.txt, fetch every listed URL, and write the extracted job-posting
text into the .txt file matching the group name -- e.g. the requirements
for Java positions go into java.txt.
"""
import socket
import urllib.request as request
import urllib.error   as error
import re
import time
import msvcrt  # Windows-only: used for the final "press any key" pause

# Global socket timeout (seconds) so a hung HTTP request cannot block forever.
timeout = 10
socket.setdefaulttimeout(timeout)

# Request headers copied from a real browser session (cookies included so the
# site serves the markup the scraper expects).
# NOTE(review): 'Host' reads "jop.liepin.com" -- possibly a typo for
# "job.liepin.com"; confirm against the URLs stored in link.txt.
header = {
    'Host': 'jop.liepin.com',
    'Connection': 'keep-alive',
    'Cache-Control': 'max-age=0',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    # BUG FIX: this entry was a single bare string ('User-Agent: Mozilla...')
    # with no trailing comma, so implicit string concatenation fused it with
    # the 'Accept-Encoding' key below and the User-Agent header was never
    # actually sent. It is now a proper key/value pair.
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.132 Safari/537.36',
    'Accept-Encoding': 'gzip, deflate, sdch',
    'Accept-Language': ' zh-CN,zh;q=0.8',
    # BUG FIX: the original built this value with backslash line-continuations
    # whose leading tabs ended up *inside* the string, embedding tab runs in
    # the Cookie header. Adjacent string literals now yield a clean
    # "name=value; name=value" cookie string.
    'Cookie': ("Hm_lpvt_a2647413544f5a04f00da7eee0d5e200=1443749830; "
               "Hm_lvt_a2647413544f5a04f00da7eee0d5e200=1443604863,1443627160,1443684008,1443749803; "
               "JSESSIONID=49CB830B0BFDB7D2B92779F8E8FFBF9C; "
               "__session_seq=2; "
               "__tlog=1443749802287.13%7C00000000%7C00000000%7C00000000%7C00000000; "
               "__uuid=1439382635919.93; "
               "__uv_seq=2; "
               "_lpcdn=s%2C1%7Ccore.pc%2C1%7Ca.pc%2C1%7Cwww.pc%2C1%7Cjob.pc%2C1%7Ccompany.pc%2C1%7Cit.pc%2C1%7Cc.pc%2C1%7Clpt.pc%2C1%7Ch.pc%2C1; "
               "_mscid=00000000; "
               "_uuid=21E402D7FFA8440B4CABE711E287AB5F; "
               "pgv_pvi=7795941376; "
               "pgv_si=s9015726080; "
               "rand=3252a4550105fb5c608f957b223e747e")
}


def del_label(content):
	"""Strip every HTML tag (anything between '<' and '>') from *content*.

	Used as a second cleanup pass when the first extraction still
	contains decorative markup.
	"""
	# '[^>]+' matches one or more characters that are not '>', i.e. the
	# whole body of a single tag.
	return re.sub('<[^>]+>', '', content)

def OpenTxt(fn):
	"""Return the contents of file *fn* as a list of lines (with '\\n').

	FIX: the original leaked the file handle -- ``open(...)`` was never
	closed. A ``with`` block now guarantees it is released.
	"""
	# NOTE(review): no explicit encoding, so this uses the platform
	# default (likely GBK on the Windows box this script targets) --
	# confirm link.txt's actual encoding before adding encoding='utf-8'.
	with open(fn, 'r') as f:
		return f.readlines()

def write_txt(filename, content, method='a'):
	"""Write *content* to *filename*.

	*method* is the ``open()`` mode; the default 'a' appends so repeated
	calls accumulate text in the same file.
	"""
	with open(filename, method) as out:
		out.write(content)

def to_line(line):
	"""Split *line* (a list of raw file lines) into groups.

	Groups are delimited by "---------------" separator lines; newlines
	are stripped from every entry and element 0 of each group is the
	group name.

	NOTE(review): a trailing group that is not followed by a final
	separator line is silently discarded (matching the original
	behaviour) -- confirm link.txt always ends with a separator.
	"""
	groups = []
	current = []
	for raw in line:
		if raw == "---------------\n":
			# Separator reached: the accumulated group is complete.
			groups.append(current)
			current = []
		else:
			current.append(raw.replace('\n', ''))
	return groups

if __name__ == '__main__':
	filename = "link.txt"
	line = OpenTxt(filename)
	new_line = to_line(line)
	# Each group: element 0 is the category name, the rest are URLs.
	for link in new_line:
		print("爬取" + link[0] + "招聘信息")
		print("---------------")
		for index in range(1, len(link)):
			url = link[index]
			req = request.Request(url, headers=header, method="GET")
			try:
				content = request.urlopen(req).read().decode()
			except error.HTTPError as e:
				print(str(e.code), str(e.reason))
			except error.URLError as e:
				print(str(e))
			else:
				try:
					# str.find is used because the regex approach kept
					# failing to match (original author's note).
					# BUG FIX: find() returns -1 on a miss -- it never
					# raises -- so the original sliced garbage
					# (content[-1:end]) and wrote junk to the output file
					# instead of reporting a failure. Raise explicitly so
					# the existing except branch reports it.
					begin = content.find('<div class="content content-word">')
					if begin == -1:
						raise ValueError("start marker not found")
					end = content.find('</div>', begin)
					if end == -1:
						raise ValueError("end marker not found")
					content = del_label(content[begin:end])
				except Exception as e:
					print("匹配失败：" + str(e))
				else:
					print("匹配成功，正在写入"+link[0]+'.txt……',end="")
					try:
						write_txt(link[0]+'.txt',content+'。')
					except Exception as e:
						print("失败："+str(e))
					else:
						print("成功")
		print("---------------")
		time.sleep(5)  # pause between groups so the crawl looks less bot-like
	print('全部写入完成……')
	msvcrt.getch()  # Windows-only: wait for a keypress before the console closes
