#-*-coding:utf-8-*-
from scrapy import Request
from scrapy.spiders import Spider
from scrapy_splash import SplashRequest
from scrapy_splash import SplashMiddleware
from scrapy.http import Request, HtmlResponse
from scrapy.selector import Selector
from CrawlJob.items import job
import chardet
import re
import sys

reload(sys)
sys.setdefaultencoding('utf-8')


class SplashSpider(Spider):
	sets = set()
	indexs = 0
	name = 'fuben'
	#start_urls = [
	#	'http://sou.zhaopin.com/jobs/searchresult.ashx?bj=4010200&sj=926&jl=%E9%9D%92%E5%B2%9B&sm=0&isfilter=0&fl=703&isadv=0&sg=cf38e79ef3d1473c9e1fd74cfedb2a71&p=1',
	#	'http://sou.zhaopin.com/jobs/searchresult.ashx?bj=160000&sj=6&jl=%E9%9D%92%E5%B2%9B&p=1&isadv=0'
	#]
	bj =[
"160000",
"160300",
"160200",
"160400",
"200500",
"200300",
"5001000"]
	sj = ["044","045","665","667","668","047","053","679","048","687","863","864","317",
"669","861","054","057","671","672","666","2034","2035","2036","2037","2038","2039","2040",
"2041","2042","2043","060","314","043","407","557","316","675","676","677","052","670","056",
"552","2046","2047","2048","2049","2050","2051","2052","2053","2054","2055","2056","2057","2058",
"2059","2060","2061","2062","556","693","049","694","695","696","868","692","2063","2064","2065","561",
"040","041","058","315","046","051","055","388","059","389","678","551","690","699","698","840",
"398","928","313","688","042","689","841","680","500","323","324","325",
"322","320","321","326","558","499","2183","327"
	]



	ins = ["210500","160400","160000","160500","160200","300100","160100","160600"]



	city = ["广州","韶关","深圳","珠海","汕头","佛山","江门","湛江","茂名","肇庆","惠州","梅州","汕尾","河源","阳江","清远","东莞","中山","潮州","揭阳","云浮","武汉","黄石","十堰","宜昌","襄阳","鄂州","荆门","孝感","荆州","黄冈","咸宁","随州","恩施","公安","武穴","天门","仙桃","潜江","宜城","神农架","西安","铜川","宝鸡","咸阳","渭南","延安","汉中","榆林","安康","商洛","兴平","杨凌","西咸新区","成都","自贡","攀枝花","泸州","德阳","绵阳","广元","遂宁","内江","乐山","南充","眉山","宜宾","广安","达州","雅安","巴中","资阳","阿坝","甘孜","凉山","峨眉","西昌","简阳","大连","沈阳","鞍山","抚顺","本溪","丹东","锦州","营口","阜新","辽阳","盘锦","铁岭","朝阳","葫芦岛","兴城","海城","昌图","开原","东港","长春","珲春","吉林市","四平","辽源","通化","白山","松原","白城","延边","公主岭","南京","苏州","昆山","常熟","张家港","无锡","江阴","徐州","常州","南通","连云港","淮安","盐城","扬州","镇江","泰州","宿迁","太仓市","宜兴","济南","青岛","淄博","枣庄","东营","烟台","潍坊","济宁","泰安","威海","日照","莱芜","临沂","德州","聊城","滨州","菏泽","杭州","宁波","温州","嘉兴","湖州","绍兴","金华","衢州","舟山","台州","丽水","方家山","南宁","柳州","桂林","梧州","北海","防城港","钦州","贵港","玉林","百色","贺州","河池","来宾","崇左","合肥","芜湖","蚌埠","淮南","马鞍山","淮北","铜陵","安庆","黄山","滁州","阜阳","宿州","六安","亳州","池州","宣城","凤阳","广德","宿松","石家庄","唐山","秦皇岛","邯郸","邢台","保定","张家口","承德","沧州","廊坊","衡水","遵化","太原","大同","阳泉","长治","晋城","朔州","晋中","运城","忻州","临汾","吕梁","永济市","呼和浩特","包头","乌海赤峰","通辽","鄂尔多斯","呼伦贝尔","兴安盟","锡林郭勒盟","乌兰察布","巴彦淖尔","阿拉善盟","乌审旗","满洲里","哈尔滨","齐齐哈尔","鸡西","鹤岗","双鸭山","大庆","伊春","佳木斯","七台河","牡丹江","黑河","绥化","大兴安岭","安达","双城","尚志","绥芬河","肇东市","福州","厦门","莆田","三明","泉州","漳州","南平","龙岩","宁德","南昌","景德镇","萍乡","九江","新余","鹰潭","赣州","吉安","宜春","抚州","上饶","郑州","开封","洛阳","平顶山","安阳","鹤壁","新乡","焦作","濮阳","许昌","漯河","三门峡","南阳","商丘","信阳","周口","驻马店","济源","西平","长沙","株洲","湘潭","衡阳","邵阳","岳阳","常德","张家界","益阳","郴州","永州","怀化","娄底","湘西","海口","三亚","洋浦市/洋浦经济开发区","琼海","儋州","五指山","文昌","万宁","东方","定安","屯昌","澄迈","临高","琼中","保亭","白沙","昌江","乐东","陵水","贵阳","六盘水","遵义","安顺","铜仁","黔西南","毕节","黔东南","黔南","拉萨","昌都","山南","日喀则","那曲","阿里","林芝","兰州","嘉峪关","金昌","白银","天水","武威","张掖","平凉","酒泉","庆阳","定西","陇南","临夏","甘南","西宁","海东","海北","黄南","海南州","果洛","玉树","海西","银川","石嘴山","吴忠","固原","中卫","乌鲁木齐","克拉玛依","吐鲁番","哈密","昌吉","博尔塔拉","巴音郭楞","阿克苏","克孜勒苏","喀什","和田","伊犁","塔城","阿勒泰","石河子","奎屯市","乌苏","阿拉尔","图木舒克","五家渠","北屯市","香港","澳门","台湾省"
	]
	def start_requests(self):
		#for url in self.start_urls:
			#yield SplashRequest(url
			#                   , args={'wait': '0.5'}
			#                   ,endpoint='render.html'
			#                  )
			#yield Request(url, callback=self.parse)
		for i in self.bj:
			for j in self.sj:
				for k in self.ins:
					for l in self.city:
						strs = "http://sou.zhaopin.com/jobs/searchresult.ashx?bj="+i+"&sj="+j+"&in="+k+"&jl="+l.encode('utf-8').decode('utf-8')+"&p=1&isadv=0"

						print strs
						yield Request(strs,self.parse)

		pass


	def parse(self, response):
		name = None;
		string = response.body


		textline = response.xpath("//table")
		for text in textline:
			jobs = job();
			# print text.extract()
			title = text.xpath("./tr[1]/td[@class = 'zwmc']/div/a/text()").extract()
			if title == None:
				print "None"
				return
			url = text.xpath("./tr[1]/td[@class = 'zwmc']/div/a/@href").extract()
			if len(url) > 0:
				url = url[0]
				digit = re.search(r".*/(\d+).*", url).group(1)
				print digit
				if(len(digit) == 15 and digit not in self.sets):
					self.sets.add(digit)
					print len(self.sets)
				else:
					print "已经存在",len(self.sets)
					continue
				jobs['url'] = url
			else:
				jobs['url'] = ''

			print url
			#print digit
			if (len(title) > 0):
				name = title[0].encode('utf-8').decode("utf-8")
				jobs['name'] = name
			else:
				jobs['name'] = ''
			print name

			# //*[@id="newlist_list_content_table"]/table[2]/tbody/tr[1]/td[3]/a[1]
			company = text.xpath("./tr[1]/td[@class='gsmc']/a[1]/text()").extract()
			if len(company):
				company = company[0].encode("utf-8").decode('utf-8')
				jobs['company'] = company
			else:
				jobs['company'] = ''
			print company
			salary = text.xpath("./tr[1]/td[@class = 'zwyx']/text()").extract()
			if len(salary) > 0:
				salary = salary[0].encode("utf-8").decode("utf-8")
				jobs['salary'] = salary
			else:
				jobs['salary'] = ''
			print salary
			city = text.xpath("./tr[1]/td[@class = 'gzdd']/text()").extract()
			if len(city) > 0:
				city = city[0].encode('utf-8').decode("utf-8");
				jobs['city'] = city
			else:
				jobs['city'] = ''
			
			print city
			"""
			title = text.xpath("./tbody/tr[1]/td[@class = 'zwmc']/div/a/text()").extract()
			if(len(title)>0):
				name =  title[0].encode('utf-8').decode("utf-8");
				jobs['name'] = name
			else:
				jobs['name'] = ''
			print name
			url = text.xpath("./tbody/tr[1]/td[@class = 'zwmc']/div/a/@href").extract()
			if len(url)>0:
				url =  url[0]
				jobs['url'] = url
			else:
				jobs['url'] = ''
			print url
			#//*[@id="newlist_list_content_table"]/table[2]/tbody/tr[1]/td[3]/a[1]
			company = text.xpath("./tbody/tr[1]/td[@class = 'gsmc']/a[1]/text()").extract()
			if len(company):
				company = company[0].decode("utf-8").encode('utf-8')
				jobs['company'] = company
			else:
				jobs['company'] = ''
			print company
			salary = text.xpath("./tbody/tr[1]/td[@class = 'zwyx']/text()").extract()
			if len(salary)>0:		
				salary = salary[0].decode("utf-8").encode("utf-8")
				jobs['salary'] = salary
			else:
				jobs['salary'] = ''
			print salary
			city = text.xpath("./tbody/tr[1]/td[@class = 'gzdd']/text()").extract()
			if len(city)>0:
				city = city[0].encode('utf-8').decode("utf-8");
				jobs['city'] = city
			else:
				jobs['city'] = ''
			print city
			"""
		# yield jobs
		#next_page_url = response.xpath(
		#	'/html/body/div[3]/div[3]/div[3]/form/div[1]/div[1]/div[3]/ul/li[11]/a/@href').extract()

		next_page_url= response.xpath("//a[@class = 'next-page']/@href").extract()
		if next_page_url != None and len(next_page_url) != 0:
			self.indexs+=1
			print self.indexs,next_page_url
			yield Request(next_page_url[0], callback=self.parse)
		else:
			print "error"

		# yield SplashRequest(next_page_url[0]
		#               , args={'wait': '0.5'}
		#               ,endpoint='render.html'
		#              )
