#-*-coding:utf-8-*-
import scrapy
from CrawlJob.items import CrawljobItem		
import json
from scrapy import Request
from scrapy.contrib.spiders import CrawlSpider
import re
from bs4 import BeautifulSoup
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
class MySpider(CrawlSpider):
	"""Crawl job postings from 51job.com.

	Starts from a keyword search, follows every job link on each result
	page, scrapes the detail page into a ``CrawljobItem``, and follows
	the "next page" link until pagination is exhausted.
	"""
	name = "51job"
	allowed_domains = ['search.51job.com', 'jobs.51job.com']
	start_urls = ['http://search.51job.com']
	# Search keywords; extend as needed, e.g.: '数据挖掘', 'C++', 'java',
	# '计算机', '数据分析', '测试工程师', '需求分析', '系统架构', '运营',
	# '前端', 'ui', '软件开发'
	computer = ['硬件工程师']
	# Sent with every request so the site sees a regular browser UA.
	headers = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}

	def start_requests(self):
		"""Issue one search request per keyword in ``self.computer``."""
		for keyword in self.computer:
			# The keyword is interpolated verbatim; Scrapy applies URL
			# encoding (safe_url_string) when the request is sent.
			url = "http://search.51job.com/jobsearch/search_result.php?keyword=" + keyword
			yield scrapy.Request(url, headers=self.headers, callback=self.parse)

	def parse(self, response):
		"""Follow each job link on a result page, then the next page."""
		for sel in response.css("html body div.dw_wp div#resultList.dw_table div.el p.t1 span a"):
			links = sel.re('href="(.*?)"')
			if links:
				yield scrapy.Request(links[0], headers=self.headers, callback=self.parse_item)
		# li[8] of the pagination list holds the "next page" anchor; on the
		# last page the node (or its href) is absent/empty, so guard both.
		nextpage = response.xpath('/html/body/div[2]/div[6]/div/div/div/ul/li[8]/a/@href').extract()
		if nextpage and nextpage[0].strip():
			yield scrapy.Request(nextpage[0], headers=self.headers, callback=self.parse)

	def _first(self, extracted, default=u''):
		"""Return the first value of an ``extract()`` result, or ``default``.

		Avoids the ``IndexError`` that ``extract()[0]`` raises whenever a
		page lacks one of the expected fields.
		"""
		return extracted[0] if extracted else default

	def parse_item(self, response):
		"""Scrape a single job-detail page into a ``CrawljobItem``."""
		item = CrawljobItem()
		item['offer'] = self._first(response.xpath('/html/body/div[2]/div[2]/div[2]/div/div[1]/strong/text()').extract())
		item['url'] = response.url
		item['city'] = self._first(response.xpath('//span[@class="lname"]/text()').extract())
		item['company'] = self._first(response.xpath('//p[@class="cname"]/a/@title').extract())
		# The second text node of the "fp" paragraph carries the address.
		fp_texts = response.xpath('//p[@class="fp"]/text()').extract()
		item['location'] = fp_texts[1].rstrip() if len(fp_texts) > 1 else u''
		item['title'] = self._first(response.xpath('//div[@class="cn"]/h1/@title').extract())
		item['people'] = self._first(response.xpath('/html/body/div[2]/div[2]/div[3]/div[1]/div/div/span[3]/text()').extract())
		# Strip non-breaking and regular spaces from the industry field.
		item['trades'] = self._first(response.xpath('/html/body/div[2]/div[2]/div[2]/div/div[1]/p[2]/text()').extract()).replace(u'\xa0', u'').replace(u' ', u'')
		item['work_experience'] = self._first(response.xpath('/html/body/div[2]/div[2]/div[3]/div[1]/div/div/span[1]/text()').extract())
		item['education'] = self._first(response.xpath('/html/body/div[2]/div[2]/div[3]/div[1]/div/div/span[2]/text()').extract())
		item['time'] = self._first(response.xpath('/html/body/div[2]/div[2]/div[3]/div[1]/div/div/span[4]/text()').extract())
		# Emit the item so pipelines actually receive it (was commented out).
		yield item

