# -*- coding: utf-8 -*-
import scrapy
from lj.items import LjItem


class LjPositionSpider(scrapy.Spider):
	"""Spider that scrapes Java job listings from lagou.com.

	Collects summary fields from the listing page and can optionally
	follow each listing's URL to fetch the full job description
	(see the commented-out Request at the end of ``parse``).
	"""
	name = 'lj_position'
	allowed_domains = ['www.lagou.com']
	start_urls = ['https://www.lagou.com/zhaopin/Java/']

	def parse(self, response):
		"""Parse the job-listing page.

		:param response: downloaded listing page
		:yields: one ``LjItem`` per matched ``<ul>`` block
		"""
		for job_primary in response.xpath('//div[@id="s_position_list"]//ul'):
			# Create a fresh item per iteration. Reusing a single shared
			# item (as the old code did) makes every yielded reference
			# alias one mutable object holding the last listing's data.
			item = LjItem()
			item['title'] = job_primary.xpath('.//h3/text()').extract()
			item['salary'] = job_primary.xpath('.//span[@class="money"]/text()').extract()
			item['company'] = job_primary.xpath('.//div[@class="company_name"]/a/text()').extract()
			item['url'] = job_primary.xpath('.//a[@class="position_link"]/@href').extract_first()
			# //text() extracts the address text instead of the raw
			# <span> markup the old selector stored.
			item['work_addr'] = job_primary.xpath('.//span[@class="add"]//text()').extract_first()
			item['publish_date'] = job_primary.xpath('.//span[@class="format-time"]/text()').extract()
			# Keep SelectorList objects (no .extract()) so they can still
			# be queried with .xpath(); the old code extracted to plain
			# strings and then crashed calling .extract() on a string.
			li_b_l = job_primary.xpath('.//div[@class="p_bot"]/div[@class="li_b_l"]')
			if not li_b_l:
				self.logger.debug('no li_b_l block found in listing')
			else:
				# The li_b_l div carries the experience/education line
				# — presumably "经验/学历"; TODO confirm against the page.
				item['work_age'] = li_b_l[0].xpath('.//text()').extract()
			item['industry'] = job_primary.xpath('.//div[@class="industry"]').extract()
			# The old bare `yield` emitted None instead of the item.
			yield item

			# To also scrape the detail page, follow the listing URL:
			# yield scrapy.Request(item['url'], meta={'item': item}, callback=self.scrapy_inner_page)

	def scrapy_inner_page(self, response):
		"""Parse a job-detail page and return the completed item.

		Expects the partially-filled item passed via ``request.meta``.

		:param response: downloaded detail page
		:returns: the item with ``content`` filled, or None if the
			description block is not found
		"""
		# Receive the data already scraped by the listing-page callback.
		item = response.meta['item']
		for info in response.xpath('//div[@class="detail-content"]/div[@class="job-sec"]'):
			block_title = info.xpath('.//h3/text()').extract()
			if '职位描述' in block_title:
				all_desc = info.xpath('./div[@class="text"]//text()').extract()
				# ''.join avoids the KeyError the old accumulation loop
				# hit when item['content'] was never initialized.
				item['content'] = ''.join(all_desc)
				return item
