# -*- coding: utf-8 -*-
import scrapy
from webspider.items import job_2spiderItem
from copy import deepcopy
import re


class JobTestSpider(scrapy.Spider):
    """Crawl 51job category listing pages and yield one item per job posting.

    Flow: ``parse`` collects the category links from the landing page and
    follows each; ``parse_detail`` scrapes every job on a listing page and
    then follows pagination back into itself.
    """

    name = 'job_c'
    allowed_domains = ['jobs.51job.com']
    start_urls = ['http://jobs.51job.com/']

    def parse(self, response):
        """Follow every category link on the landing page.

        The category label travels with the request in ``meta['item']``;
        a deep copy is taken per request so concurrent callbacks do not
        share (and clobber) one item instance.
        """
        item = job_2spiderItem()
        url_list = response.xpath(
            "//div[@class='filter']/div[@class='e e5']/div[@class='lkst']"
        )[0].xpath("./a")
        for link in url_list:
            item['key_word'] = link.xpath("./text()").extract_first()
            url = link.xpath("./@href").extract_first()
            if not url:
                # Anchor without an href: scrapy.Request(None) would raise.
                continue
            yield scrapy.Request(
                url,
                callback=self.parse_detail,
                meta={"item": deepcopy(item)},
            )

    @staticmethod
    def _order_field(order_html, pattern):
        """Extract one labelled field from the job's 'order' <p> HTML blob.

        Returns None (instead of raising AttributeError on a failed match)
        when the HTML is missing or the label is absent, so one malformed
        listing does not abort the whole page.
        """
        if not order_html:
            return None
        match = re.search(pattern, order_html)
        return match.group(1) if match else None

    def parse_detail(self, response):
        """Scrape each job on a listing page, then follow pagination."""
        item = response.meta["item"]
        for job in response.xpath("//div[@class='e ']"):
            item['name'] = job.xpath('.//a/@title').extract_first()
            item['url'] = job.xpath('.//a/@href').extract_first()
            # Second @title on the row is the company name; guard against
            # rows that expose fewer than two anchors.
            titles = job.xpath(".//a/@title").extract()
            item['company'] = titles[1] if len(titles) > 1 else None
            item['salary'] = job.xpath(".//span[@class='location']/text()").extract_first()
            item['release_time'] = job.xpath(".//span[@class='time']/text()").extract_first()
            item['location'] = job.xpath(".//span[@class='location name']/text()").extract_first()
            # The 'order' paragraph packs several labelled fields into one
            # HTML string; fetch it once instead of four times per job.
            order_html = job.xpath(".//p[@class='order']").extract_first()
            item['edu_background'] = self._order_field(order_html, "学历要求：(.*?)<span>")
            item['work_exp'] = self._order_field(order_html, "工作经验：(.*?)<span>")
            item['company_nature'] = self._order_field(order_html, "公司性质：(.*?)<span>")
            item['company_scale'] = self._order_field(order_html, "公司规模：(.*?)</p>")
            # Yield a copy: the same item object is mutated on the next
            # iteration, so yielding it directly would alias every job on
            # the page to the last one's values if a pipeline keeps refs.
            yield deepcopy(item)

        # Pagination: the dw_page widget exposes total pages and the current
        # page as the first two <input value="..."> fields. Narrow exception
        # handling replaces the old bare `except:` that hid real bugs.
        try:
            page_values = response.xpath("//div[@class='dw_page']//input/@value").extract()
            page_count = int(page_values[0])
            current_page = int(page_values[1])
        except (IndexError, TypeError, ValueError):
            self.logger.exception("failed to parse pagination on %s", response.url)
            return
        if current_page < page_count:
            yield scrapy.Request(
                response.url + "p{}/".format(current_page + 1),
                callback=self.parse_detail,
                meta={"item": item},
            )