# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from zhipin.items import test_ZhipinItem
import time
import zhipin.settings
import re

def search_skill(description):
    """Return the list of ASCII-letter runs (skill keywords) in *description*.

    Bug fix: the original pattern was ``[a-zA-z]+``. The ``A-z`` character
    range spans the ASCII codes between ``Z`` and ``a`` as well, so the
    characters ``[``, ``\\``, ``]``, ``^``, ``_`` and the backtick were
    wrongly treated as letters (e.g. ``"a_b"`` matched as one token
    ``"a_b"`` instead of ``["a", "b"]``). ``[a-zA-Z]+`` matches letters only.

    :param description: free-form job-description text (may be empty).
    :return: list of matched letter-only substrings, in order of appearance.
    """
    rule = re.compile(r'[a-zA-Z]+')
    return rule.findall(description)

class CrawlZhipinSpider(CrawlSpider):
    """Crawl zhipin.com job listings and scrape each job-detail page into a
    ``test_ZhipinItem``.

    Pagination links are followed; ``job_detail`` pages are handed to
    :meth:`parse_item`.
    """

    name = 'crawl_zhipin'
    allowed_domains = ['zhipin.com']

    rules = (
        # Follow listing pagination links (page=1 .. page=10).
        Rule(LinkExtractor(allow=r'.+page=(10|[1-9])'), follow=True),
        # Scrape individual job-detail pages.
        Rule(LinkExtractor(allow=r'.+job_detail/.+.html'), callback='parse_item', follow=False),
    )

    def parse_item(self, response):
        """Parse one job-detail page and yield a populated ``test_ZhipinItem``.

        Mandatory fields (job name, city, URL, extracted skill list) abort the
        item with an early ``return`` when they cannot be extracted; optional
        fields fall back to placeholder defaults instead.

        Bug fix: the original code used a bare ``yield`` for the early-abort
        paths.  A bare ``yield`` emits ``None`` to the Scrapy engine and then
        *continues executing*, so an incomplete item was still yielded at the
        end.  ``return`` terminates the generator as intended.

        :param response: the job-detail page response.
        :return: generator yielding at most one ``test_ZhipinItem``.
        """
        item = test_ZhipinItem()

        # --- mandatory: job title ------------------------------------------
        try:
            item['job_name'] = response.xpath('//*[@id="main"]/div[1]/div/div/div[2]/div[2]/h1/text()').extract()[0]
        except Exception as e:
            print(e)
            return

        # --- mandatory: city -----------------------------------------------
        try:
            item['city'] = response.xpath('//*[@id="main"]/div[1]/div/div/div[2]/p/text()[1]').extract()[0]
        except Exception as e:
            print(e)
            return

        # --- salary / experience / education, with placeholder defaults ----
        try:
            salary = response.xpath('//*[@id="main"]/div[1]/div/div/div[2]/div[2]/span/text()').extract()[0]
            # Skip daily-wage postings (salary string containing '天');
            # they keep no salary fields at all, as before.
            if salary.find('天') < 0:
                item['salary'] = salary
                # e.g. "15-25K·14薪": take the part before '·', split on '-',
                # and strip the trailing 'K' from the upper bound.
                item['min_salary'] = int(salary.split('·')[0].split('-')[0], 10)
                item['max_salary'] = int(salary.split('·')[0].split('-')[1][0:-1], 10)
                item['avg_salary'] = (item['min_salary'] + item['max_salary']) / 2

                item['experience'] = response.xpath('//*[@id="main"]/div[1]/div/div/div[2]/p/text()[2]').extract()[0]
                item['education'] = response.xpath('//*[@id="main"]/div[1]/div/div/div[2]/p/text()[3]').extract()[0]
        except Exception as e:
            print(e)
            item['salary'] = '2-3K'
            item['min_salary'] = 2
            item['max_salary'] = 3
            item['avg_salary'] = 2.5
            item['experience'] = '在校/应届'
            item['education'] = '本科'

        # --- mandatory: source URL -----------------------------------------
        try:
            item['url'] = response.request.url
        except Exception as e:
            print(e)
            return

        # --- company metadata, with placeholder defaults -------------------
        try:
            item['industry'] = response.xpath('//*[@id="main"]/div[3]/div/div[1]/div[2]/p[4]/a/text()').extract()[0]
            item['sel_stage'] = response.xpath('//*[@id="main"]/div[3]/div/div[1]/div[2]/p[2]/text()').extract()[0]
            item['sel_scale'] = response.xpath('//*[@id="main"]/div[3]/div/div[1]/div[2]/p[3]/text()').extract()[0]
        except Exception as e:
            print(e)
            item['sel_stage'] = '未融资'
            item['sel_scale'] = '0-20人'
            item['industry'] = '计算机'

        # --- titled page sections: description / company intro / ----------
        # --- registration info / work address ------------------------------
        try:
            for i in range(1, 7):
                # Each numbered <div> carries an <h3> naming its section.
                job_des_div = response.xpath(
                    '//*[@id="main"]/div[3]/div/div[2]/div[2]/div[' + str(i) + ']/h3/text()').extract()[0]
                try:
                    if job_des_div == '职位描述':
                        item['description'] = response.xpath(
                            '//*[@id="main"]/div[3]/div/div[2]/div[2]/div[' + str(i) + ']/div').xpath(
                            'string(.)').extract()[0].strip()
                    elif job_des_div == '公司介绍':
                        item['company_info'] = response.xpath(
                            '//*[@id="main"]/div[3]/div/div[2]/div[2]/div[' + str(i) + ']/div').xpath(
                            'string(.)').extract()[0].strip()
                    elif job_des_div == '工商信息':
                        item['company_name'] = response.xpath(
                            '//*[@id="main"]/div[3]/div/div[2]/div[2]/div[' + str(i) + ']/div[1]/text()').extract()[0]
                    elif job_des_div == '工作地址':
                        item['address'] = response.xpath(
                            '//*[@id="main"]/div[3]/div/div[2]/div[2]/div[' + str(i)
                            + ']/div/div[1]/text()').extract()[0]
                except Exception as e:
                    # NOTE(review): this clobbers all four fields when any one
                    # section fails — preserved as-is from the original.
                    print(e)
                    item['description'] = 'NULL'
                    item['company_info'] = 'NULL'
                    item['address'] = item['city']
                    item['company_name'] = 'NULL'
        except Exception as e:
            print(e)

        # --- welfare tags, joined into one comma-separated string ----------
        try:
            welfare = response.xpath('//*[@id="main"]/div[1]/div/div/div[2]/div[3]/div[2]/span/text()').extract()
            item['welfare'] = ','.join(welfare)
        except Exception as e:
            print(e)
            item['welfare'] = 'NULL'

        # --- mandatory: skill keywords from the description ----------------
        try:
            skill = search_skill(item['description'])
            if skill:
                item['skill'] = ','.join(skill)
            else:
                # No skills extracted: drop this page entirely.
                return
        except Exception as e:
            print(e)
            return

        yield item
