# -*- coding: utf-8 -*-
from urllib.parse import urljoin
from scrapy_redis.spiders import RedisSpider
import scrapy
from ..items import CompanyItem, JobItem


class BossSpider(RedisSpider):
    """Distributed spider for zhipin.com (Boss Zhipin).

    Start URLs are pushed into the Redis list named by ``redis_key``
    (scrapy-redis feeds them to :meth:`parse`). The crawl fans out:
    city page -> district list pages -> job / company detail pages.
    """

    name = 'boss'
    allowed_domains = ['zhipin.com']
    # start_urls = ['https://www.zhipin.com/c101020100-p100109/']
    redis_key = 'bosszhipin'

    def parse(self, response):
        """Extract the district links from a city landing page.

        Yields one request per district, routed to :meth:`parse_list`,
        carrying the district name in ``meta['area']``.
        """
        area_list = response.xpath(
            '//dl[@class="condition-district show-condition-district"]/dd/a')
        # Skip the first anchor (the "all districts" link). Slicing instead
        # of `del area_list[0]` avoids an IndexError when the page yields no
        # links (e.g. an anti-bot interstitial) and does not mutate the list.
        for area in area_list[1:]:
            area_name = area.xpath('./text()').extract_first()
            area_href = area.xpath('./@href').extract_first()
            if not area_href:
                continue  # malformed anchor; nothing to follow
            yield scrapy.Request(
                response.urljoin(area_href),
                callback=self.parse_list,
                meta={'area': area_name},
            )

    def parse_list(self, response):
        """Parse one page of a district's job list.

        For every listing, yields a request to the job detail page
        (:meth:`parse_job`) and one to the company page
        (:meth:`parse_company`), each carrying a partially-filled item in
        ``meta['item']``. Follows pagination until the "next" arrow becomes
        a dead ``javascript:;`` link.
        """
        area = response.meta['area']
        for job in response.xpath('//div[@class="job-list"]/ul/li'):
            company_name = job.xpath('./div/div[2]/div/h3/a/text()').extract_first()
            job_href = job.xpath('./div/div[1]/h3/a/@href').extract_first()
            company_href = job.xpath('./div/div[2]/div/h3/a/@href').extract_first()

            # Guard each href: scrapy.Request raises on a None/empty URL,
            # which previously aborted the whole listing on one bad row.
            if job_href:
                job_item = JobItem()
                job_item['name'] = job.xpath('.//div[@class="job-title"]/text()').extract_first()
                job_item['company'] = company_name
                job_item['job_url'] = response.urljoin(job_href)
                job_item['area'] = area
                yield scrapy.Request(
                    job_item['job_url'],
                    callback=self.parse_job,
                    meta={'item': job_item},
                )

            if company_href:
                company_item = CompanyItem()
                company_item['name'] = company_name
                company_item['company_url'] = response.urljoin(company_href)
                yield scrapy.Request(
                    company_item['company_url'],
                    callback=self.parse_company,
                    meta={'item': company_item},
                )

        # Pagination: on the last page the "next" arrow's href is the inert
        # 'javascript:;' pseudo-link.
        next_url = response.xpath(
            '//div[@class="job-list"]/div[@class="page"]/a[last()]/@href').extract_first()
        if next_url and next_url != 'javascript:;':
            yield scrapy.Request(
                response.urljoin(next_url),
                callback=self.parse_list,
                meta={'area': area},
            )

    def parse_job(self, response):
        """Fill the remaining JobItem fields from a job detail page.

        All selector results are guarded so a layout change degrades to
        empty fields instead of dropping the item with an exception.
        """
        item = response.meta['item']

        publish_time = response.css('.time::text').extract_first()
        # Node text carries a 3-character prefix before the date — TODO
        # confirm against a live page; previously sliced unconditionally.
        item['publish_time'] = publish_time[3:] if publish_time else ''

        # Each <p> entry looks like "label：value" (full-width colon).
        info = response.css('.job-primary.detail-box .info-primary p::text').extract()
        fields = ('city', 'work_experience', 'education')
        for idx, field in enumerate(fields):
            parts = info[idx].split('：') if idx < len(info) else []
            item[field] = parts[1] if len(parts) > 1 else ''

        item['tags'] = ';'.join(response.css(
            '.job-primary.detail-box .info-primary .job-tags span::text').extract())

        # Salary badge is e.g. "12k-24k" -> low "12", high "24"
        # (the trailing unit character is stripped).
        salary_text = response.css('.job-banner .badge::text').extract_first() or ''
        salary = salary_text.split('-')
        item['salary_low'] = salary[0][:-1]
        item['salary_high'] = salary[1][:-1] if len(salary) > 1 else ''

        item['description'] = '*_*'.join(response.css(
            '.detail-content div:nth-child(1) .text::text').extract())
        yield item

    def parse_company(self, response):
        """Fill the remaining CompanyItem fields from a company page."""
        item = response.meta['item']

        info = response.css('.company-banner .info-primary p::text').extract()
        if len(info) >= 3:
            item['company_finance'] = info[0]
            item['company_size'] = info[1]
            item['type'] = info[2]
        elif len(info) == 2:
            # No financing stage published for this company.
            item['company_finance'] = ''
            item['company_size'] = info[0]
            item['type'] = info[1]
        else:
            # Unexpected layout: degrade to empty fields rather than crash.
            item['company_finance'] = ''
            item['company_size'] = info[0] if info else ''
            item['type'] = ''

        item['description'] = '*_*'.join(response.css(
            '.detail-content div:nth-child(1) .text::text').extract())
        item['environments'] = ';'.join(response.css('.cur img::attr("src")').extract())
        item['total_name'] = response.css('.job-sec.company-business h4::text').extract_first()
        item['reg_principal'] = response.css(
            '.business-detail ul li:nth-child(2)::text').extract_first()
        item['reg_time'] = response.css(
            '.business-detail ul li:nth-child(3)::text').extract_first()
        item['reg_address'] = response.css(
            '.business-detail ul li:nth-child(6)::text').extract_first()
        yield item
