import scrapy
from bs4 import BeautifulSoup
from boss.MongodbClient import MongodbClient


class BossSpiderSpider(scrapy.Spider):
    """Crawl Android job listings from zhipin.com (BOSS直聘).

    Each listing page is parsed into per-job dicts which are persisted to
    MongoDB via ``MongodbClient``; the spider then follows the "next page"
    link until no further page exists.
    """

    name = 'boss_spider'
    allowed_domains = ['zhipin.com']
    start_urls = ['https://www.zhipin.com/c101180100/?query=android&page=1']
    base_url = 'https://www.zhipin.com'

    # Shared client instance; created once at class definition time.
    mongoClient = MongodbClient()

    @staticmethod
    def _flatten_separators(tag):
        """Replace every ``<em>`` child of *tag* with ``' | '`` and return
        the resulting text.

        On the site, ``<em>`` elements visually separate the segments of a
        field (e.g. industry / financing / size), so after this substitution
        the text splits cleanly on ``' | '``. Returns ``''`` when *tag* is
        ``None`` (field absent from the card).
        """
        if tag is None:
            return ''
        # Guard each replacement: some cards have fewer (or no) separators,
        # and calling .replace_with() on a None lookup would raise.
        for em in tag.find_all('em'):
            em.replace_with(' | ')
        return tag.text

    def parse(self, response):
        """Parse one listing page.

        Extracts each job card (``div.job-primary``), stores the resulting
        record in MongoDB, and yields a request for the next page when a
        pagination link is present.
        """
        soup = BeautifulSoup(response.body, 'lxml')

        jobs = soup.select('div.job-primary')
        print('获取到{}个工作'.format(len(jobs)))

        for job in jobs:
            job_name = job.select_one('.job-name a').get('title')
            job_area = job.select_one('.job-area').text
            company_name = job.select_one('.info-company .name a').text

            company_info = self._flatten_separators(
                job.select_one('.info-company p'))
            infos = company_info.split(' | ')
            company_industry = ''
            financing_status = ''
            company_size = ''
            if len(infos) == 2:
                # Financing status omitted: "industry | size".
                company_industry, company_size = infos
            elif len(infos) == 3:
                # Full form: "industry | financing | size".
                company_industry, financing_status, company_size = infos

            company_logo = job.select_one('.company-logo').get('src')
            salary = job.select_one('.job-limit span').text

            job_requirements = self._flatten_separators(
                job.select_one('.job-limit p'))
            requirements = job_requirements.split(' | ')
            # Some cards omit experience or degree; default to '' rather
            # than raising IndexError.
            work_years = requirements[0] if len(requirements) > 0 else ''
            degree_required = requirements[1] if len(requirements) > 1 else ''

            tags = [tag.text for tag in job.select('.tags span')]
            job_bean = {
                'job_name': job_name,
                'job_area': job_area,
                'company_name': company_name,
                'company_info': company_info,
                'company_industry': company_industry,
                'financing_status': financing_status,
                'company_size': company_size,
                'company_logo': company_logo,
                'salary': salary,
                'job_requirements': job_requirements,
                'work_years': work_years,
                'degree_required': degree_required,
                'tags': tags,
            }
            print(job_bean)
            self.mongoClient.put(job_bean)

        # 获取下一页的地址
        page = response.xpath("//div[@class='page']/a[last()]/@href").get()

        # Check the raw href BEFORE concatenating: on the last page the
        # XPath returns None and `base_url + None` would raise TypeError
        # (the original post-concatenation check could never trigger).
        if not page:
            print("退出")
            return
        next_url = self.base_url + page
        print("下一页地址：", next_url)
        yield scrapy.Request(next_url)
