import scrapy

'''
实习僧网站职位信息
原数据来自 : https://www.shixiseng.com/interns?page=1&type=intern&keyword=&area=&months=&days=&degree=&official=&enterprise=&salary=-0&publishTime=&sortType=&city=%E5%85%A8%E5%9B%BD&internExtend=
需求 : 
    No.1 职位
    No.2 薪资
    No.3 公司
    No.4 工作地点
    No.5 每周工作天数
    No.6 工作月数要求
    No.7 岗位类型
    No.8 岗位需求人数
    No.9 学历要求
    No.10 公司福利
'''


class ShixisengSpider(scrapy.Spider):
    """Spider for shixiseng.com internship listings (keyword "Python", all cities).

    Walks the JSON search API page by page and yields one dict per job
    posting with title, salary, company, city, weekly days, duration,
    industry, headcount/scale, degree requirement and perks.
    """

    name = 'shixiseng'
    allowed_domains = ['shixiseng.com']
    start_urls = [
        'https://apigateway.shixiseng.com/api/interns/v2.0/interns/wxz/search/v3?p=1&city=%E5%85%A8%E5%9B%BD&k=Python']
    # Current results-page number; incremented after each successfully parsed page.
    page = 1

    def parse(self, response):
        """Parse one API page: yield one item per job, then request the next page.

        Stops paginating when the API reports a non-success code or returns
        an empty job list.
        """
        data = response.json()
        if data['code'] != 100:
            # Unexpected API response — stop instead of paginating forever.
            return
        jobs = data['msg']['data']
        if not jobs:
            # Empty page means we have consumed all results.
            print('已经爬完 !')
            # A plain return lets Scrapy close the spider cleanly; the
            # original exit(0) killed the whole process/reactor mid-run.
            return
        for job in jobs:
            # Build a FRESH dict per job. The original mutated and yielded
            # one shared dict, so every yielded item was the same object
            # and downstream consumers saw only the last job's values.
            yield {
                # job title
                'job': job['job'],
                # salary description
                'salary': job['salary_desc'],
                # company name
                'company': job['company'],
                # work city
                'city': job['city'],
                # working days per week
                'dayPerWeek': '每周工作' + str(job['dayPerWeek']) + '天',
                # required internship duration in months
                'month_num': str(job['month_num']) + '个月',
                # position industry / category
                'describeJob': job['industry'],
                # company scale (headcount)
                'peoples': job['scale'],
                # degree requirement
                'degree': job['degree'],
                # company perks / benefits
                'welfare': job['attraction'],
            }
        # Schedule the next results page.
        self.page += 1
        url = f'https://apigateway.shixiseng.com/api/interns/v2.0/interns/wxz/search/v3?p={self.page}&city=%E5%85%A8%E5%9B%BD&k=Python'
        yield scrapy.Request(url, callback=self.parse)
