# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

# Import the item class
from Zhiyouji.items import ZhiyoujiItem
import time

"""
分布式爬虫实现：
1.导入分布式爬虫类
2.修改爬虫类的继承
3.注释start_urls，定义redis_key
4.把scrapy-redis官方示例的配置信息，粘贴到爬虫项目的配置文件中
5.代码启动后，在redis数据库中，添加redis_key，并指定起始的url

"""
from scrapy_redis.spiders import RedisCrawlSpider, RedisSpider


class ZhiyoujiCrawlSpider(RedisCrawlSpider):
    """Distributed CrawlSpider for jobui.com company pages.

    Collected fields: data source (URL), collection timestamp, company
    name, company nature, industry category, and company introduction.

    Being a ``RedisCrawlSpider``, it takes no ``start_urls``: seed URLs
    are pushed into the redis list named by ``redis_key`` at runtime.
    """

    name = 'zhiyouji_crawl_redis'
    allowed_domains = ['jobui.com']
    # start_urls = ['https://www.jobui.com/rank/company/view/beijing/?n=1']

    # Redis list key the scheduler reads seed URLs from.
    redis_key = 'zhiyouji'

    rules = (
        # Listing pages: follow pagination links, no data extraction.
        Rule(LinkExtractor(allow=r'rank/company/view/beijing/\?n=\d+'), follow=True),
        # Detail pages (e.g. https://www.jobui.com/company/16731745/):
        # extract data via parse_item, do not follow further links.
        Rule(LinkExtractor(allow=r'/company/\d+/$'), callback='parse_item', follow=False),
    )

    # Item field name -> XPath expression on the company detail page.
    _FIELD_XPATHS = {
        'company_name': '//*[@id="cmp-intro"]/div/div[2]/div[1]/div[4]/text()',
        'nature': '//div[@class="company-nature"]/text()',
        'industry': '//*[@id="cmp-intro"]/div/div[2]/div[1]/div[3]/span/a/text()',
        'intro': '//*[@id="textShowMore"]/text()',
    }

    def parse_item(self, response):
        """Yield one ``ZhiyoujiItem`` extracted from a company detail page.

        :param response: the downloaded detail-page response.
        """
        item = ZhiyoujiItem()
        # Record where the data came from and when it was collected
        # (millisecond epoch timestamp).
        item['data_source'] = response.request.url
        item['time_stamp'] = int(time.time() * 1000)
        # Each remaining field is a single XPath lookup; first match or None.
        for field, xpath in self._FIELD_XPATHS.items():
            item[field] = response.xpath(xpath).extract_first()
        yield item

