# -*- coding: utf-8 -*-
import scrapy


class TencentSpider(scrapy.Spider):
    """Spider that crawls Tencent HR job listings and follows pagination.

    Yields one dict per job posting with keys: position_name,
    position_href, position_cate, need_num, location, publish_date.
    """

    name = 'tencent'  # spider name (used with `scrapy crawl tencent`)
    allowed_domains = ['hr.tencent.com']  # restrict crawl to this domain
    # BUG FIX: the original URL ended in "start=#a0" — the '#' starts a URL
    # fragment, so the query parameter was never actually sent. Use start=0.
    start_urls = ['http://hr.tencent.com/position.php?&start=0']

    def parse(self, response):
        """Extract job rows from the listing table, then follow the next page.

        :param response: scrapy Response for a listing page.
        :yields: dict per job row, then a Request for the next page (if any).
        """
        # Grab the table rows, skipping the header row (first) and the
        # pagination row (last).
        tr_list = response.xpath("//table[@class='tablelist']/tr")[1:-1]
        for tr in tr_list:
            # extract_first() returns the first matched string, or None
            # when nothing matches (extract() would return a list).
            item = {
                "position_name": tr.xpath("./td[1]/a/text()").extract_first(),
                "position_href": tr.xpath("./td[1]/a/@href").extract_first(),
                "position_cate": tr.xpath("./td[2]/text()").extract_first(),
                "need_num": tr.xpath("./td[3]/text()").extract_first(),
                "location": tr.xpath("./td[4]/text()").extract_first(),
                "publish_date": tr.xpath("./td[5]/text()").extract_first(),
            }
            yield item

        # Pagination: the "next" anchor holds a relative href, or the
        # sentinel "javascript:;" on the last page. extract_first() may
        # also return None if the anchor is missing entirely — guard both,
        # since response.follow(None) raises ValueError.
        next_url = response.xpath("//a[@id='next']/@href").extract_first()
        if next_url and next_url != "javascript:;":
            # response.follow resolves the relative URL against the
            # current page, so no manual urljoin is needed.
            yield response.follow(next_url, callback=self.parse)