# -*- coding: utf-8 -*-
import scrapy
from Tencent.items import TencentItem

class TencentSpider(scrapy.Spider):
    """Spider for Tencent HR job listings (hr.tencent.com).

    Crawls the position list pages, yields one TencentItem per job row,
    and follows the "next page" link until no more pages remain.
    """

    name = 'tencent'
    allowed_domains = ['tencent.com']
    start_urls = ['http://hr.tencent.com/position.php']

    def parse(self, response):
        """Parse one listing page.

        Yields:
            TencentItem: one per job row on the page.
            scrapy.Request: for the next listing page, when one exists.
        """
        # Job rows alternate between the "even" and "odd" CSS classes.
        node_list = response.xpath('//tr[@class="even"]|//tr[@class="odd"]')

        for node in node_list:
            item = TencentItem()
            # extract_first() returns None instead of raising IndexError on
            # an empty cell, so one malformed row cannot abort the whole page.
            item['job_name'] = node.xpath('./td[1]/a/text()').extract_first()
            detail_href = node.xpath('./td[1]/a/@href').extract_first() or ''
            item['detail_link'] = 'http://hr.tencent.com/' + detail_href
            # BUG FIX: original used extract_first()[0], which indexed into
            # the extracted string and kept only its first character.
            item['job_type'] = node.xpath('./td[2]/text()').extract_first()
            item['number'] = node.xpath('./td[3]/text()').extract_first()
            item['address'] = node.xpath('./td[4]/text()').extract_first()
            item['pub_time'] = node.xpath('./td[5]/text()').extract_first()
            yield item

        # Pagination: explicitly check for the next link instead of the
        # original bare `except: pass`, which also hid real bugs.
        next_href = response.xpath('//a[@id="next"]/@href').extract_first()
        if next_href:
            yield scrapy.Request('http://hr.tencent.com/' + next_href,
                                 callback=self.parse)

