# -*- coding: utf-8 -*-
import scrapy
from tencent.items import TencentItem

class Tencent02Spider(scrapy.Spider):
    """Crawl Tencent HR job listings, yielding one TencentItem per posting.

    Starts from the first listing page and follows the "next" pagination
    link until it is disabled (class 'noactive'), i.e. the last page.
    """

    name = 'tencent02'
    allowed_domains = ['hr.tencent.com']
    start_urls = ['http://hr.tencent.com/position.php?&start=0']
    # Listing-page hrefs are relative; this prefix makes them absolute.
    detailPagePrefix = "http://hr.tencent.com/"

    def parse(self, response):
        """Parse one listing page.

        Yields:
            TencentItem: one per job row (<tr class="even"> / <tr class="odd">).
            scrapy.Request: for the next listing page, while one exists.
        """
        node_list = response.xpath("//tr[@class='even' or @class='odd']")

        for node in node_list:
            item = TencentItem()
            # extract_first('') returns '' when the XPath matches nothing,
            # replacing the manual `x[0] if len(x) else ""` pattern.
            item['positionName'] = node.xpath("./td[1]/a/text()").extract_first('')
            link = node.xpath("./td[1]/a/@href").extract_first()
            # Prefix is applied only when a link exists (matches original
            # conditional-expression precedence: empty string otherwise).
            item['positionLink'] = self.detailPagePrefix + link if link else ""
            item['positionType'] = node.xpath("./td[2]/text()").extract_first('')
            item['positionNeedNum'] = node.xpath("./td[3]/text()").extract_first('')
            item['positionBase'] = node.xpath("./td[4]/text()").extract_first('')
            item['positionPublishTime'] = node.xpath("./td[5]/text()").extract_first('')
            yield item

        # The "next" anchor gains class 'noactive' on the final page.
        next_disabled = response.xpath("//a[@id='next' and @class='noactive']")
        if not next_disabled:
            url = response.xpath("//a[@id='next']/@href").extract_first()
            # BUG FIX: original had a doubled comma in the Request call
            # (a SyntaxError) and needlessly stored the URL on `self`.
            yield scrapy.Request(self.detailPagePrefix + url, callback=self.parse)
        else:
            self.logger.info("抓取完毕!")
