# tencent.py
# -*- coding: utf-8 -*-
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from TencntSpider.items import TencentspiderItem
from TencntSpider.items import PositionDescribe


class TencentSpider(CrawlSpider):
    """Crawl spider for the Tencent HR site (hr.tencent.com).

    Follows job-list pagination links and job-detail links, yielding a
    TencentspiderItem per list row and a PositionDescribe per detail page.
    """
    name = 'tencent'
    allowed_domains = ['hr.tencent.com']
    start_urls = ['https://hr.tencent.com/position.php?&start=0#a']

    rules = (
        # List pages: pagination links of the form ...&start=NN.
        Rule(LinkExtractor(allow=r'&start=\d+'), callback='tencentParse', follow=True),
        # Detail pages. The original pattern r'/position_detail.php?' left '.'
        # and '?' unescaped, so '?' made the trailing 'p' optional and '.'
        # matched any character — escaping both restricts the rule to real
        # 'position_detail.php?...' links.
        Rule(LinkExtractor(allow=r'/position_detail\.php\?'), callback='positionParse', follow=True)
    )

    def tencentParse(self, response):
        """Parse one job-list page; yield a TencentspiderItem per table row.

        Rows are the alternating even/odd-classed <tr> elements. Fields use
        extract_first(default='') instead of extract()[0] so a malformed row
        yields empty strings rather than raising IndexError mid-crawl.
        """
        jobs_list = response.xpath('//tr[@class="even" or @class="odd"]')

        for node in jobs_list:
            item = TencentspiderItem()
            item['positionName'] = node.xpath('./td[1]/a/text()').extract_first(default='')
            # Detail links are relative; prefix the site root.
            link = node.xpath('./td[1]/a/@href').extract_first(default='')
            item['positionLink'] = 'https://hr.tencent.com/' + link
            # Category cell may hold several text nodes; join them all.
            item['positionType'] = ''.join(node.xpath('./td[2]/text()').extract())
            item['peopleNum'] = node.xpath('./td[3]/text()').extract_first(default='')
            item['workLocation'] = node.xpath('./td[4]/text()').extract_first(default='')
            item['publishTime'] = node.xpath('./td[5]/text()').extract_first(default='')

            yield item

    def positionParse(self, response):
        """Parse one job-detail page; yield a single PositionDescribe item.

        The original stored the raw extract() lists for the scalar fields,
        inconsistent with tencentParse's string values; extract_first()
        stores plain strings (empty string when the node is missing).
        """
        item = PositionDescribe()
        # Position title
        item['positionName'] = response.xpath('//td[@id="sharetitle"]/text()').extract_first(default='')
        # Work location
        item['workLocation'] = response.xpath('//tr[@class="c bottomline"]/td[1]/text()').extract_first(default='')
        # Position category
        item['positionType'] = response.xpath('//tr[@class="c bottomline"]/td[2]/text()').extract_first(default='')
        # Number of openings
        item['peopleNum'] = response.xpath('//tr[@class="c bottomline"]/td[3]/text()').extract_first(default='')
        # Duties: bullet-list lines joined into one string (replaces the
        # manual `s += duty` accumulation loop).
        item['duty'] = ''.join(response.xpath('//table//tr[3]//ul/li/text()').extract())
        # Requirements: same join idiom as duties.
        item['requirement'] = ''.join(response.xpath('//table//tr[4]//ul/li/text()').extract())

        yield item
