# coding=utf-8

import scrapy
from scrapy.loader import ItemLoader
from chinahr.items import JobInfoItem, ComInfoItem
from scrapyluke.processors import *

class ChinahrSpider(scrapy.Spider):
    """Crawl job listings from campus.chinahr.com.

    ``parse`` reads the total page count from the search-result pager and
    fans out one request per result page; ``parse_jobinfo`` loads each
    page into a ``JobInfoItem`` via an ``ItemLoader``.
    """

    name = 'campus'
    allowed_domains = ['campus.chinahr.com']
    start_urls = ['http://campus.chinahr.com/sou/?Type=Search&KeyWord=&JobLocation=&JobDateTime=&PageSize=80']

    def parse(self, response):
        """Discover the page count and yield one request per result page.

        :param response: the search landing page.
        :yields: ``scrapy.Request`` for every ``PageIndex`` from 1 to the
            total shown in the pager footer.
        """
        # The footer anchor with id "PagerBotomm_TotalPage" carries the
        # total number of result pages as its text.
        max_page_text = ''.join(response.xpath(
            '//a[@id="PagerBotomm_TotalPage"and@class="total"]/text()'
        ).extract()).strip()
        if not max_page_text.isdigit():
            # Pager missing (layout change or empty result set): the old
            # code crashed with ValueError on int(''); log and stop instead.
            self.logger.warning('total-page anchor not found on %s', response.url)
            return
        for page in range(1, int(max_page_text) + 1):
            yield scrapy.Request('%s&PageIndex=%d' % (response.url, page),
                                 callback=self.parse_jobinfo)

    def parse_jobinfo(self, response):
        """Populate a ``JobInfoItem`` from one paginated listing page.

        :param response: a single search-result page.
        :returns: the loaded ``JobInfoItem``.
        """
        loader = ItemLoader(item=JobInfoItem(), response=response)
        loader.add_value('url', value=response.url)
        # Job-title anchors are identified by their Chinese tooltip
        # ("click to view this job's details").
        job_names = response.xpath(
            u'//table[@id="jobView"]//a[@title="单击查看该职位详情"]/text()'
        ).extract()
        if job_names:
            # Bug fix: previously the extracted titles were only printed
            # (with a py2-only print statement) and never stored on the item.
            loader.add_value('job_name', value=job_names[0].strip())
        return loader.load_item()