# -*- coding: utf-8 -*-
import scrapy

from ..items import DataItem, ContentItem
import hashlib
import lxml.html


class GuetSpider(scrapy.Spider):
    """Crawl job listings from job.myclub2.com.

    Starts at the label-15 article list, emits a ``DataItem`` per listing row,
    follows each row's detail link (and pagination), and emits a
    ``ContentItem`` with the extracted text for every detail page.
    """

    name = "guet"
    allowed_domains = ["job.myclub2.com"]
    start_urls = ['http://job.myclub2.com/Home/ArticleList?label=15']
    # Listing rows carry site-relative hrefs; this is prepended to build
    # the absolute URL stored on each DataItem.
    prefix_url = 'http://job.myclub2.com'

    def parse(self, response):
        """Dispatch on URL shape: list pages vs. article detail pages.

        List pages (URL contains ``ArticleList``) are fed through
        :meth:`process_list`, which yields a mix of ``DataItem`` objects
        (re-yielded as-is), detail-page URL strings (turned into follow-up
        requests back to this callback), and finally the table selector
        (used here to find the "next page" link). Any other URL is treated
        as a detail page and converted into a ``ContentItem``.
        """
        if 'ArticleList' in response.url:
            for data in self.process_list(response):
                if isinstance(data, DataItem):
                    yield data
                # NOTE: the original compared against the Python 2 `unicode`
                # builtin, which raises NameError on Python 3 (required by
                # modern Scrapy). Extracted values are `str` on Python 3.
                elif isinstance(data, str):
                    detail = response.urljoin(data)
                    yield scrapy.Request(detail, callback=self.parse)
                else:
                    # Last value yielded by process_list: the table selector.
                    # The second-to-last pager anchor is "next page" when
                    # more pages exist. Selector.xpath() never returns None;
                    # extract_first() yields None on no match, which simply
                    # fails the equality test below.
                    next_page = data.xpath('.//div[@class="pager"]/a[last()-1]')
                    if next_page.xpath('.//text()').extract_first() == u'下一页':
                        next_url = response.urljoin(
                            next_page.xpath('.//@href').extract_first())
                        yield scrapy.Request(next_url, callback=self.parse)
        else:
            # Detail page: strip markup from the main content block.
            item = ContentItem()
            html = lxml.html.fromstring(
                response.xpath('.//div[@class="main05"]').extract_first())
            item['content'] = html.text_content()
            item['url'] = response.url
            yield item

    def process_list(self, response):
        """Yield listing data from a list page.

        For each data row (rows after the header) that has a real link,
        yields the relative detail URL (a ``str``) followed by a populated
        ``DataItem``; after all rows, yields the table selector itself so
        the caller can locate the pagination controls.
        """
        table = response.css('.container06rightb table tbody')
        for tr in table.xpath('.//tr[position() > 1]'):
            url = tr.xpath('.//a/@href').extract_first()
            name = tr.xpath('.//a').xpath('text()').extract_first()
            publish_date = tr.xpath('.//td[3]/text()').extract_first()

            # Skip rows without a usable link (missing or placeholder '#').
            if url is None or url == '#':
                continue

            item = DataItem()
            item['url'] = self.prefix_url + url
            item['name'] = name
            item['publish_date'] = publish_date
            item['source'] = self.name

            yield url
            yield item
        yield table
