# -*- coding: utf-8 -*-
import scrapy

from ..items import UrlItem


class WhuCsSpider(scrapy.Spider):
    """Collect article URLs from the WHU news "media" (mtwd) listing pages.

    Starts at the newest listing page (``mtwd.htm``), discovers the total
    page count from its pager, then walks the numbered pages
    ``mtwd/<n>.htm`` in descending order, yielding one ``UrlItem`` per
    on-site article link found.
    """

    name = "whu_mtwd"
    base_url = "http://news.whu.edu.cn/mtwd/{0}.htm"
    root_domain = "http://news.whu.edu.cn/"

    # Pagination state: max_page is discovered from the first response's
    # "Next" pager link; pages counts listing pages parsed so far.
    max_page = 1
    pages = 1
    isFirst = True

    custom_settings = {
        'ITEM_PIPELINES': {
            'whucs.pipelines.MtwdUrlPipeline': 300
        }
    }

    def start_requests(self):
        # The newest listing page has no page number in its URL.
        yield scrapy.Request('http://news.whu.edu.cn/mtwd.htm', callback=self.parse)

    def parse(self, response):
        """Extract article links from one listing page and queue the next page."""
        # Discover the total page count once, from the "Next" pager link of
        # the first (newest) page. Guard against a missing/odd pager link:
        # re_first() returns None when the pattern does not match.
        if self.isFirst:
            page_no = (response.xpath("//td[@align='left']//a[@class='Next'][1]/@href")
                       .re_first(r"mtwd/(\d+)\.htm"))
            if page_no is not None:
                self.max_page = int(page_no) + 1
            self.isFirst = False
        self.pages += 1

        links = response.xpath("//div[@class='page_index_left']//div[@class='list']//div[@class='infotitle']/a")
        for link in links:
            href = link.xpath("@href").extract_first()
            if not href:
                # Anchor without an href attribute — extract_first() gave None.
                continue
            if href.startswith('..'):
                # Strip the leading "../" so the site root can be prepended.
                href = href.split('/', 1)[1]
            if href.startswith('http'):
                # Absolute links point off-site; skip them.
                continue
            href = self.root_domain + href
            # Py3-compatible logging instead of the Python 2 `print` statement.
            self.logger.debug("queueing url %s", href)
            yield UrlItem(url=href)

        # Numbered pages count down: max_page-1, max_page-2, ..., 1.
        if self.pages <= self.max_page:
            yield scrapy.Request(self.base_url.format(self.max_page - self.pages + 1),
                                 callback=self.parse)