# -*- coding: utf-8 -*-
import scrapy

from ..items import UrlItem


class WhuCsSpider(scrapy.Spider):
    """Crawl the lecture-announcement list pages of the WHU CS site.

    Walks every paginated list page (page count is read from the pager on
    the first page) and yields one ``UrlItem`` per announcement, holding
    the absolute detail-page URL. Items are handled by
    ``LectureUrlPipeline`` (see ``custom_settings``).
    """

    name = "whu-cs"

    # List-page template; {0} is the 1-based page number.
    base_url = "http://cs.whu.edu.cn/a/xueshujiangzuofabu/list_39_{0}.html"
    # Detail-page template; {0} is the relative href scraped from the list.
    detail_base_url = "http://cs.whu.edu.cn/{0}"

    first = True      # True until the total page count has been parsed
    page_total = 0    # total number of list pages, parsed from page 1
    page_now = 1      # 1-based index of the page currently being crawled

    custom_settings = {
        'ITEM_PIPELINES': {
            'whucs.pipelines.LectureUrlPipeline': 300
        }
    }

    def start_requests(self):
        """Start the crawl at the first list page."""
        start_url = self.base_url.format(self.page_now)
        yield scrapy.Request(start_url, callback=self.parse)

    def parse(self, response):
        """Parse one list page.

        Yields a ``UrlItem`` for each announcement entry, then schedules a
        request for the next page until ``page_total`` is exhausted.
        """
        self.logger.info("Crawling list page %s...", self.page_now)

        # On the first page only, read the total page count from the pager.
        if self.first:
            self.first = False
            total_text = response.xpath(
                "//*[@id='container']/div/ul/li[7]/span/strong[1]/text()"
            ).extract_first()
            # Guard against a missing/relocated pager element:
            # int(None) would raise TypeError and kill the crawl.
            self.page_total = int(total_text) if total_text else 0
            self.logger.info("Total pages: %s", self.page_total)

        # Each <dd> under the list container holds one announcement link.
        for dd in response.xpath("//*[@id='container']/dl/dd"):
            url = dd.xpath("a/@href").extract_first()
            item = UrlItem()
            item['url'] = self.detail_base_url.format(url)
            self.logger.debug("Found detail url: %s", item['url'])
            yield item

        # Advance to the next list page while any remain.
        self.page_now += 1
        if self.page_now <= self.page_total:
            next_url = self.base_url.format(self.page_now)
            yield scrapy.Request(next_url, callback=self.parse)