# -*- coding: utf-8 -*-
import scrapy
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from moocspider.items import MoocspiderItem

class ImoocSpider(scrapy.Spider):
    """Spider that scrapes the MongoDB course listing pages on coding.imooc.com.

    Yields one ``MoocspiderItem`` per course card (link, cover image, name,
    description) and follows the pagination link until no "next page"
    anchor is found.
    """
    name = 'imooc'
    allowed_domains = ['coding.imooc.com']
    start_urls = ['https://coding.imooc.com/?c=mongodb']

    def parse(self, response):
        """Parse one listing page.

        :param response: Scrapy ``Response`` for a listing page.
        :yields: ``MoocspiderItem`` per course, then a ``scrapy.Request``
            for the next page when one exists.
        """
        course_cards = response.xpath(
            "//div[@class='w index-main']/div[@class='index-list-wrap']/div[@class='shizhan-course-list clearfix']/div[contains(@class,'shizhan-course-wrap l')]")
        for card in course_cards:
            item = MoocspiderItem()
            item['gurl'] = card.xpath("a/@href").extract_first()
            item['image'] = card.xpath("a/div/div[@class='box']/div[@class='img-box']/img/@src").extract_first()
            # NOTE: 'shizan-name'/'shizan-desc' (missing 'h') mirror the
            # site's actual class names — do not "fix" the spelling here.
            item['name'] = card.xpath("a/div/div[@class='box']/div[@class='shizhan-intro-box']/p[@class='shizan-name']/text()").extract_first()
            item['desc'] = card.xpath("a/div/div[@class='box']/div[@class='shizhan-intro-box']/p[@class='shizan-desc']/text()").extract_first()
            yield item

        # Pagination: the anchor text '下一页' means "next page".
        # Fix: the original built the next URL by string surgery
        # (replace('/', '&') + hard-coded prefix), which yields malformed
        # URLs for relative hrefs; response.urljoin() resolves the href
        # against the current page URL correctly.
        next_link = response.xpath("//div[@class='page']//a[contains(text(),'下一页')]/@href").extract_first()
        if next_link:
            self.logger.debug("Following next page link: %s", next_link)
            yield scrapy.Request(response.urljoin(next_link), callback=self.parse)


if __name__ == '__main__':
    # Allow running the spider directly (python thisfile.py) instead of
    # going through the `scrapy crawl imooc` CLI.
    crawler = CrawlerProcess(get_project_settings())
    crawler.crawl(ImoocSpider)
    crawler.start()
