# -*- coding: utf-8 -*-
import scrapy
# NOTE: CrawlSpider/Rule live in scrapy.spiders (plural); the old
# scrapy.spider module was deprecated and removed in modern Scrapy.
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor

from spider_CSDNCourse.items import CourseUrlItem


class CourseSpiderSpider(CrawlSpider):
    """Crawl CSDN course listing pages and yield one CourseUrlItem per course link."""

    name = 'master'
    allowed_domains = ['edu.csdn.net']
    start_urls = ['https://edu.csdn.net/courses/k/p1']

    # Rules define which links to extract and follow: match pagination URLs
    # like /courses/k/p1, /courses/k/p2, ... and send each page to parse_page.
    # Dots are escaped so '.' matches a literal dot, not any character.
    rules = (
        Rule(LinkExtractor(allow=(r'https://edu\.csdn\.net/courses/k/p[0-9]+',)),
             callback='parse_page', follow=True),
    )

    def parse_page(self, response):
        """Parse one course-listing page.

        Yields one CourseUrlItem per course card found on the page, with
        its "url" field set to the course's detail-page link ("" when the
        card contains no <a href>).

        A fresh item is created for every course: the original code mutated
        a single class-level CourseUrlItem, so every yielded item was the
        same object and item pipelines could observe only the last URL.
        """
        for course in response.css("div.course_html .course_item"):
            # First <a href> inside the card is the detail link; default to ""
            # when the selector matches nothing.
            detail_url = course.css("a::attr(href)").extract_first(default="")
            item = CourseUrlItem()
            item["url"] = detail_url
            # Use the spider's logger instead of print so output goes through
            # Scrapy's logging system.
            self.logger.debug("Course URL: %s", item)
            yield item
