# -*- coding: utf-8 -*-
import scrapy
from csdn.items import CsdnItem


# 可在settings中开启，开启后命令行界面将不再显示信息，信息将全部保存到日志文件中
# import logging  # 日志


class CourseSpider(scrapy.Spider):
    """Crawl CSDN education course listings and each course's detail page."""

    name = 'course'
    allowed_domains = ['edu.csdn.net']
    start_urls = ['https://edu.csdn.net/courses/k']  # listing entry point
    page = 1  # current listing page number
    num_page = 0  # total number of listing pages (0 = not discovered yet)

    def parse(self, response):
        """Extract course-detail URLs from a listing page and follow pagination.

        Yields one Request per standard course (bundle/"套餐" courses are
        skipped) and, while pages remain, one Request for the next listing page.
        """
        for item in response.css('.course_item'):
            course = item.css('a::attr(href)').extract_first()  # course detail URL
            print('课程详情页。URL：', course)
            set_meal = item.css('span.stag::text').extract_first()
            if set_meal is None:  # only standard courses — exclude bundle offers
                course = response.urljoin(course)
                yield scrapy.Request(course, callback=self.parse_course)
            else:
                print('套餐课程，主动移除。URL：', course)
        if self.num_page == 0:
            # Discover the total page count once, from the pagination bar.
            # BUG FIX: the selector may match nothing; int(None) in the
            # comparison below would raise TypeError. Fall back to 1 page.
            total = response.css('.page-nav a:nth-child(9)::text').get()
            self.num_page = int(total) if total and total.strip().isdigit() else 1
            print('总页数：', self.num_page)
        self.page += 1
        # BUG FIX: was `<`, which stopped one page early — the final
        # listing page was never requested.
        if self.page <= int(self.num_page):
            next_page = response.css('a.btn-next::attr(href)').get()
            print('第 {} 页。URL：'.format(self.page), next_page)
            if next_page:  # guard: the last page carries no "next" link
                # urljoin handles a relative href in the pagination bar
                yield scrapy.Request(response.urljoin(next_page), callback=self.parse)

    def parse_course(self, response):
        """Extract course detail fields (title, teacher, price, outline) into a CsdnItem."""
        items = CsdnItem()  # item model instance
        outline = {}  # chapter title -> list of section titles
        items['title'] = response.css('.info_right h1::text').get()  # course title
        items['teacher'] = response.css('.professor_header a img::attr(title)').get()  # teacher name
        items['pic'] = response.css('.info_left a img::attr(src)').get()  # cover image URL
        items['number'] = response.css('.pr .info_right span::text').get()  # viewer count
        # BUG FIX: selector typo 'sapn' -> 'span'; price was always None.
        items['price'] = response.css('.sale .price_wrap span::text').get()  # price
        for item in response.css('.outline_list.J_accordion_box'):  # course outline blocks
            # Chapter heading as one string; strip the newlines and spaces
            # embedded in the markup. BUG FIX: .get() may return None —
            # calling .replace on it would raise AttributeError.
            dt = (item.css('.J_accordion_btn span::text').get() or '')
            dt = dt.replace('\n', '').replace(' ', '')
            # Section titles, cleaned the same way.
            li = [v.replace('\n', '').replace(' ', '')
                  for v in item.css('.section_name a span::text').getall()]
            outline[dt] = li  # assemble the full chapter -> sections mapping
        items['outline'] = outline
        print('调试信息：课程标题：', items['title'], '价格：', items['price'])
        yield items
