# -*- coding: utf-8 -*-
import re
import scrapy
from scrapy.spiders.crawl import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from master.items import MasterItem


class CoursesSpider(CrawlSpider):
    """Crawl CSDN education course listing pages and yield course URLs.

    Pagination pages under ``/courses/k/pN`` are followed via ``rules``.
    Plain course links become :class:`MasterItem`s immediately; combo
    ("套餐") bundle links are resolved through the site's
    ``ajaxGetCourseList`` JSON endpoint so each bundled course URL is
    captured individually.
    """
    name = 'master'
    allowed_domains = ['edu.csdn.net']
    start_urls = ['https://edu.csdn.net/courses/k']

    # Follow pagination links such as https://edu.csdn.net/courses/k/p2.
    rules = (
        Rule(LinkExtractor(allow=(r'https://edu\.csdn\.net/courses/k/p[0-9]+',)),
             callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        """Parse one listing page.

        Yields a ``MasterItem`` per regular course link, and a follow-up
        ``scrapy.Request`` to the combo AJAX endpoint for bundle links
        (handled by :meth:`parse_in`).
        """
        for url in response.css('.course_item a::attr(href)').extract():
            # One search both detects the combo link and captures its id,
            # so relative hrefs cannot slip past the check and then crash
            # an absolute-prefixed findall()[0] lookup.
            combo = re.search(r'/combo/detail/([0-9]+)', url)
            if combo:
                # The combo page only exposes its course list through this
                # XHR endpoint, so request the JSON listing instead.
                headers = {
                    'Host': 'edu.csdn.net',
                    'Connection': 'keep-alive',
                    'Accept': 'application/json, text/javascript, */*; q=0.01',
                    'X-Requested-With': 'XMLHttpRequest',
                    'User-Agent': ' Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
                    'X-Tingyun-Id': 'QJMBfdIF52g;r=233173402',
                    'Referer': url,
                    'Accept-Encoding': 'gzip, deflate, br',
                    'Accept-Language': 'zh-CN,zh;q=0.9',
                }
                ajax_url = ('https://edu.csdn.net/combo/ajaxGetCourseList'
                            '?combo_id=' + combo.group(1))
                yield scrapy.Request(url=ajax_url, headers=headers,
                                     callback=self.parse_in, dont_filter=True)
            else:
                self.logger.debug('course url: %s', url)
                # Fresh item per URL — reusing one shared instance would let
                # later mutations corrupt items already handed to pipelines.
                item = MasterItem()
                item['url'] = url
                yield item

    def parse_in(self, response):
        """Parse the combo AJAX JSON response and yield each course URL."""
        # response.text is the decoded body; str(response.body) would give
        # the bytes repr ("b'...'") with doubled escape characters.
        for url in re.findall(r'"url":"(.*?)"', response.text):
            item = MasterItem()
            # JSON escapes slashes as "\/"; strip the backslashes back out.
            item['url'] = re.sub(r'\\+', '', url)
            yield item
