import scrapy
import re
import csv
import os

from scrapy_test.items import CourseItem,CourseCataLogItem

#将抓取的数据导入到csv（spider版）

# 创建新的爬虫项目
# scrapy startproject scrapy_test

# 生成spider文件，注意：爬虫名字不要和项目名字重复
# scrapy genspider [爬虫名字] [目标网站域名]
# scrapy genspider myspider www.imooc.com


# 调用方式：命令行执行 scrapy crawl myspider
# myspider为你的spider名称，用来区分不同的spider

#导出为json文件
# scrapy crawl myspider -o myspider.json

#导出为csv文件
# scrapy crawl myspider -o myspider.csv -t csv
# scrapy crawl myspider -o myspider.csv

# 单独运行爬虫文件（不走配置文件）
# scrapy runspider myspider.py


def dispose_url_header(url):
    """Normalize a scraped URL to an absolute https URL.

    Handles the forms this site emits:
      * protocol-relative ("//host/path") -> "https://host/path"
      * bare host/path ("host/path")      -> "https://host/path"
      * already absolute ("http(s)://...") -> returned unchanged
      * falsy input (None or "")           -> returned unchanged

    Fixes a bug in the previous version, which stripped every "//" first and
    therefore mangled URLs that already carried a scheme
    ("https://a" -> "https://https:a").
    """
    if not url:
        # extract_first() can yield None; pass falsy values through untouched.
        return url
    if url.startswith(("http://", "https://")):
        return url
    if url.startswith("//"):
        return "https:" + url
    return "https://" + url

#spider爬取
class MyspiderSpider(scrapy.Spider):
    """Spider for imooc.com's free-course listing.

    Crawls the paginated course list, follows each course link to its detail
    page, and yields one CourseItem per course plus CourseCataLogItem rows for
    every chapter and every video within a chapter.
    """

    name = "myspider"                                   # unique spider name, used by `scrapy crawl myspider`
    allowed_domains = ["www.imooc.com"]                 # requests outside this domain are filtered out
    start_urls = ["https://www.imooc.com/course/list"]  # crawl entry point(s)
    web_url = "https://www.imooc.com"

    # Whitespace the site pads titles/descriptions with (space, tab, newline
    # — exactly the characters the scraped markup is padded with).
    _WS_RE = re.compile(r'[ \t\n]+')

    @classmethod
    def _strip_ws(cls, text):
        """Remove spaces, tabs and newlines from *text*."""
        return cls._WS_RE.sub('', text)

    def parse(self, response):
        """Parse one listing page: follow each course link, then paginate."""
        # Renamed from `list`, which shadowed the builtin.
        course_links = response.xpath('//div[@class="course-list"]//a[@class="item free "]')

        for info in course_links:
            data = CourseItem()

            data['title'] = info.xpath('@data-title').extract_first()
            data['title2'] = info.xpath('.//p[@class="one"]/text()').extract_first()
            data['course_id'] = info.xpath('@data-cid').extract_first()
            data['course_url'] = dispose_url_header(info.xpath('@href').extract_first())

            # The cover image URL is embedded in an inline style attribute
            # of the form background-image:url(...).
            style_attr = info.xpath('.//div[@class="img"]/@style').extract_first()
            match = re.search(r'url\((.+?)\)', style_attr)
            if match:
                # Strip surrounding quotes and whitespace from the URL.
                bg_url = match.group(1).strip().replace("'", '')
                data['bg_url'] = dispose_url_header(bg_url)

            # Hand the partially-filled item to the detail-page callback.
            yield response.follow(data['course_url'], callback=self.course_info, meta={
                    "list_data": data,
                })

        # Pagination: while more pages remain, the second-to-last pager link
        # reads "下一页" ("next page").
        next_page = response.xpath('//div[@class="course-list"]//div[@class="page"]/a[last()-1]')
        if next_page.xpath('text()').extract_first() == '下一页':
            next_link = next_page.xpath('@href').extract_first()
            yield response.follow(next_link, callback=self.parse)

    def course_info(self, response):
        """Parse a course detail page.

        Completes the CourseItem started in parse() (teacher + description),
        yields it, then yields one CourseCataLogItem per chapter and per video.
        """
        teacher_info = response.xpath('//div[@class="teacher-info l"]')

        list_data = response.meta['list_data']
        list_data['teacher'] = teacher_info.xpath('.//span[@class="tit"]//a/text()').extract_first()
        list_data['teacher_avatar'] = dispose_url_header(
            teacher_info.xpath('.//img[@class="js-usercard-dialog"]/@src').extract_first())
        list_data['teacher_url'] = teacher_info.xpath('.//span[@class="tit"]//a/@href').extract_first()
        list_data['content'] = response.xpath('//div[contains(@class,"course-description")]/text()').extract_first()
        yield list_data

        c_list = response.xpath('//div[@class="course-chapters"]//div[contains(@class,"chapter")]')
        # chapter_p_id numbers chapters from 1, matching the old manual counter.
        for chapter_p_id, c in enumerate(c_list, start=1):
            c_data = CourseCataLogItem()

            c_data['course_id'] = list_data['course_id']
            c_data['chapter_id'] = chapter_p_id
            c_data['chapter_p_id'] = 0  # chapters are top-level (no parent)
            c_data['title'] = c.xpath('.//h3/text()').extract_first() or ''
            c_data['content'] = c.xpath('.//div[@class="chapter-description"]/text()').extract_first() or ''

            if c_data['content']:
                c_data['content'] = self._strip_ws(c_data['content'])

            # Only emit the chapter row when it actually has a title.
            if c_data['title']:
                c_data['title'] = self._strip_ws(c_data['title'])
                yield c_data

            video_list = c.xpath('.//ul[@class="video"]//li')
            # chapter_id numbers videos from 1 within the current chapter.
            for chapter_id, v in enumerate(video_list, start=1):
                c_data2 = CourseCataLogItem()

                # Collapse the link text and drop the boilerplate labels
                # ("视频：" prefix, "开始学习" suffix) around the real title.
                title_info = self._strip_ws(''.join(v.xpath('.//a//text()').getall()))
                title_info = title_info.replace("视频：", '').replace("开始学习", '')
                # Remove the final parenthesised group (the duration) from the title.
                title = re.sub(r'\([^)]*\)(?!.*\()', '', title_info)

                # The last "(...)" group, when present, is the video duration.
                video_duration = ''
                video_match = re.findall(r'\((.*?)\)', title_info)
                if video_match:
                    video_duration = video_match[-1]

                c_data2['course_id'] = list_data['course_id']
                c_data2['chapter_id'] = chapter_id
                c_data2['chapter_p_id'] = chapter_p_id
                c_data2['title'] = title
                c_data2['video_id'] = v.xpath('.//@data-media-id').extract_first()
                c_data2['video_url'] = v.xpath('.//a/@href').extract_first()
                c_data2['video_duration'] = video_duration
                yield c_data2

