import scrapy
import hashlib
from bs4 import BeautifulSoup
from urllib.parse import urljoin
from ..items import MoocCourseItem


class ImoocSpider(scrapy.Spider):
    """Spider for imooc.com that scrapes three course categories from the
    site's navigation bar: hands-on courses (实战课程), career-path
    courses (金职位) and text columns (专栏)."""

    name = 'imooc'
    allowed_domains = ['imooc.com']
    start_urls = ['https://imooc.com/']

    def parse(self, response):
        """Parse the home page navigation and dispatch one request per
        recognised category to its dedicated callback.

        :param response: home-page response
        :return: generator of scrapy.Request
        """
        soup = BeautifulSoup(response.text, 'html5lib')
        # NOTE(review): find(class_='nav-item') returns only the FIRST
        # element with that class; if each nav entry is its own
        # 'nav-item' this misses the rest — confirm against live markup.
        nav_items = soup.find(id='header').find(class_='nav-item').find_all('a')
        for nav_item in nav_items:
            item_title = ''.join(nav_item.stripped_strings)
            item_url = urljoin(self.start_urls[0], nav_item['href'])
            if item_title == '实战课程':
                yield scrapy.Request(url=item_url, callback=self.coding_course)
            elif item_title == '金职位':
                yield scrapy.Request(url=item_url, callback=self.class_course)
            elif item_title == '专栏':
                yield scrapy.Request(url=item_url, callback=self.read_course)

    def coding_course(self, response):
        """Scrape the hands-on course (实战课程) listing page.

        Yields one MoocCourseItem per course card, then follows the
        "下一页" (next page) link recursively.

        :param response: listing-page response
        :return: generator of MoocCourseItem / scrapy.Request
        """
        soup = BeautifulSoup(response.text, 'html5lib')
        for course in soup.find_all(class_='shizhan-course-wrap'):
            # Bug fix: create the item INSIDE the loop.  The original
            # reused one instance and mutated it between yields, so every
            # yielded reference pointed at the same (last-written) data.
            mooc_item = MoocCourseItem()
            # NOTE(review): 'shizan-name' looks like a typo of
            # 'shizhan-name', but it may mirror the site's own markup —
            # verify against the live HTML before changing the selector.
            mooc_item['course_title'] = course.find(class_='shizan-name').string
            mooc_item['course_url'] = urljoin(response.url, course.find('a')['href'])
            # MD5 of the course URL doubles as a stable screenshot id.
            mooc_item['course_sale_screenshot'] = hashlib.md5(mooc_item['course_url'].encode()).hexdigest()
            course_img = course.find(class_='shizhan-course-img')['src']
            mooc_item['course_img_url'] = urljoin(response.url, course_img)
            # The price lives under one of two class names; default to
            # '0.0' (free) when neither is present.
            if course.find(class_='course-card-price'):
                mooc_item['course_price'] = course.find(class_='course-card-price').string
            elif course.find(class_='cost-price'):
                mooc_item['course_price'] = course.find(class_='cost-price').string
            else:
                mooc_item['course_price'] = '0.0'
            lecturer = course.find(class_='lecturer-info')
            mooc_item['course_teacher_name'] = lecturer.find('span').string
            mooc_item['course_teacher_header_url'] = urljoin(response.url, lecturer.find('img')['src'])
            yield mooc_item

        next_page = soup.find('a', text='下一页')
        if next_page:
            yield scrapy.Request(url=urljoin(response.url, next_page['href']),
                                 callback=self.coding_course)

    def class_course(self, response):
        """Scrape the career-path (金职位) listing page.

        :param response: listing-page response
        :return: generator of MoocCourseItem
        """
        soup = BeautifulSoup(response.text, 'html5lib')
        for course in soup.find_all('div', class_='js-show-num'):
            # Fresh item per card — see coding_course for the rationale.
            mooc_item = MoocCourseItem()
            mooc_item['course_title'] = ''.join(course.find('a', class_='title').stripped_strings)
            mooc_item['course_url'] = urljoin(response.url, course['data-url'])
            # The image URL is embedded in an inline CSS background-image.
            course_img = course.find('div', class_='img-con')['style'] \
                .replace('background-image:url(', '').replace(')', '')
            mooc_item['course_img_url'] = urljoin(response.url, course_img)
            mooc_item['course_sale_screenshot'] = hashlib.md5(mooc_item['course_url'].encode()).hexdigest()
            mooc_item['course_price'] = course.find(class_='old-price').string
            # This listing exposes no teacher information.
            mooc_item['course_teacher_name'] = ''
            mooc_item['course_teacher_header_url'] = ''
            yield mooc_item

    def read_course(self, response):
        """Scrape the text-column (专栏) listings through the paged list
        endpoint; stops when the page body contains '没有数据了'
        ("no more data").

        :param response: landing page (page 0) or a paged list response
        :return: generator of MoocCourseItem / scrapy.Request
        """
        page = response.meta.get('page') or 0

        if page == 0:
            # First visit: jump from the landing page to the paged
            # list endpoint, starting at page 1.
            url = "https://www.imooc.com/read/list?page=" + str(page + 1) + "&mark=1"
            yield scrapy.Request(url=url, meta={'page': page + 1},
                                 callback=self.read_course)
        else:
            res_text = response.text
            if res_text.find('没有数据了') == -1:
                # Bug fix: pass an explicit parser, consistent with the
                # other callbacks.  The original relied on bs4's parser
                # auto-detection, which warns and can vary by environment.
                soup = BeautifulSoup(res_text, 'html5lib')
                for r in soup.find_all('li'):
                    # Fresh item per entry — see coding_course.
                    mooc_item = MoocCourseItem()
                    mooc_item['course_title'] = r.find('p', class_='title').string
                    mooc_item['course_url'] = urljoin(response.url, r.find('a')['href'])
                    mooc_item['course_sale_screenshot'] = hashlib.md5(mooc_item['course_url'].encode()).hexdigest()
                    course_img_url = r.find('div', class_='img')['style'] \
                        .replace('background-image:url(', '').replace(')', '')
                    mooc_item['course_img_url'] = urljoin(response.url, course_img_url)
                    mooc_item['course_teacher_name'] = r.find('span', class_='author-name').string
                    mooc_item['course_teacher_header_url'] = r.find('div', class_='author-img')['style'] \
                        .replace('background-image:url(', '').replace(')', '')
                    mooc_item['course_price'] = r.find('p', class_='sale').string
                    yield mooc_item
                # Follow to the next page.
                url = "https://www.imooc.com/read/list?page=" + str(page + 1) + "&mark=1"
                yield scrapy.Request(url=url, meta={'page': page + 1},
                                     callback=self.read_course)
