# -*- coding: utf-8 -*-
import scrapy
from bs4 import BeautifulSoup
from urllib import parse
from ..items import *
from datetime import datetime
import json


def now_date_time():
    """Return the current local time formatted as ``YYYY-MM-DD HH:MM:SS``."""
    return f"{datetime.now():%Y-%m-%d %H:%M:%S}"


class ImoocFreeSpider(scrapy.Spider):
    """Crawl imooc.com course listings.

    Crawl order: course directions (nav row 1) -> course types (nav row 2)
    -> paginated course list -> course detail page -> teacher profile page.
    Each stage yields one item type and follow-up :class:`scrapy.Request`s.
    """
    name = 'imooc_free'
    allowed_domains = ['imooc.com']
    start_urls = ['https://www.imooc.com/course/list']

    # Numeric code for the level label shown on a course card.
    # Unknown/missing labels map to 0.
    LEVEL_MAP = {'入门': 1, '初级': 2, '进阶': 3, '高级': 4}

    def parse(self, response):
        """Parse course directions from the first nav row.

        :param response: the course-list landing page
        :return: yields ``ImoocFreeDirectItem`` plus a Request per direction
                 (carrying ``direct_code`` in meta) to :meth:`parse_type`
        """
        soup = BeautifulSoup(response.body, 'html5lib')
        for imooc_dir in soup.find_all(class_='course-nav-row')[0].find_all('a'):
            # Skip the "全部" (All) pseudo-entry.
            if imooc_dir.text != '全部':
                imooc_direct_item = ImoocFreeDirectItem()
                imooc_direct_item['direct_name'] = imooc_dir.text
                imooc_direct_item['direct_code'] = imooc_dir['data-ct']
                imooc_direct_item['direct_url'] = parse.urljoin(response.url, imooc_dir['href'])
                imooc_direct_item['created_at'] = now_date_time()
                yield imooc_direct_item
                yield scrapy.Request(url=imooc_direct_item['direct_url'],
                                     meta={'direct_code': imooc_direct_item['direct_code']},
                                     callback=self.parse_type)

    def parse_type(self, response):
        """Parse course types (categories) from the second nav row.

        :param response: a direction page (meta carries ``direct_code``)
        :return: yields ``ImoocFreeTypeItem`` plus a Request per type
                 (carrying ``type_id`` in meta) to :meth:`parse_course_list`
        """
        direct_code = response.meta['direct_code']
        soup = BeautifulSoup(response.body, 'html5lib')
        for imooc_type in soup.find_all(class_='course-nav-row')[1].find_all('a'):
            # Skip the "全部" (All) pseudo-entry.
            if imooc_type.text != '全部':
                imooc_type_item = ImoocFreeTypeItem()
                imooc_type_item['type_id'] = int(imooc_type['data-id'])
                imooc_type_item['type_name'] = imooc_type.text
                imooc_type_item['type_url'] = parse.urljoin(response.url, imooc_type['href'])
                imooc_type_item['direct_code'] = direct_code
                imooc_type_item['created_at'] = now_date_time()
                yield imooc_type_item
                yield scrapy.Request(url=imooc_type_item['type_url'],
                                     meta={'type_id': imooc_type_item['type_id']},
                                     callback=self.parse_course_list)

    def parse_course_list(self, response):
        """Parse one page of course cards for a course type.

        :param response: a course-list page (meta carries ``type_id``)
        :return: yields ``ImoocFreeCourseListItem`` per card, a Request per
                 course to :meth:`parse_course_info`, and at most one
                 next-page Request back to this method
        """
        type_id = response.meta['type_id']
        soup = BeautifulSoup(response.body, 'html5lib')
        for course in soup.find_all(class_='course-card-container'):
            imooc_course_list = ImoocFreeCourseListItem()
            imooc_course_list['type_id'] = type_id
            imooc_course_list['course_name'] = course.find('h3').text
            # Course id is the last path segment of the card link.
            imooc_course_list['course_id'] = int(course.find('a', class_='course-card')['href'].split('/')[-1])
            tmp_img_url = course.find(class_='course-card-top').find(class_='course-banner')['data-original']
            imooc_course_list['course_img_url'] = parse.urljoin('https://www.imooc.com/', tmp_img_url)
            imooc_course_list['course_card_desc'] = course.find(class_='course-card-desc').text
            # Optional labels: some cards have no course-label element.
            tmp_label_list = []
            tmp_label_soup = course.find(class_='course-label')
            if tmp_label_soup:
                for label in tmp_label_soup.find_all('label'):
                    tmp_label_list.append(label.text)
            imooc_course_list['course_label_str'] = ','.join(tmp_label_list)

            # Course level.
            # BUGFIX: the original elif chain compared tmp_level_num (an int)
            # against '初级'/'高级', so levels 2 and 4 could never be set.
            tmp_card_info = course.find(class_='course-card-info').find_all('span')
            tmp_level = tmp_card_info[0].text
            imooc_course_list['course_level'] = self.LEVEL_MAP.get(tmp_level, 0)
            imooc_course_list['course_student_num'] = int(tmp_card_info[1].text)

            # Course price: a '￥'-prefixed amount, otherwise free (0.0).
            tmp_course_price = course.find(class_='price').text
            if tmp_course_price.find('￥') != -1:
                imooc_course_list['course_price'] = float(tmp_course_price.replace('￥', ''))
            else:
                imooc_course_list['course_price'] = 0.0
            # Original (pre-discount) price, when shown.
            tmp_cost_price_soup = course.find(class_='cost-price')
            imooc_course_list['course_cost_price'] = 0.0
            if tmp_cost_price_soup:
                imooc_course_list['course_cost_price'] = float(tmp_cost_price_soup.text.replace('￥', ''))

            imooc_course_list['course_url'] = 'https://www.imooc.com/learn/' + str(
                imooc_course_list['course_id'])
            imooc_course_list['created_at'] = now_date_time()
            yield imooc_course_list
            # Course detail page.
            yield scrapy.Request(url=imooc_course_list['course_url'],
                                 meta={'course_id': imooc_course_list['course_id']},
                                 callback=self.parse_course_info)

        # Next page.
        # BUGFIX: moved out of the per-course loop — the original yielded a
        # duplicate next-page request for every course card on the page.
        tmp_next_page = soup.find('a', text='下一页')
        if tmp_next_page:
            next_page_url = parse.urljoin(response.url, tmp_next_page['href'])
            yield scrapy.Request(url=next_page_url, meta={'type_id': type_id},
                                 callback=self.parse_course_list)

    def parse_course_info(self, response):
        """Parse a course detail page: description, chapters, teacher link.

        :param response: a course detail page (meta carries ``course_id``)
        :return: yields ``ImoocFreeCourseInfoItem`` plus a Request to
                 :meth:`parse_teacher_info`
        """
        course_id = response.meta['course_id']
        soup = BeautifulSoup(response.body, 'html5lib')
        course_info = ImoocFreeCourseInfoItem()
        course_info['course_id'] = course_id
        course_info['course_description'] = soup.find(class_='course-description').text.strip()
        course_info['course_chapter_info'] = ''
        tmp_chapter = soup.find_all(class_='chapter')
        tmp_chapter_list = []
        for i, chapter in enumerate(tmp_chapter):
            tmp_dic = {
                'index': i,
                'title': chapter.find('h3').text.replace('\n', '').strip(),
                'chapter_description': chapter.find(class_='chapter-description').text.replace('\n', '').strip(),
                'chapter_list': []
            }
            # Some chapters only have a title and no video list.
            tmp_video = chapter.find(class_='video')
            if tmp_video:
                for video in tmp_video.find_all('a'):
                    tmp_chapter_info = {
                        'title': video.text.replace('\n', '').replace('开始学习', '').replace(' ', '').strip(),
                        'url': parse.urljoin(response.url, video['href'])
                    }
                    tmp_dic['chapter_list'].append(tmp_chapter_info)
            tmp_chapter_list.append(tmp_dic)
        course_info['course_chapter_info'] = json.dumps(tmp_chapter_list)
        course_info['created_at'] = now_date_time()
        course_info['teacher_id'] = int(
            soup.find(class_='teacher-info').find('img', class_='js-usercard-dialog')['data-userid'])
        course_info['teacher_url'] = parse.urljoin(response.url, soup.find(class_='teacher-info').find('a')['href'])
        yield course_info
        yield scrapy.Request(url=course_info['teacher_url'], callback=self.parse_teacher_info)

    def parse_teacher_info(self, response):
        """Parse a teacher profile page.

        Two layouts exist, distinguished by the first URL path segment:
        ``/u/<id>`` (user profile) and ``/t/<id>`` (teacher profile).

        :param response: a teacher profile page
        :return: yields ``ImoocTeacherItem``
        """
        teacher_item = ImoocTeacherItem()
        tmp_teacher_info = parse.urlparse(response.url).path.strip('/').split('/')
        teacher_item['teacher_id'] = int(tmp_teacher_info[1])
        teacher_item['teacher_url'] = response.url
        tmp_url_type = tmp_teacher_info[0]
        soup = BeautifulSoup(response.body, 'html5lib')

        if tmp_url_type == 'u':
            teacher_item['teacher_header_img'] = parse.urljoin(response.url,
                                                               soup.find(class_='user-pic-bg').find('img')['src'])
            tmp_teacher_detail = []
            tmp_desc = soup.find(class_='user-desc')
            if tmp_desc:
                teacher_item['teacher_desc'] = tmp_desc.text
            tmp_detail = soup.find(class_='study-info').find_all('div', class_='follows')
            for detail in tmp_detail:
                if detail.find('em'):
                    tmp_dict = {
                        'text': detail.find('span').text,
                        'num': detail.find('em').text
                    }
                    tmp_teacher_detail.append(tmp_dict)
            teacher_item['teacher_detail'] = json.dumps(tmp_teacher_detail)
            teacher_item['teacher_nickname'] = soup.find(class_='user-name').find('span').text
            teacher_item['teacher_professional'] = ''
        elif tmp_url_type == 't':
            teacher_item['teacher_header_img'] = parse.urljoin(response.url,
                                                               soup.find('img', class_='tea-header')['src'])
            teacher_item['teacher_nickname'] = soup.find(class_='tea-nickname').text
            teacher_item['teacher_professional'] = soup.find(class_='tea-professional').text
            tmp_teacher_detail = []
            tmp_detail = soup.find(class_='tea-detail-box').find('ul').find_all('li')
            for detail in tmp_detail:
                tmp_p = detail.find_all('p')
                # BUGFIX: the original read `tmp_p[1].num`, which is not a Tag
                # attribute — BeautifulSoup resolves it as a search for a child
                # <num> tag and returns None, so every 'num' was serialized as
                # null. Assumes the first <p> holds the number and the second
                # the label, mirroring the 'u' branch — TODO confirm against a
                # live /t/ page.
                tmp_dict = {
                    'text': tmp_p[1].text,
                    'num': tmp_p[0].text
                }
                tmp_teacher_detail.append(tmp_dict)
            teacher_item['teacher_detail'] = json.dumps(tmp_teacher_detail)
            tmp_desc = soup.find(class_='tea-desc')
            if tmp_desc:
                teacher_item['teacher_desc'] = tmp_desc.text
            else:
                teacher_item['teacher_desc'] = ''
        teacher_item['created_at'] = now_date_time()
        yield teacher_item