# -*- coding: utf-8 -*-
import scrapy
import json
import random

from ..items import CategoryItem,TeacherItem,ChapterItem,CourseItem,CourseDetailItem

class TzSpider(scrapy.Spider):
    """Spider for shiguangkey.com.

    Crawl order:
      1. ``parse``            -> category tree  -> ``CategoryItem``
      2. ``parse_course``     -> course list    -> ``CourseItem`` + 3 follow-ups
      3. ``parse_teacher``    -> main teachers  -> ``TeacherItem``
      4. ``parse_courseInfo`` -> course detail  -> ``CourseDetailItem``
      5. ``parse_chapterInfo``-> chapter list   -> ``ChapterItem``

    NOTE(review): ``meta={'driver': True}`` suggests a Selenium-style
    download middleware renders the JSON API responses inside an HTML
    ``<pre>`` element — confirm against the project's middleware settings.
    """

    name = 'tz_bk15'
    allowed_domains = ['www.shiguangkey.com']
    start_urls = ['https://www.shiguangkey.com/api/homePage/listCate']

    # API URL templates, filled via str.format().
    CourseList_urls = 'https://www.shiguangkey.com/api/courses/getCourseList?pageIndex={}&pageSize=30&cateId={}'
    TeacherList_urls = 'https://www.shiguangkey.com/api/courses/getMainTeacherList?classId={}&courseId={}'
    CourseInfo_urls = 'https://www.shiguangkey.com/api/courses/getCourseInfo?courseId={}'
    ChapterInfo_urls = 'https://www.shiguangkey.com/api/courses/listChapterInfo?classId={}'
    # FIX: the original value started with a space (' https://...'), which
    # produced invalid cover URLs when concatenated.
    base_cover_url = 'https://res.shiguangkey.com/'

    pageIndex = 1
    # Synthetic auto-increment primary key; the same value is reused as the
    # course id AND as the foreign keys (chapter/teacher/course_detail) that
    # link the follow-up items back to the course.
    auto_increment = 0

    @staticmethod
    def _parse_pre_json(response):
        """Return the JSON payload of *response* as a dict.

        The rendering middleware wraps the raw JSON in a ``<pre>`` element,
        so extract that text first. Replaces the original ``eval()`` call,
        which executed untrusted response data and could not handle the JSON
        literals ``true``/``false``/``null``.
        """
        raw = response.xpath('//pre/text()').extract_first()
        return json.loads(raw)

    def parse(self, response):
        """Parse the category tree, yield one ``CategoryItem`` per node and
        schedule a course-list request per category id."""
        payload = json.loads(response.body)
        roots = payload.get('data', {}).get('list') or []

        if payload.get('msg') != 'success' or not roots:
            return

        rows = []      # (id, name, parent_id) triples, tree flattened depth-first
        cate_ids = []  # category ids, used to fetch each category's courses

        def walk(node, parent_id):
            """Recursively flatten one category subtree into ``rows``."""
            cate_id = int(node['cateId'])
            rows.append((cate_id, node['cateName'], parent_id))
            cate_ids.append(cate_id)
            for child in node.get('children') or []:
                walk(child, cate_id)

        for root in roots:
            walk(root, 0)  # top-level categories get parent_id 0

        for cate_id, cate_name, parent_id in rows:
            # Fresh item per yield — the original mutated and re-yielded a
            # single shared instance.
            item = CategoryItem()
            item['id'] = cate_id
            item['name'] = cate_name
            item['parent_id'] = parent_id
            self.logger.debug('category item: %r', item)
            yield item

        for category_id in cate_ids:
            yield scrapy.Request(
                url=self.CourseList_urls.format(self.pageIndex, category_id),
                callback=self.parse_course,
                meta={'driver': True, 'category_id': category_id},
                dont_filter=True,
            )

    def parse_course(self, response):
        """Parse one page of a category's course list.

        Yields a ``CourseItem`` per course plus three follow-up requests
        (teachers, course detail, chapters), all sharing this course's
        synthetic ``auto_increment`` id via ``meta``.
        """
        category_id = response.meta['category_id']
        payload = self._parse_pre_json(response)
        courses = payload['data']['list']

        for course in courses:
            course_id = course['id']
            class_id = course['classId']

            self.auto_increment += 1
            item = CourseItem()
            item['id'] = self.auto_increment
            item['category_id'] = int(category_id)
            item['chapter_id'] = self.auto_increment
            item['teacher_id'] = self.auto_increment
            item['course_detail_id'] = self.auto_increment
            item['title'] = course['title']
            item['price'] = int(course['price'])
            item['intro'] = course['descrption']  # NOTE: API key is misspelled upstream
            item['cover_url'] = self.base_cover_url + course['cover']
            item['content'] = ''    # filled by CourseDetailItem instead
            item['video_url'] = ''  # filled by CourseDetailItem instead
            item['duration'] = random.randint(1, 100)  # API exposes no duration; placeholder

            yield scrapy.Request(
                url=self.TeacherList_urls.format(class_id, course_id),
                callback=self.parse_teacher,
                meta={'driver': True, 'teacher_id': self.auto_increment},
                dont_filter=True,
            )
            yield scrapy.Request(
                url=self.CourseInfo_urls.format(course_id),
                callback=self.parse_courseInfo,
                meta={'driver': True, 'course_detail_id': self.auto_increment},
                dont_filter=True,
            )
            yield scrapy.Request(
                url=self.ChapterInfo_urls.format(class_id),
                callback=self.parse_chapterInfo,
                meta={'driver': True, 'chapter_id': self.auto_increment},
                dont_filter=True,
            )

            self.logger.debug('course item: %r', item)
            yield item

    def parse_teacher(self, response):
        """Yield one ``TeacherItem`` per teacher in the course's main-teacher
        list, all keyed to the course's synthetic teacher_id."""
        teacher_id = response.meta['teacher_id']
        teachers = self._parse_pre_json(response)['data']['list']

        for teacher in teachers:
            item = TeacherItem()
            item['id'] = teacher_id
            item['name'] = teacher['nick']
            item['avatar_url'] = teacher['headImg']
            item['profile'] = teacher['intro']
            item['positional_title'] = '讲师'
            self.logger.debug('teacher item: %r', item)
            yield item

    def parse_courseInfo(self, response):
        """Yield a single ``CourseDetailItem`` for one course's detail page."""
        course_detail_id = response.meta['course_detail_id']
        detail = self._parse_pre_json(response)['data']

        if not detail:
            return

        item = CourseDetailItem()
        item['id'] = course_detail_id
        item['title'] = detail['title']
        item['intro'] = detail['descrption']  # NOTE: API key is misspelled upstream
        item['cover_url'] = self.base_cover_url + detail['cover']
        item['content'] = detail['content']
        item['video_url'] = detail['coverVideo']
        item['price'] = detail['price']
        item['duration'] = random.randint(1, 100)  # API exposes no duration; placeholder

        self.logger.debug('course detail item: %r', item)
        yield item

    def parse_chapterInfo(self, response):
        """Yield one ``ChapterItem`` per chapter of a class.

        All chapters of a course share the course's synthetic chapter_id
        (many-to-many link back to the course).
        """
        chapter_id = response.meta['chapter_id']
        chapters = self._parse_pre_json(response)['data']['list']

        for chapter in chapters:
            item = ChapterItem()
            item['id'] = chapter_id
            item['name'] = chapter['name']
            item['title'] = chapter['title']
            self.logger.debug('chapter item: %r', item)
            yield item