# -*- coding: utf-8 -*-
import json
import re

import scrapy
from ..items import *

# from scrapy.spiders import CrawlSpider, Rule
# from scrapy.linkextractors import LinkExtractor

def convert_int(s):
    """Parse a count string such as "1,234" into an int.

    Falsy input (None or the empty string) yields 0.
    """
    if not s:
        return 0
    return int(s.replace(",", ""))


# Short alias used throughout the spider.
ci = convert_int

# Session cookies attached to every request this spider issues.
# NOTE(review): the Authorization token is hard-coded and will eventually
# expire — consider moving it to settings or an environment variable.
cookies = {
    "Authorization": "6573616B8714C4A648714C45418714CBEE18714CCDBF98342592"
}

class DicoverySpider(scrapy.Spider):
    """Spider for xinpianchang.com's discovery channel.

    Crawl flow:
        listing page -> video detail page -> (video resource API,
        paginated comment API, creator profile pages).

    Yields ``PostItem``, ``CopyrightItem``, ``CommentItem`` and
    ``ComposerItem`` instances.

    NOTE(review): the class name keeps the original "Dicovery" spelling so
    any external references to it continue to work.
    """

    name = 'discovery'
    allowed_domains = ['xinpianchang.com', "openapi-vtom.vmovier.com"]
    start_urls = ['http://www.xinpianchang.com/channel/index/sort-like']

    # Pre-compiled raw-string patterns.  The originals were plain strings
    # ('\(' is an invalid escape sequence that warns on modern Python) and
    # were recompiled on every response.
    _VID_RE = re.compile(r'vid: "(.+?)"')
    _BANNER_RE = re.compile(r'background-image:url\((.+?)\)')

    def start_requests(self):
        """Issue the seed requests with the auth cookie attached."""
        for url in self.start_urls:
            yield scrapy.Request(url, dont_filter=True, cookies=cookies)

    def parse(self, response):
        """Listing page: schedule one detail-page request per video card."""
        for post in response.xpath('//ul[@class="video-list"]/li'):
            pid = post.xpath('./@data-articleid').get()     # video id
            thumbnail = post.xpath('./a/img/@_src').get()   # thumbnail URL

            url = "http://www.xinpianchang.com/a%s?from=ArticleList" % pid
            yield scrapy.Request(
                url=url,
                callback=self.parse_post,
                # Carry the listing-page data through to the detail callback.
                meta={'pid': pid, 'thumbnail': thumbnail},
            )

    def parse_post(self, response):
        """Video detail page: build a PostItem and follow the video
        resource API, the comment API and every creator profile."""
        pid = response.meta['pid']
        thumbnail = response.meta['thumbnail']

        post_item = PostItem()
        post_item['pid'] = pid
        post_item['thumbnail'] = thumbnail
        post_item['title'] = response.xpath('//div[@class="title-wrap"]/h3/text()').get()
        # Category path, joined as "cat-subcat".
        cates = response.xpath('//span[@class="cate v-center"]/a/text()').getall()
        post_item['category'] = '-'.join(cate.strip() for cate in cates)
        post_item['created_at'] = response.xpath('//span[contains(@class, "update-time")]/i/text()').get()
        post_item['play_counts'] = response.xpath('//i[@data-curplaycounts]/@data-curplaycounts').get()
        post_item['like_counts'] = response.xpath('//span[@data-counts]/@data-counts').get()
        description = response.xpath('//p[contains(@class, "desc")]/text()').get()
        post_item['description'] = (description or "").strip()

        # The vid is embedded in inline JS on the page.  re.search tolerates
        # zero or many matches; the original `vid, = re.findall(...)` raised
        # ValueError unless exactly one match was present.
        match = self._VID_RE.search(response.text)
        if match:
            url = "https://openapi-vtom.vmovier.com/v3/video/%s?expand=resource,resource_origin?" % match.group(1)
            yield scrapy.Request(
                url=url,
                callback=self.parse_video,
                meta={'post_item': post_item},
            )
        else:
            # No playable resource found: emit the partial item instead of
            # silently dropping the post.
            self.logger.warning("no vid found on %s", response.url)
            yield post_item

        # First page of comments; parse_comment follows the pagination.
        comment_url = "http://www.xinpianchang.com/article/filmplay/ts-getCommentApi?id=%s&ajax=0&page=1" % pid
        yield scrapy.Request(url=comment_url, callback=self.parse_comment)

        # Creators credited on this post.
        composer_list = response.xpath('//div[@class="filmplay-creator right-section"]/ul[@class="creator-list"]/li')
        self.logger.debug("composer_list: %d", len(composer_list))

        for composer in composer_list:
            c_id = composer.xpath('./a/@data-userid').get()
            url = "http://www.xinpianchang.com/u%s?from=articleList" % c_id
            yield scrapy.Request(
                url=url,
                callback=self.parse_composer,
                meta={'c_id': c_id},
            )

            # Post <-> creator relation, one row per credited creator.
            copyright_item = CopyrightItem()
            copyright_item['pcid'] = "%s_%s" % (pid, c_id)
            copyright_item['pid'] = pid
            copyright_item['cid'] = c_id
            copyright_item['roles'] = composer.xpath('.//*[contains(@class, "roles")]/text()').get()
            yield copyright_item

    def parse_video(self, response):
        """Video resource API (JSON): fill in the media fields and emit the
        completed PostItem."""
        res = json.loads(response.text)
        data = res['data']

        post_item = response.meta['post_item']
        post_item['preview'] = data['video']['cover']           # preview image
        default = data['resource']['default']
        post_item['video'] = default['url']                     # stream URL
        post_item['video_format'] = default['profile_code']     # resolution/profile code
        post_item['duration'] = default['duration']
        yield post_item

    def parse_comment(self, response):
        """Comment API (JSON): one CommentItem per comment, then follow the
        next page if there is one."""
        res = json.loads(response.text)
        for comment in res['data']['list']:
            user = comment['userInfo']
            comment_item = CommentItem()
            comment_item["commentid"] = comment['commentid']
            comment_item["pid"] = comment['articleid']
            comment_item["cid"] = user["userid"]
            comment_item["avatar"] = user["face"]
            comment_item["uname"] = user["username"]
            comment_item["created_at"] = comment['addtime_int']
            comment_item["content"] = comment['content']
            comment_item["like_counts"] = comment['count_approve']
            # 'reply' is either a parent-comment object or falsy; store the
            # parent comment's id, or 0 for top-level comments.
            reply = comment['reply']
            comment_item["reply"] = reply["commentid"] if reply else 0
            yield comment_item

        next_page = res['data']["next_page_url"]
        if next_page:
            self.logger.debug("next_page: %s", next_page)
            yield scrapy.Request(url=next_page, callback=self.parse_comment)

    def parse_composer(self, response):
        """Creator profile page: build a ComposerItem."""
        cid = response.meta['c_id']  # creator id (table primary key)

        # Banner image URL is embedded in an inline style attribute.
        # Tolerate a missing banner: the original crashed with TypeError on
        # a None style and IndexError when the pattern did not match.
        style = response.xpath('//div[@class="banner-wrap"]/@style').get() or ""
        banner_match = self._BANNER_RE.search(style)
        banner = banner_match.group(1) if banner_match else ""

        avatar = response.xpath('//span[@class="avator-wrap-s"]/img/@src').get()
        # Verified badge present -> "yes", absent -> "no".
        verified = response.xpath('//span[@class="avator-wrap-s"]/span[contains(@class, "author-v")]').get()
        verified = 'yes' if verified else "no"
        name = response.xpath('//div[@class="creator-info"]/p[1]/text()').get()
        intro = response.xpath('//div[@class="creator-info"]/p[2]/text()').get()

        composer_item = ComposerItem()
        composer_item['cid'] = cid
        composer_item['banner'] = banner
        composer_item['avatar'] = avatar
        composer_item['verified'] = verified
        composer_item['name'] = name
        composer_item['intro'] = intro
        # Counters are rendered as "1,234"-style strings; ci() -> int.
        composer_item['like_counts'] = ci(response.xpath('//div[contains(@class,"creator-detail")]/span[1]/span[2]/text()').get())
        composer_item['fans_counts'] = ci(response.xpath('//div[contains(@class,"creator-detail")]/span[2]/span[2]/text()').get())
        composer_item['follow_counts'] = ci(response.xpath('//div[contains(@class,"creator-detail")]/span[3]/span[2]/text()').get())
        composer_item['location'] = response.xpath('//div[contains(@class,"creator-detail")]/span[5]/text()').get() or ""
        composer_item['career'] = response.xpath('//div[contains(@class,"creator-detail")]/span[7]/text()').get() or ""
        yield composer_item













