import json
import math

import scrapy
from scrapy import Request

from xpc.itemloaders import XpcWorkLoader
from xpc.items import XpcWorkItem

# Cookie jar attached to every listing/detail request below.
# NOTE(review): hard-coded Authorization token — presumably a login/session
# credential that will expire; consider moving it to Scrapy settings or an
# environment variable rather than keeping it in source control.
cookies = dict(
    Authorization='C6DFFBE0609B63B2F609B64C6D609B6B9EE609B6B04475750EB3'
)


class WorksSpider(scrapy.Spider):
    """Crawl video works from xinpianchang.com's "discover" listings.

    Flow: start_requests -> parse (read pagination info from the embedded
    Next.js state) -> parse_page (follow each work's detail URL) ->
    parse_data (map the work's JSON payload onto an XpcWorkItem).
    """

    name = 'works'
    allowed_domains = ['xinpianchang.com']
    start_urls = [
        'https://www.xinpianchang.com/discover/article-0-0-all-all-0-0-hot?page=']
    # Spoofed crawler UA — presumably makes the site serve fully
    # server-rendered pages; confirm against the site's behavior.
    headers = {
        'User-Agent': 'Baiduspider'
    }
    # Maximum number of listing pages to schedule. The original code
    # hard-coded range(1, 2) (one page) while leaving the computed total
    # unused; raise this to crawl more pages, up to the site's page count.
    page_limit = 1

    def start_requests(self):
        """Seed the crawl with the first listing page.

        Attaches ``self.headers``, which was previously defined but never
        sent with any request.
        """
        return [Request(url=self.start_urls[0], headers=self.headers,
                        callback=self.parse)]

    @staticmethod
    def _next_data(response):
        """Return the parsed ``__NEXT_DATA__`` JSON blob, or None if absent.

        Both listing callbacks need this; previously the extraction and
        ``json.loads`` were duplicated, and a missing script tag would have
        crashed ``json.loads(None)`` with a TypeError.
        """
        data_str = response.xpath('//script[@id="__NEXT_DATA__"]/text()').get()
        if data_str is None:
            return None
        return json.loads(data_str)

    def parse(self, response):
        """Read pagination info and schedule one request per listing page."""
        data = self._next_data(response)
        if data is None:
            self.logger.warning('No __NEXT_DATA__ found on %s', response.url)
            return
        article_data = data['props']['pageProps']['discoverArticleData']
        pages = math.ceil(article_data['total'] / article_data['per_page'])
        # Honour the configurable cap (default 1 page, matching the
        # original behavior) instead of ignoring the computed page count.
        for i in range(1, min(pages, self.page_limit) + 1):
            yield response.follow(self.start_urls[0] + str(i),
                                  self.parse_page, cookies=cookies)

    def parse_page(self, response):
        """Follow every work on a listing page to its detail endpoint."""
        data = self._next_data(response)
        if data is None:
            self.logger.warning('No __NEXT_DATA__ found on %s', response.url)
            return
        for work in data['props']['pageProps']['discoverArticleData']['list']:
            # Listing URLs carry a 'newstudios' scheme/prefix; rewrite it to
            # https so Scrapy can fetch them (idiomatic str method instead of
            # the original str.replace(v['url'], ...)).
            yield response.follow(work['url'].replace('newstudios', 'https'),
                                  self.parse_data, cookies=cookies)

    def parse_data(self, response):
        """Map one work's JSON payload onto an XpcWorkItem via the loader."""
        # Item loader for the work item
        il = XpcWorkLoader(item=XpcWorkItem(), response=response)
        # The detail endpoint responds with JSON, not HTML
        ret = json.loads(response.text)
        data = ret['data']
        # article id
        il.add_value('article_id', data['id'])
        # video (media) id
        il.add_value('media_id', data['media_id'])
        # info URL for this article
        il.add_value('info_url', 'https://app.xinpianchang.com/article/%s' % data['id'])
        # title
        il.add_value('title', data['title'])
        # video appKey
        il.add_value('app_key', data['video']['appKey'])
        # source URL of the video stream
        il.add_value('video_url', data['video']['content']['progressive'][0]['url'])
        # cover image
        il.add_value('cover', data['cover'])
        # "<category>-<subcategory>" from the first category entry
        cat0 = data['categories'][0]
        il.add_value('category', cat0['category_name'] + '-' + cat0['sub']['category_name'])
        # description
        il.add_value('description', data['content'])
        # like count
        il.add_value('likes', data['count']['count_like'])
        # collection (bookmark) count
        il.add_value('collections', data['count']['count_collect'])
        # view count
        il.add_value('play_counts', data['count']['count_view'])
        # publish time
        il.add_value('publish_time', data['publish_time'])
        # publisher IP location
        il.add_value('ip_location', data['ip_location'])

        return il.load_item()
