# -*- coding: UTF-8 -*-
import scrapy
import messAround.util.help as util


# Tencent Video - popular ranking lists
# https://v.qq.com/biu/ranks/?t=hotsearch
class TencentVideoSpider(scrapy.Spider):
    """Spider for Tencent Video (v.qq.com) ranking pages.

    Crawls two families of ranking pages:
      - video play rankings (``t=hotplay``), handled by :meth:`parse`
      - hot-search rankings (``t=hotsearch``), handled by :meth:`parse_search`

    Each scraped entry is wrapped via ``util.make_data`` and yielded to the
    pipeline.
    """

    # Source identifier attached to play-ranking items.
    source = 1

    # Source identifier attached to hot-search items.
    search_source = 2

    name = 'tencent_video'

    allowed_domains = ['v.qq.com']

    start_urls = ['http://v.qq.com/']

    def start_requests(self):
        """Schedule requests for every daily ranking page.

        Yields one ``scrapy.Request`` per channel: play rankings go to
        :meth:`parse`, hot-search rankings go to :meth:`parse_search`.
        """
        # Video play rankings, one URL per content channel.
        video_urls = [
            'https://v.qq.com/biu/ranks/?t=hotplay&channel=all&ct=pc_all_rank_hot',
            'https://v.qq.com/biu/ranks/?t=hotplay&channel=all&ct=pc_all_rank_origin',
            'https://v.qq.com/biu/ranks/?t=hotplay&channel=all&ct=pc_all_rank_movie',
            'https://v.qq.com/biu/ranks/?t=hotplay&channel=all&ct=pc_all_rank_variety',
            'https://v.qq.com/biu/ranks/?t=hotplay&channel=all&ct=pc_all_rank_cartoon',
            'https://v.qq.com/biu/ranks/?t=hotplay&channel=all&ct=pc_all_rank_child',
        ]

        for url in video_urls:
            yield scrapy.Request(url=url, callback=self.parse, headers=util.default_headers)

        # Hot-search rankings, one URL per channel.
        search_urls = [
            'https://v.qq.com/biu/ranks/?t=hotsearch&channel=rank',
            'https://v.qq.com/biu/ranks/?t=hotsearch&channel=tv',
            'https://v.qq.com/biu/ranks/?t=hotsearch&channel=variety',
            'https://v.qq.com/biu/ranks/?t=hotsearch&channel=cartoon',
            'https://v.qq.com/biu/ranks/?t=hotsearch&channel=child',
            'https://v.qq.com/biu/ranks/?t=hotsearch&channel=movie',
            'https://v.qq.com/biu/ranks/?t=hotsearch&channel=doco',
            'https://v.qq.com/biu/ranks/?t=hotsearch&channel=ent',
            'https://v.qq.com/biu/ranks/?t=hotsearch&channel=doki',
            'https://v.qq.com/biu/ranks/?t=hotsearch&channel=games',
            'https://v.qq.com/biu/ranks/?t=hotsearch&channel=music',
            'https://v.qq.com/biu/ranks/?t=hotsearch&channel=hot',
        ]

        for url in search_urls:
            yield scrapy.Request(url=url, callback=self.parse_search, headers=util.default_headers)

    def parse_search(self, response):
        """Parse a hot-search ranking page and yield one item per list entry.

        Iterates ``li[2]`` through ``li[50]`` of the ranking list (li[1] is
        presumably a header row — verify against the live page) and keeps the
        li index as the rank number, matching the original behavior.
        """
        # Maps the trend icon CSS class (with the shared "icon_xs " prefix
        # stripped) to a normalized trend label.
        trend_name = {"": "none", "icon_rise_xs": "up", "icon_hold_xs": "hold", "icon_decline_xs": "down"}

        category_name = response.xpath('//*[@id="app"]/div/div/div/div[1]/div/a/text()').get()

        for index in range(2, 51):
            item_xpath = f'//*[@id="app"]/div/div/div/div[2]/ul/li[{index}]/'

            # Trend icon class, e.g. "icon_xs icon_rise_xs". `.get()` returns
            # None when the node is missing, so default to '' before replace()
            # to avoid an AttributeError on short or changed pages.
            trend = (response.xpath(item_xpath + 'div[4]/i/@class').get() or '').replace("icon_xs ", "")

            # Heat is encoded as the heat-bar width in the inline style,
            # e.g. "width:91%;;" — strip the wrapper, keep the number.
            hot = (response.xpath(item_xpath + 'div[3]/div/span/@style').get() or '').replace(
                "width:", "").replace("%;;", "")

            yield util.make_data({
                'category_name': category_name,
                'source': self.search_source,
                'no': index,
                'title': response.xpath(item_xpath + 'div[1]/a/text()').get(),
                'link': response.xpath(item_xpath + 'div[1]/a/@href').get(),
                'area': response.xpath(item_xpath + 'div[2]/a/text()').get(),
                'hot': hot,
                # Unknown icon classes fall back to "none" instead of raising
                # KeyError if the site ever introduces a new trend icon.
                'trend': trend_name.get(trend, "none"),
            })

    def parse(self, response):
        """Parse a play-ranking page: the featured #1 entry, then ranks 2-20."""
        # Rank 1 lives in a dedicated, larger layout block.
        first_xpath = '//*[@id="app"]/div/div/div/div[2]/div/div/div/div/div[2]/div/'

        category_name = response.xpath('//*[@id="app"]/div/div/div/div[1]/div/a/text()').get()

        yield util.make_data({
            'category_name': category_name,
            'source': self.source,
            'no': 1,
            'title': response.xpath(first_xpath + 'strong/a/text()').get(),
            # NOTE(review): alt_title reuses the title xpath here, while the
            # 2-20 list reads a/img/@alt — possibly intentional, confirm.
            'alt_title': response.xpath(first_xpath + 'strong/a/text()').get(),
            'link': response.xpath(first_xpath + 'strong/a/@href').get(),
            'cover': response.xpath(first_xpath + 'a/img/@src').get(),
            'info': response.xpath(first_xpath + 'div[1]/text()').get(),
        })

        # Ranks 2-20 share a compact list layout; div[index] is 1-based, so
        # the rank number is index + 1.
        for index in range(1, 20):
            item_xpath = f'//*[@id="app"]/div/div/div/div[2]/div/div/div/div/div[3]/div/div[{index}]/div/'

            yield util.make_data({
                'category_name': category_name,
                'source': self.source,
                'no': index + 1,
                'title': response.xpath(item_xpath + 'strong/a/text()').get(),
                'alt_title': response.xpath(item_xpath + 'a/img/@alt').get(),
                'link': response.xpath(item_xpath + 'a/@href').get(),
                'cover': response.xpath(item_xpath + 'a/img/@src').get(),
            })
