import scrapy
import time
import json
from urllib.parse import urlencode
from ..items import CollectItem
from tool import util

class TxvSpider(scrapy.Spider):
    """Search spider for Tencent Video (v.qq.com).

    For every configured keyword it pages through the HTML search results,
    extracts one :class:`CollectItem` per video entry, then chains a request
    to the creator-center ``GetUserMeta`` API to enrich the item with the
    creator's nickname and description before yielding it.
    """

    name = 'txv'
    allowed_domains = ['qq.com']
    start_urls = [
        'https://v.qq.com/x/search/?'
    ]

    # Creator metadata endpoint; the numeric vcuid is appended, e.g.
    # https://pbaccess.video.qq.com/trpc.creator_center.header_page.personal_page/GetUserMeta?vcuid=9000123992
    creator_info_url = "https://pbaccess.video.qq.com/trpc.creator_center.header_page.personal_page/GetUserMeta?vcuid="

    # Fields carried from the search-result item through request meta into
    # the final enriched item (see parse / parse_creator_info).
    _ITEM_FIELDS = ('name', 'url', 'vid', 'img', 'create_time',
                    'creator_id', 'platform', 'duration')

    def start_requests(self):
        """Yield one search request per (keyword, page) pair.

        Keywords and page count come from the collect config:
        ``keyword`` is a comma-separated list of search terms (e.g. game
        titles), ``page`` is the number of result pages to fetch per term.
        """
        params = {
            "queryFrom": "4",
            "isNeedQc": "true",
            "filterValue": "firstTabid=0&sortTabid=1&tabid=0&timeLongTabid=1&publishTimeTabid=1",
            "preQid": "deVGwptT99JcLAIHdB1yxENVRqvCCcxSo-XK8C8YtUTxAuPfy9hr6g",
            "q": "英雄联盟",
            "cur": "1",
        }
        config = util.get_collect_config()
        keywords = config["keyword"].split(",")
        pagesize = int(config["page"]) + 1
        for url in self.start_urls:
            for q in keywords:
                params["q"] = q
                for page in range(1, pagesize):
                    params["cur"] = str(page)
                    # Millisecond timestamp appended as "_" for cache busting,
                    # mimicking the site's own request pattern.
                    t = int(round(time.time() * 1000))
                    new_url = url + urlencode(params) + "&_=" + str(t) + "#!filtering=1"
                    yield scrapy.Request(url=new_url, method="GET", callback=self.parse)

    def parse(self, response):
        """Extract video entries from one search-result page.

        For each complete entry, build a :class:`CollectItem` and chain a
        request for the creator's metadata, passing the item via meta.
        """
        div_lists = response.xpath(
            '//div[@class="result_item result_item_h _quickopen"]')
        for div in div_lists:
            url = div.xpath('./a/@href').get()
            img = div.xpath('./a/img/@src').get()
            name = div.xpath('./a/img/@alt').get()
            time_str = div.xpath(
                './div/div/div[@class="info_item info_item_odd"]/span[@class="content"]/text()').get()
            creator_id = div.xpath(
                './div/div/div[@class="info_item info_item_even"]/span/a/@data-view-account-id').get()
            t = util.parse_ymd(time_str)
            img = util.getCompleteUrl(img)
            # Guard before deriving vid: a missing href would otherwise raise
            # AttributeError on None.split before the emptiness checks below.
            vid = url.split('/')[-1].replace('.html', '') if url else None
            duration = div.xpath('./a/span/span/text()').get()

            if (util.isNotEmpty(name) and util.isNotEmpty(t)
                    and util.isNotEmpty(url) and util.isNotEmpty(vid)
                    and util.isNotEmpty(creator_id)):
                item = CollectItem()
                # Strip stray control characters the page embeds in titles.
                name = name.replace("\x05", "").replace(
                    "\x06", "").replace("\x07", "")
                item['name'] = name
                item['url'] = url
                item['vid'] = vid
                item['img'] = img
                item['create_time'] = t
                item['creator_id'] = creator_id
                item['platform'] = 'tx'
                item['duration'] = duration
                info_url = self.creator_info_url + creator_id
                yield scrapy.Request(url=info_url, method="GET",
                                     callback=self.parse_creator_info, meta=item)

    def parse_creator_info(self, response):
        """Parse the GetUserMeta JSON response and emit the final item.

        On API error (``ret`` > 0) the item is emitted without creator
        details; otherwise ``creator_nick``/``creator_desc`` are attached.
        """
        params = response.meta

        item = CollectItem()
        for field in self._ITEM_FIELDS:
            item[field] = params[field]

        js = json.loads(response.body)
        # BUGFIX: original wrote int(js["data"]["ret"] > 0), converting the
        # boolean result of the comparison instead of the ret code itself.
        if int(js["data"]["ret"]) > 0:
            # API reported an error — emit the item without creator details.
            # (Fallback profile page: https://v.qq.com/biu/videoplus?vuid=...)
            yield item
        else:
            item["creator_nick"] = js["data"]["data"]["creator_nick"]
            item["creator_desc"] = js["data"]["data"]["creator_desc"]
            yield item
