import scrapy
from urllib.parse import urlencode
import json
from ..items import CollectItem
from tool import util, dbutil, storage

CollectConfigKey = "CollectConfigKey"

class TxcreatorSpider(scrapy.Spider):
    """Collect video metadata for tracked creators from Tencent Video.

    For each creator id fetched from the database, pages through the
    creator-center ``GetUserVideoList`` API and yields one ``CollectItem``
    per video, up to ``max_total_page`` pages per creator.
    """

    name = 'txcreator'
    allowed_domains = ['qq.com']
    start_urls = [
        'https://pbaccess.video.qq.com/trpc.creator_center.header_page.personal_page/GetUserVideoList?']
    base_url = 'https://pbaccess.video.qq.com/trpc.creator_center.header_page.personal_page/GetUserVideoList?'

    # Maximum number of pages to crawl per creator; overwritten from the
    # collect config in start_requests().
    max_total_page = 1

    def _page_request(self, creator_id, page):
        """Build the GET request for one page of a creator's video list.

        The query params are also attached as request meta so parse() can
        recover the creator id and the current page number.
        """
        params = {
            "vcuid": creator_id,
            "page_size": 30,
            "page": page,
            "list_type": 1,
        }
        url = self.base_url + urlencode(params)
        return scrapy.Request(url=url, method="GET", callback=self.parse, meta=params)

    def start_requests(self):
        """Read the crawl config, select creators from the DB, and emit the
        first-page request for each creator id."""
        config = util.get_collect_config()
        self.max_total_page = int(config["creatorPage"])
        update_time = config["creatorTime"]
        # A positive creatorTime selects all creators (no filter); otherwise
        # creators are filtered on days since their last update.
        # NOTE(review): filtering only when the value is <= 0 looks inverted —
        # confirm intended semantics against dbutil.fetchCreator.
        if int(update_time) > 0:
            params = None
        else:
            params = {"datediff(update_time, now())": update_time}

        for row in dbutil.fetchCreator(params):
            creator_id = row[2]  # assumes column 2 holds the creator id — TODO confirm schema
            if creator_id:  # skip NULL / empty ids
                yield self._page_request(creator_id, 1)

    def parse(self, response):
        """Parse one page of a creator's video list.

        Yields a CollectItem per video, then requests the next page while
        below both the API's total_page and the configured max_total_page.
        """
        creator_id = response.meta["vcuid"]
        page = response.meta["page"]

        js = json.loads(response.body)
        # A positive ret code signals an API-level error; abandon this branch.
        if js["data"]["ret"] > 0:
            return

        for data in js["data"]["data"]["list"]:
            item = CollectItem()
            # Strip stray control characters (\x05, \x06, \x07) that the API
            # sometimes embeds in titles.
            item['name'] = data["title"].translate({0x05: None, 0x06: None, 0x07: None})
            item['vid'] = data["vid"]
            item['url'] = "https://v.qq.com/x/page/{}.html".format(data["vid"])
            item['img'] = data["image"]
            item['create_time'] = util.timestamp(data["last_page_condition"])
            item['creator_id'] = creator_id
            item['platform'] = 'tx'
            item['duration'] = data['duration']

            yield item

        # Cap paging at the configured maximum.
        total_page = min(js["data"]["data"]["total_page"], self.max_total_page)
        if total_page > page:
            yield self._page_request(creator_id, page + 1)




