# -*- coding: utf-8 -*-
import pprint

import requests
import execjs
import scrapy
import json
from BiliBiliSpider.items import TencentSpiderItem
import re

# Full browser-like header set used for the vd.l.qq.com/proxyhttp POST
# (the vinfo proxy checks origin/referer against v.qq.com).
# NOTE(review): real browsers send sec-ch-ua-platform as the quoted token
# '"Windows"' (quotes included in the header value) — confirm whether the
# unquoted value here is accepted by the endpoint.
TENCENT_HEADERS = {
    'accept': 'application/json, text/javascript, */*; q=0.01',
    'accept-encoding': 'gzip, deflate, br',
    'accept-language': 'zh-CN,zh;q=0.9,ja;q=0.8',
    'content-type': 'text/plain',
    'origin': 'https://v.qq.com',
    'referer': 'https://v.qq.com/',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': "Windows",
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'same-site',
    'User-Agent': 'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10'
}

# Minimal header set (UA only) used for the ordinary page-crawling requests.
TENCENT_HEADERS_USE = {
    'User-Agent': 'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10'
}


class TencentSpider(scrapy.Spider):
    """Crawl Tencent Video (v.qq.com).

    Flow: channel index page -> up to 3 channel pages -> up to 3 cover pages
    per channel -> per-episode play URL, then POST to the vd.l.qq.com vinfo
    proxy to obtain each episode's m3u8 link.
    """
    name = 'tencent'
    # With several spiders in the project, each spider must pin its own item
    # pipelines via custom_settings, otherwise they do not take effect.
    custom_settings = {
        'ITEM_PIPELINES': {
            'BiliBiliSpider.pipelines.DownloadTxVideoPipeline': 305,
            'BiliBiliSpider.pipelines.TencentSaveMysqlPipeline': 350,
            'scrapy_redis.pipelines.RedisPipeline': 300,
        },
    }

    def start_requests(self):
        """Entry point: request the Tencent Video 'choice' channel index."""
        start_url = 'https://v.qq.com/channel/choice'
        yield scrapy.Request(
            url=start_url,
            callback=self.oneTxHtml,
            headers=TENCENT_HEADERS_USE,
        )

    def oneTxHtml(self, response):
        """Parse the level-1 page and follow the channel URLs found in it."""
        oneHtml = response.text
        # Extract channel URLs; dots escaped so the pattern matches the
        # literal host instead of any character.
        indexUrlList = re.findall(r'<a href="(//v\.qq\.com/channel/.*?)"', oneHtml, re.S)
        indexUrlList = list(set(indexUrlList))  # de-duplicate
        # BUGFIX: slice instead of indexing range(0, 3) — the original raised
        # IndexError whenever fewer than three channel links matched.
        for channel_url in indexUrlList[:3]:
            yield scrapy.Request(
                url='https:' + channel_url,
                callback=self.twoTxHtml,
                headers=TENCENT_HEADERS_USE,
            )

    def twoTxHtml(self, response):
        """Parse a channel page and follow up to three cover (video) pages."""
        twoHtml = response.text
        # Cover-page links, e.g. https://v.qq.com/x/cover/xxxx.html
        # ('.' before html escaped; the original matched any character there).
        allVideoList = re.findall(r'<a href="(https://v\.qq\.com/x/cover/.*?\.html)', twoHtml, re.S)
        allVideoList = list(set(allVideoList))
        # BUGFIX: the original mutated ONE shared item across every yielded
        # request, so concurrent callbacks could overwrite each other's
        # videoPortion. Create a fresh item per request. The slice also
        # replaces the count/continue loop that kept iterating uselessly
        # past the third URL.
        for url in allVideoList[:3]:
            item = TencentSpiderItem()
            # Cover URL: the prefix that each episode vid is appended to.
            item['videoPortion'] = url
            yield scrapy.Request(
                url=url,
                meta={'item': item},
                callback=self.threeTxHtml,
                headers=TENCENT_HEADERS_USE,
            )

    def threeTxHtml(self, response):
        """Parse a cover page: extract episode vids and the title, build each
        episode's play URL, and query the vinfo proxy for its m3u8 link.
        """
        threeHtml = response.text
        item = response.meta['item']
        # COVER_INFO is a JSON object embedded between two JS var statements.
        coverMatch = re.findall('var COVER_INFO = (.*?)var COLUMN_INFO', threeHtml, re.S)
        if not coverMatch:
            # Page layout changed or an anti-bot page was served.
            self.logger.warning('COVER_INFO not found on %s', response.url)
            return
        # Strip a possible trailing statement terminator before decoding —
        # json.loads rejects a dangling ';'.
        coverInfo = json.loads(coverMatch[0].strip().rstrip(';').strip())
        # Episode descriptors, e.g. [{"F":2,"V":"w0034a9x5wj"}, ...].
        # F appears to be a payment flag (F=7 looks VIP-only) — TODO confirm.
        videoVid = coverInfo['nomal_ids']
        # Cover image link(s).
        item['txVideoImage'] = re.findall('"new_pic_hz":"(.*?)"', threeHtml, re.S)
        # Number the episodes 1..n in page order.
        for number, episode in enumerate(videoVid, start=1):
            episode['number'] = number
        # Title with spaces removed.
        item['videoName'] = coverInfo['title'].replace(" ", "")
        for episode in videoVid:
            # Only F == 2 entries are freely playable; skip the rest.
            if episode['F'] != 2:
                continue
            # BUGFIX: yield an independent copy per episode — the original
            # mutated and yielded the very same item object repeatedly, so
            # pipelines could see later episodes' data in earlier items.
            episodeItem = item.copy()
            # Full play URL = cover URL minus '.html' + '/<vid>.html', e.g.
            # https://v.qq.com/x/cover/e13cyu7h55k62ps/x003470h0be.html
            episodeItem['play_tx_video'] = item['videoPortion'][0:-5] + '/' + episode['V'] + '.html'
            # Episode number assigned above.
            episodeItem['txNumber'] = episode['number']
            # Build the POST form data for the vinfo proxy.
            data = self.getFormData(episodeItem['play_tx_video'])
            # NOTE(review): a blocking requests call inside a Scrapy callback
            # stalls the reactor; consider yielding a scrapy.Request instead.
            # Timeout added so a dead endpoint cannot hang the spider forever.
            res = requests.post('https://vd.l.qq.com/proxyhttp',
                                data=json.dumps(data),
                                headers=TENCENT_HEADERS,
                                timeout=10)
            videoInfo = json.loads(res.text)
            print('errCodes', videoInfo['errCode'])
            # Response shape: {'ad': '{}', 'errCode': 0, 'vinfo': '{}'} —
            # 'vinfo' is itself a JSON string and must be decoded again.
            if videoInfo['errCode'] == 0:
                episodeItem['videoM3u8'] = \
                    json.loads(videoInfo["vinfo"])["vl"]["vi"][0]["ul"]["ui"][0]['url']
                yield episodeItem

    def getFormData(self, url):
        """Build the POST body for the vd.l.qq.com/proxyhttp vinfo request.

        :param url: real play URL (v.qq.com cover page or mp.weixin.qq.com)
        :return: dict with 'buid', 'vinfoparam' and 'adparam' keys
        """
        with open("./tencentJs.js", "r", encoding="gbk") as f:
            js_code = f.read()
        # Compile the helper JS once per call instead of once per ctx.call —
        # the original recompiled the same source up to three times.
        ctx = execjs.compile(js_code)
        vinfoparam = 'spsrt=1&charge=0&defaultfmt=auto&otype=ojson&guid={}&flowid={}&platform={}&sdtfrom={}&defnpayver=1&appVer={}&host=v.qq.com&refer=v.qq.com&sphttps=1&tm={}&spwm=4&vid={}&defn=&fhdswitch=0&show1080p=1&isHLS=1&dtype=3&sphls=2&spgzip=1&dlver=2&drm=32&hdcp=1&spau=1&spaudio=15&defsrc=1&encryptVer=9.1&cKey={}'
        data = {}
        data["buid"] = "vinfoad"
        guid = ctx.call('createGUID')
        # Weixin-hosted videos use a different platform/sdtfrom/app version.
        if "mp.weixin.qq.com" in url:
            vid = re.compile(r"&vid=(.*?)&").findall(url)[0]  # non-greedy
            plateform = "70201"
            sdtfrom = "v1104"
            appVer = "3.4.40"
        else:
            # vid is the filename part of the play URL, without '.html'.
            vid = url.split("/")[-1].split(".")[0]
            plateform = "10201"
            sdtfrom = "v1010"
            appVer = "3.5.57"
        # Common to both branches — hoisted out of the duplicated code.
        flowid = guid + '_' + plateform
        tm = ctx.call('getTime')
        ckey = ctx.call('getCKey', plateform, appVer, vid)

        data["vinfoparam"] = vinfoparam.format(guid, flowid, plateform, sdtfrom, appVer, tm, vid, ckey)
        adparam = "pf=in&ad_type=LD%7CKB%7CPVL&pf_ex=pc&url={}&refer={}&ty=web&plugin=1.0.0&v={}&vid={}&pt=&flowid={}&vptag=www_baidu_com&pu=0&chid=0&adaptor=2&dtype=1&live=0&resp_type=json&guid={}&req_type=1&from=0&appversion=1.0.173&platform=10201&tpid=3"
        data['adparam'] = adparam.format(url, url, appVer, vid, flowid, guid)
        return data
