from requests.packages.urllib3.exceptions import InsecureRequestWarning
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: lenovo by XYF
@file: amidoufu.py
@time: 2020/03/10
@function: batch-download audio, image sets and videos from Kuaishou music-topic feeds
"""
import requests
import json
import time
import os
import re
import ssl
import fileinput
import sys
import random
from selenium import webdriver


# Globally disable HTTPS certificate verification so requests to the crawled
# hosts do not fail on certificate errors.
# NOTE(review): this is insecure and process-wide; acceptable only for a
# throwaway scraping script.
ssl._create_default_https_context = ssl._create_unverified_context

# Script metadata printed by the intro banner (values are user-facing
# runtime strings, deliberately left in the original Chinese).
INFO = {
    "name": "快手音乐话题批量下载",
    "author": "阿米豆腐丶",
    "repository": "最初版本oGsLP提供,最初源码下载地址:www.github.com/oGsLP/kuaishou-crawler",
    "version": "0.0.1",
    "publishDate": "20-05-06"
}

# Kuaishou endpoints used by the crawler:
# - PROFILE_URL / DATA_URL: live.kuaishou.com profile page and GraphQL API
# - WORK_URL: mobile share page for a single work
# - MUSIC_URL / TEXT_URL: mobile feed APIs for music tags / text tags
PROFILE_URL = "https://live.kuaishou.com/profile/"
DATA_URL = "https://live.kuaishou.com/m_graphql"
WORK_URL = "https://v.kuaishou.com/fw/photo/"
MUSIC_URL = "https://v.kuaishou.com/rest/kd/tag/feed/music"
TEXT_URL =  "https://v.kuaishou.com/rest/kd/tag/feed/text"

class Amidoufu:
    """Batch downloader for Kuaishou "music topic" feeds.

    Workflow:
      1. ``__init__`` prints a banner and drives a local Chrome via Selenium
         (``__openchrome``) so the user can log in; the session cookies are
         captured and written into the request headers by ``set_did``.
      2. Topic ids (or short links carrying a ``musicId`` parameter) are
         queued with ``add_to_list`` or, in non-prod mode, loaded from
         ``yinyuehuati.txt`` / ``yinyuehuati_log.txt`` by ``__read_preset``.
      3. ``doufu`` walks the queue; for every topic ``__doufu_user`` pages
         through the feed, downloads the topic audio once, and downloads
         each work (image set or video) via ``__doufu_work``.
    """

    __param_did = ""

    # Headers for the live.kuaishou.com GraphQL endpoint.
    __headers_web = {
        'accept': '*/*',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
        'Connection': 'keep-alive',
        'Content-Type': 'application/json',
        'Host': 'live.kuaishou.com',
        'Origin': 'https://live.kuaishou.com',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Site': 'same-origin',
        # Adjust User-Agent / Cookie for your own machine.
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36',
        'Cookie': ''
    }
    # Headers emulating the mobile site (v.kuaishou.com).
    __headers_mobile = {
        'accept': '*/*',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-cn',
        'Connection': 'close',
        'Host': 'v.kuaishou.com',
        'User-Agent': 'Mozilla/5.0 (Linux; U; Android 8.1.0; zh-cn; BLA-AL00 Build/HUAWEIBLA-AL00) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/8.9 Mobile Safari/537.36',
        'Cookie': ''
    }
    # Desktop headers for plain v.kuaishou.com requests (currently unused
    # by the methods below; kept for compatibility).
    __headers_Requests_web = {
        'accept': '*/*',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
        'Connection': 'keep-alive',
        'Host': 'v.kuaishou.com',
        'Upgrade-Insecure-Requests': '1',
        # Adjust User-Agent / Cookie for your own machine.
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36',
        'Cookie': ''
    }
    # Headers used for the music-topic feed API and short-link resolution.
    __headers_Music_web = {
        'accept': '*/*',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
        'Connection': 'keep-alive',
        'Host': 'v.kuaishou.com',
        'Upgrade-Insecure-Requests': '1',
        # Adjust User-Agent / Cookie for your own machine.
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36',
        'Cookie': ''
    }

    # Instance-state defaults (previously also shadowed by module globals;
    # now consistently used as instance attributes).
    userid_cook = ""          # userId cookie captured during login
    xiladaili = ""            # unused placeholder kept for compatibility
    bakuid = ""               # id/link of the topic currently being crawled
    bakname = ""              # unused placeholder kept for compatibility
    __doufu_list = []         # queue of topic ids / short links to crawl

    __date_cache = ""         # last work date seen (per-day duplicate counter)
    __date_pic_count = 0

    def __init__(self, prod=True):
        """Print the banner and run the Chrome login flow.

        :param prod: when True only pause briefly after login; when False
                     additionally load the crawl queue from the preset file.
        """
        # Per-instance queue; avoids sharing the mutable class-level list
        # between instances.
        self.__doufu_list = []
        self.__intro()
        if prod:
            time.sleep(5)
        else:
            self.__read_preset()

    def set_did(self, did, did2, did3, did4, did5):
        """Store the login cookie fragments and rebuild the Cookie headers.

        :param did:  Kuaishou device id (``did`` cookie).
        :param did2: ``kuaishou.live.web_st`` cookie value.
        :param did3: ``kuaishou.live.web_ph`` cookie value.
        :param did4: ``kuaishou.live.bfb1s`` cookie value.
        :param did5: ``userId`` cookie value.
        """
        self.__param_did = did
        self.__param_did2 = did2
        self.__param_did3 = did3
        self.__param_did4 = did4
        self.__param_did5 = did5
        web_cookie = ('did=' + did + ';userId=' + did5 +
                      ';kuaishou.live.web_st=' + did2 +
                      ';kuaishou.live.web_ph=' + did3 +
                      ';kuaishou.live.bfb1s=' + did4)
        self.__headers_web['Cookie'] = web_cookie
        # The mobile site wants a ``didv`` timestamp (milliseconds).
        self.__headers_mobile['Cookie'] = ('did=' + did + ';userId=' + did5 +
                                           ';didv=' + str(int(time.time()) * 1000))
        print("用户cookie的值：" + web_cookie)

    def doufu(self):
        """Crawl every queued entry.

        Entries may be a bare music id, a full kuaishou URL containing a
        ``musicId`` query parameter, or a short link that redirects to one.
        """
        # Presence of the marker directory "自动化" selects unattended mode
        # (no delay prompt).
        if not os.path.exists("自动化"):
            latetime = input("输入延时时间(秒/s)：")
        else:
            latetime = str(0)
        print("")
        print("延时时间:" + latetime)
        print("")
        print("准备开始爬取，共有%d个用户..." % len(self.__doufu_list))
        print()
        time.sleep(3)
        for uid in self.__doufu_list:
            # Remember the raw queue entry so __doufu_user can strike it
            # from the pending log once the topic finishes.
            self.bakuid = uid
            self.__date_cache = ""
            self.__date_count = 0
            if "kuaishou" in uid:
                if "musicId" in uid:
                    # Full URL: the musicId is right in the query string.
                    music_id = self.__extract_music_id(uid)
                    print("Short chain:" + uid + "丨Uid:" + music_id)
                    time.sleep(3)
                    self.__doufu_user(music_id, latetime)
                else:
                    # Short link: follow the redirect chain and pull the
                    # musicId out of the final Location header.
                    r = requests.get(uid, headers=self.__headers_Music_web)
                    history = r.history
                    final_location = history[len(history) - 1].headers["location"]
                    music_id = self.__extract_music_id(final_location)
                    print("Short chain:" + uid + "丨Uid:" + music_id)
                    self.__doufu_user(music_id, latetime)
            else:
                print("Uid:" + uid)
                self.__doufu_user(uid, latetime)

    def add_to_list(self, uid):
        """Queue one topic id or link for crawling."""
        self.__doufu_list.append(uid)

    @staticmethod
    def __extract_music_id(url):
        """Return the ``musicId`` query value embedded in *url*."""
        return re.findall('&musicId=(.*?)&appType=', url)[0]

    def __fetch_page(self, uid, pcursor):
        """POST one page of the music-topic feed and return the parsed body."""
        payload = {"music": uid,
                   "type": "9",
                   "count": "100",
                   "pcursor": pcursor}
        res = requests.post(MUSIC_URL, headers=self.__headers_Music_web, json=payload)
        return json.loads(res.content.decode(encoding='utf-8', errors='strict'))

    def __save_topic(self, uid, works, latetime, name_index=0):
        """Create the topic directory, download its audio once, then download
        every work in *works*.

        :param name_index: index of the entry whose music name titles the
                           topic (the 'topPhotos' list uses entry 1).
        :return: the sanitized topic name.
        """
        if not os.path.exists("../快手音乐话题data"):
            os.makedirs("../快手音乐话题data")
        # Strip characters that are illegal in file names.
        name = re.sub(r'[\\/:*?"<>|\r\n]+', "", works[name_index]['music']['name'])
        dir = "快手data/" + name + "丨(" + uid + ")/"
        if not os.path.exists(dir):
            os.makedirs(dir)

        # Download the topic's audio track once.
        music_url = works[0]['music']['audioUrls'][0]['url']
        music_music = dir + name + ".m4a"
        if not os.path.exists(music_music):
            r = requests.get(music_url)
            r.raise_for_status()
            with open(music_music, "wb") as f:
                f.write(r.content)
            print(name + "音乐下载成功 √")

        print("开始爬取话题 " + name + "，保存在目录 " + dir)
        print(" 共有" + str(len(works)) + "个作品")
        print("")
        time.sleep(1)
        for j in range(len(works)):
            self.__doufu_work(uid, dir, works[j], j + 1)
            time.sleep(int(latetime))
        return name

    def __doufu_user(self, uid, latetime):
        """Crawl one music topic: page through its feed, download everything,
        then remove the topic from the pending log file.

        :param uid: music id (numeric ids are first resolved via search).
        :param latetime: per-work delay in seconds (string).
        """
        if uid.isdigit():
            uid = self.__switch_id(uid)

        name = ""
        # First page.
        data = self.__fetch_page(uid, "")
        works = data['feeds']
        print(data['pcursor'])
        pcursor = data['pcursor']
        if works != []:
            name = self.__save_topic(uid, works, latetime)

        # Follow the pagination cursor until the API reports "no_more".
        while pcursor != "no_more":
            print(pcursor)
            data = self.__fetch_page(uid, pcursor)
            works = data['feeds']
            print(pcursor)
            pcursor = data['pcursor']
            if works != []:
                name = self.__save_topic(uid, works, latetime)
        else:
            # NOTE(review): this `else` belongs to the while-loop, so (there
            # being no `break`) it ALWAYS runs once pagination ends: it
            # re-fetches the first page and processes the pinned 'topPhotos'
            # list. Preserved from the original control flow — confirm it is
            # intentional rather than a mis-indented `if/else`.
            data = self.__fetch_page(uid, "")
            works = data['topPhotos']
            if len(works) > 1:
                # The topic name comes from the second pinned entry.
                name = self.__save_topic(uid, works, latetime, name_index=1)

        print("用户 " + name + "视频爬取完成!")
        print("")
        time.sleep(1)
        print(self.bakuid)
        print("等待3秒执行下一个")
        time.sleep(1)
        print("等待2秒执行下一个")
        time.sleep(1)
        print("等待1秒执行下一个")
        # Drop the finished id from the pending log.
        # BUGFIX: the id/URL can contain regex metacharacters ('?', '.', '&'),
        # so it must be escaped before compiling.
        pattern = re.compile(re.escape(self.bakuid))
        with open("yinyuehuati_log.txt", "r") as f:
            remaining = [line for line in f if pattern.search(line) is None]
        with open("yinyuehuati_log.txt", "w") as f:
            f.writelines(remaining)
        time.sleep(1)
        print("")

    def __doufu_work(self, uid, dir, work, wdx):
        """Download one work (feed entry) into *dir*.

        Works come in two shapes here:
          * image set (``work['video']`` is False): pictures listed in
            ``ext_params['atlas']`` when present, otherwise only the cover
            from ``coverUrls``;
          * video (``work['video']`` is True): direct URL in ``mainMvUrls``.

        :param uid:  topic id (unused here; kept for call symmetry).
        :param dir:  output directory of the topic.
        :param work: one entry of the API 'feeds'/'topPhotos' list.
        :param wdx:  1-based index for progress messages.
        """
        w_type = work['video']
        w_caption = re.sub(r"\s+", " ", work['caption'])
        # Caption made file-system safe and truncated for filenames.
        w_name = re.sub(r'[\\/:*?"<>|\r\n]+', "", w_caption)[0:24]
        w_time = time.strftime('%Y-%m-%d', time.localtime(work['timestamp'] / 1000))
        # Per-day duplicate counter; w_index is currently not used in any
        # filename but the counter state is kept as in the original.
        w_index = ""
        if self.__date_cache == w_time:
            self.__date_count = self.__date_count + 1
            if self.__date_count > 0:
                w_index = "(%d)" % self.__date_count
        else:
            self.__date_cache = w_time
            self.__date_count = 0

        if w_type == False:
            # BUGFIX: the original condition
            #   if 'atlas' in work['ext_params'].keys() == True:
            # is a chained comparison (`'atlas' in keys()` AND
            # `keys() == True`) that always evaluates False, so the
            # multi-picture branch was unreachable.
            if 'atlas' in work['ext_params']:
                w_urls = work['ext_params']['atlas']['list']
                total = len(w_urls)
                print("  " + str(wdx) + ")图集作品：" + w_caption + "，" + "共有" + str(total) + "张图片")
                for i in range(total):
                    p_name = w_time + w_name + "_" + work['userEid'] + "_" + work['photoId'] + "_" + str(i + 1) + '.jpg'
                    dir2 = dir + "图集/" + w_time + "_" + w_name + "_" + work['userEid'] + "_" + work['photoId'] + "/"
                    if not os.path.exists(dir2):
                        os.makedirs(dir2)
                    pic = dir2 + p_name
                    if not os.path.exists(pic):
                        # NOTE(review): atlas['list'] entries are used as
                        # complete URLs here — confirm they do not need the
                        # atlas CDN host prepended.
                        r = requests.get(w_urls[i].replace("webp", "jpg"))
                        r.raise_for_status()
                        with open(pic, "wb") as f:
                            f.write(r.content)
                        print("    " + str(i + 1) + "/" + str(total) + " 图片 " + p_name + " 下载成功 √")
                    else:
                        print("    " + str(i + 1) + "/" + str(total) + " 图片 " + p_name + " 已存在 √")
            else:
                # Single picture: fall back to the cover image.
                w_url = work['coverUrls'][0]['url']
                print("  " + str(wdx) + ")图集作品：" + w_caption + "，" + "共有1张图片")
                p_name = w_time + "_" + w_name + "_" + work['userEid'] + "_" + work['photoId'] + '.jpg'
                dir2 = dir + "图集/" + w_time + "_" + w_name + "_" + work['userEid'] + "_" + work['photoId'] + "/"
                if not os.path.exists(dir2):
                    os.makedirs(dir2)
                pic = dir2 + p_name
                if not os.path.exists(pic):
                    r = requests.get(w_url)
                    r.raise_for_status()
                    with open(pic, "wb") as f:
                        f.write(r.content)
                    print("    " + "/" + " 图片 " + p_name + " 下载成功 √")
                else:
                    print("    " + "/" + " 图片 " + p_name + " 已存在 √")
        elif w_type == True:
            v_name = w_time + "_" + w_name + "_" + work['userEid'] + "_" + work['photoId'] + ".mp4"
            dir2 = dir + "视频/"
            video = dir2 + v_name
            if os.path.exists(video):
                print("    视频 " + v_name + " 已存在 √")
            else:
                v_url = work['mainMvUrls'][0]['url']
                try:
                    print("  " + str(wdx) + ")视频作品：" + "丨" + work['photoId'] + "丨" + w_caption)
                except Exception:
                    # Some captions cannot be encoded by the console; skip
                    # the progress line rather than abort the download.
                    print("  这里似乎有点小错误，已跳过")
                if not os.path.exists(dir2):
                    os.makedirs(dir2)
                if v_url:
                    r = requests.get(v_url)
                    r.raise_for_status()
                    with open(video, "wb") as f:
                        f.write(r.content)
                    print("    视频 " + v_name + " 下载成功 √")
                else:
                    print("未找到视频")
                    print(self.restart_program())
        else:
            print("错误的类型")
            print(self.restart_program())

    def __read_preset(self):
        """Load the crawl queue from disk.

        ``yinyuehuati.txt`` holds the full preset list (one id/link per line,
        ``#`` starts a comment line); ``yinyuehuati_log.txt`` tracks the ids
        still pending — finished ids are removed from it by ``__doufu_user``.
        On the first run the log file is seeded from the preset file.
        """
        p_path = "yinyuehuati.txt"
        if not os.path.exists(p_path):
            print("创建预设文件 uid ...")
            # BUGFIX: close the handle instead of leaking it.
            open(p_path, "w").close()
        if not os.path.getsize(p_path):
            print("请在预设文件 uid 中记录需要爬取的用户id，一行一个")
            exit(0)
        if not os.path.exists("yinyuehuati_log.txt"):
            # First run: seed the pending log, but read from the preset file
            # itself this time round.
            with open("yinyuehuati.txt", "r") as src, \
                    open("yinyuehuati_log.txt", "w") as dst:
                dst.write(src.read())
        else:
            p_path = "yinyuehuati_log.txt"
            # An empty pending log means the previous run finished; refill it.
            with open("yinyuehuati_log.txt", "r+") as log:
                if log.read() == "":
                    with open("yinyuehuati.txt", "r") as src:
                        log.write(src.read())
        with open(p_path, "r") as f:
            for line in f:
                # Skip blank lines and '#' comments.
                if line.strip() and not line.startswith("#"):
                    self.__doufu_list.append(line.strip())

    def __switch_id(self, uid):
        """Resolve a numeric id to the canonical principal id via the
        live.kuaishou.com search GraphQL query."""
        payload = {"operationName": "SearchOverviewQuery",
                   "variables": {"keyword": uid, "ussid": None},
                   "query": "query SearchOverviewQuery($keyword: String, $ussid: String) {\n  pcSearchOverview(keyword: $keyword, ussid: $ussid) {\n    list {\n      ... on SearchCategoryList {\n        type\n        list {\n          categoryId\n          categoryAbbr\n          title\n          src\n          __typename\n        }\n        __typename\n      }\n      ... on SearchUserList {\n        type\n        ussid\n        list {\n          id\n          name\n          living\n          avatar\n          sex\n          description\n          counts {\n            fan\n            follow\n            photo\n            __typename\n          }\n          __typename\n        }\n        __typename\n      }\n      ... on SearchLivestreamList {\n        type\n        lssid\n        list {\n          user {\n            id\n            avatar\n            name\n            __typename\n          }\n          poster\n          coverUrl\n          caption\n          id\n          playUrls {\n            quality\n            url\n            __typename\n          }\n          quality\n          gameInfo {\n            category\n            name\n            pubgSurvival\n            type\n            kingHero\n            __typename\n          }\n          hasRedPack\n          liveGuess\n          expTag\n          __typename\n        }\n        __typename\n      }\n      __typename\n    }\n    __typename\n  }\n}\n"}

        res = requests.post(DATA_URL, headers=self.__headers_web, json=payload)
        dt = json.loads(res.content.decode(encoding='utf-8', errors='strict'))['data']
        print(res.text)
        # list[1] is the SearchUserList section; take the first user's id.
        return dt['pcSearchOverview']['list'][1]['list'][0]['id']

    def __openchrome(self):
        """Drive a local Chrome so the user can log in to live.kuaishou.com,
        then harvest the session cookies and hand them to ``set_did``.

        NOTE(review): cookies are picked out of ``driver.get_cookies()`` by
        fixed list positions (4, 1, 0, 7, 2); this depends on the exact
        cookie order the site happens to set — verify before relying on it.
        """
        driver = webdriver.Chrome(executable_path="Google Chrome/chromedriver.exe")
        driver.get("https://live.kuaishou.com/v/hot/")
        name = driver.find_element_by_xpath('//*[@id="app"]/div[1]/div[1]/header/div/div[2]/div[3]/a/span')
        name_title = name.get_attribute('title')
        # Poll until the header shows a user name, i.e. login completed.
        while name_title == "":
            print("你还没有登录,请登录!!!")
            time.sleep(10)
            name = driver.find_element_by_xpath('//*[@id="app"]/div[1]/div[1]/header/div/div[2]/div[3]/a/span')
            name_title = name.get_attribute('title')
        print("你已经登录,登录名:" + name_title)
        driver.get("https://live.kuaishou.com/u/G338217245/3xjz7rnjnh6hjts")
        cookie_values = [item['value'] for item in driver.get_cookies()]
        param_did = cookie_values[4]
        param_did2 = cookie_values[1]
        param_did3 = cookie_values[0]
        param_did4 = cookie_values[7]
        param_did5 = cookie_values[2]
        # Remember the userId cookie on the instance (was a module global).
        self.userid_cook = cookie_values[2]
        driver.get("https://live.kuaishou.com/v/hot/")
        self.set_did(param_did, param_did2, param_did3, param_did4, param_did5)

    def restart_program(self):
        """Re-exec the current interpreter with the original argv, restarting
        the whole crawl (used as crude error recovery; never returns)."""
        python = sys.executable
        os.execl(python, python, *sys.argv)

    def __intro(self):
        """Print the banner and launch the Chrome-based login flow."""
        print()
        print("|  %s (v%s %s)" % (INFO["name"], INFO["version"], INFO["publishDate"]))
        print("|  本程序由%s提供, %s, 喜欢的话可以给个star >_<" % (INFO["author"], INFO["repository"]))
        print()
        self.__openchrome()