from requests.packages.urllib3.exceptions import InsecureRequestWarning
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: lenovo by XYF
@file: amidoufu.py
@time: 2020/03/10
@function: batch-download all works (videos and image sets) of queued Kuaishou users
"""
import requests
import json
import time
import os
import re
import ssl
import fileinput
import sys
import random
from selenium import webdriver

ssl._create_default_https_context = ssl._create_unverified_context

INFO = {
    "name": "快手用户批量下载",
    "author": "阿米豆腐丶",
    "repository": "最初版本oGsLP提供,最初源码下载地址:www.github.com/oGsLP/kuaishou-crawler",
    "version": "0.0.1",
    "publishDate": "20-05-06"
}

PROFILE_URL = "https://live.kuaishou.com/profile/"
DATA_URL = "https://live.kuaishou.com/m_graphql"
WORK_URL = "https://v.kuaishou.com/fw/photo/"


class Amidoufu:
    """Batch crawler that downloads all works (videos and image sets) of the
    queued Kuaishou users via the live.kuaishou.com GraphQL API."""

    # NOTE(review): every attribute below is declared at class level and is
    # therefore shared between all Amidoufu instances.

    # 'did' device-id cookie captured from the logged-in browser (set_did()).
    __param_did = ""

    # Headers for GraphQL POSTs to live.kuaishou.com (desktop browser UA).
    __headers_web = {
        'accept': '*/*',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
        'Connection': 'keep-alive',
        'Content-Type': 'application/json',
        'Host': 'live.kuaishou.com',
        'Origin': 'https://live.kuaishou.com',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Site': 'same-origin',
        # Adjust User-Agent/Cookie to match your own machine
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36',
        'Cookie': ''
    }
    # Headers for mobile share-page GETs against v.kuaishou.com (Android UA).
    __headers_mobile = {
        'accept': '*/*',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-cn',
        'Connection': 'close',
        'Host': 'v.kuaishou.com',
        'User-Agent': 'Mozilla/5.0 (Linux; U; Android 8.1.0; zh-cn; BLA-AL00 Build/HUAWEIBLA-AL00) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/8.9 Mobile Safari/537.36',
        'Cookie': ''
    }
    # Headers used when resolving short share links by following redirects.
    __headers_Requests_web = {
            'accept': '*/*',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
            'Connection': 'keep-alive',
            'Host': 'v.kuaishou.com',
            'Upgrade-Insecure-Requests': '1',
            # Adjust User-Agent/Cookie to match your own machine
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36',
            'Cookie': ''
        }
    #User-Agent': 'Mozilla/5.0 (Linux; U; Android 8.1.0; zh-cn; BLA-AL00 Build/HUAWEIBLA-AL00) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/8.9 Mobile Safari/537.36


    # userId cookie value of the logged-in account; __openchrome() also mirrors
    # it into a module-level global of the same name.
    userid_cook = ""
    # API key for the xiladaili proxy service (loaded from xiladaili.txt).
    xiladaili = ""
    # Uid or short link of the user currently being processed.
    bakuid = ""
    # Work queue of user ids / share links to crawl.
    __doufu_list = []

    # Date string of the previously processed work; same-day works get a
    # "(n)" suffix (see __doufu_work).
    __date_cache = ""
    # presumably a leftover counter — __doufu_work uses __date_count instead; TODO confirm
    __date_pic_count = 0

    def __init__(self, prod=True):
        """Show the banner and run the Chrome login flow; in prod mode pause
        briefly afterwards, otherwise load the preset files for crawling."""
        self.__intro()
        if not prod:
            self.__read_preset()
        else:
            time.sleep(5)

    def set_did(self, did, did2, did3, did4, did5):
        self.__param_did = did
        #self.__param_did1 = did1
        self.__param_did2 = did2
        self.__param_did3 = did3
        self.__param_did4 = did4
        self.__param_did5 = did5
        #self.__param_did3 = did3
        self.__headers_web['Cookie'] = 'did=' + did + ';userId=' + did5 + ';kuaishou.live.web_st=' + did2  + ';kuaishou.live.web_ph=' + did3 + ';kuaishou.live.bfb1s=' + did4
        self.__headers_mobile['Cookie'] = 'did=' + did + ';userId=' + did5 + ';didv=' + str(int(time.time())*1000)
        print("用户cookie的值：" + 'did=' + did + ';userId=' + did5 + ';kuaishou.live.web_st=' + did2  + ';kuaishou.live.web_ph=' + did3 + ';kuaishou.live.bfb1s=' + did4)
        #self.__headers_web['Cookie'] = 'did=' + did + ';kuaishou.live.web_ph=' + did1 + ';kuaishou.live.web_st=' + did2 + ';kuaishou.live.bfb1s=' + did3 +';didv=' + str(int(time.time())*1000) + ';userId='
        #self.__headers_mobile['Cookie'] = 'did=' + did + ';kuaishou.live.web_ph=' + did1 + ';kuaishou.live.web_st=' + did2 + ';kuaishou.live.bfb1s=' + did3 +';didv=' + str(int(time.time())*1000) + ';userId='
        #print("用户cookie的值：" + 'did=' + did + ';kuaishou.live.web_ph=' + did1 + ';kuaishou.live.web_st=' + did2 + ';kuaishou.live.bfb1s=' + did3 +';didv=' + str(int(time.time())*1000) + ';userId=')
        #print(self.__headers_mobile['Cookie'])


    def doufu(self):
        """Crawl every user queued in __doufu_list.

        Accepts plain uids as well as kuaishou share links: "/profile" links
        are parsed directly, other short links are resolved by following the
        HTTP redirect chain to the final profile URL.
        """
        # A directory named "自动化" switches to unattended mode with a random
        # delay; otherwise the per-download delay is asked interactively.
        if not os.path.exists("自动化"):
            latetime = input("输入延时时间(秒/s)：")  # str(random.randint(1,5))
        else:
            latetime = str(random.randint(5,10))
        print("")
        print("延时时间:" + latetime)
        print("")
        print("准备开始爬取，共有%d个用户..." % len(self.__doufu_list))
        print()
        time.sleep(3)
        for uid in self.__doufu_list:
            #print(uid)
            # Keep the raw uid/short-link in a module-level global; it is read
            # again by __doufu_user/__doufu_work for resume bookkeeping.
            global bakuid
            bakuid = uid
            #print(bakuid)
            # Reset the per-user same-day filename counters.
            self.__date_cache = ""
            self.__date_count = 0
            if "kuaishou" in uid: #kuaishouapp.com
                if "/profile" in uid:
                    # Profile URL: the uid is the 5th "/"-separated segment.
                    recovery3 = uid
                    recovery_list = recovery3.split("/")
                    uid1 = recovery_list[4]
                    print("Short chain:" + uid + "丨Uid:" + uid1)
                    time.sleep(3)
                    self.__doufu_user(uid1, latetime)
                else :
                    # Short link: follow the redirects and read the final
                    # Location header to recover the profile URL.
                    r = requests.get(uid, headers=self.__headers_Requests_web)
                    # print(r.history)
                    reditList = r.history  # sequence of redirect responses
                    # print(f'获取重定向的历史记录：{reditList}')
                    # print(f'获取第一次重定向的headers头部信息：{reditList[0].headers}')
                    # print(f'获取重定向最终的url：{reditList[len(reditList) - 1].headers["location"]}')
                    recovery1 = reditList
                    recovery2 = reditList[0].headers
                    recovery3 = reditList[len(reditList) - 1].headers["location"]
                    recovery_list = recovery3.split("/")
                    uid1 = recovery_list[4]
                    print("Short chain:" + uid + "丨Uid:" + uid1)
                    # print(recovery_list[4])
                    # print(recovery3)
                    self.__doufu_user(uid1, latetime)
            else:
                print("Uid:" + uid)
                self.__doufu_user(uid, latetime)




    def add_to_list(self, uid):
        """Queue one user id / share link for crawling."""
        self.__doufu_list.append(uid)

    def __doufu_user(self, uid , latetime):
        """Crawl all works of one user.

        Queries the privateFeeds GraphQL endpoint for the user's complete work
        list, then downloads every work not yet present on disk, keeping a
        resume record in list_num.txt and removing the finished uid from
        uid_log.txt.

        Kuaishou work types (workType):
          * 'vertical' / 'multiple' — image sets, all links in imgUrls
          * 'single'                — one image, link in imgUrls
          * 'ksong'                 — karaoke, only the images are fetched
          * 'video'                 — video, url scraped from the share page

        :param uid: numeric uid (resolved via __switch_id) or principal id
        :param latetime: delay between downloads, in seconds (string)
        """
        if uid.isdigit():
            uid = self.__switch_id(uid)

        payload = {"operationName": "privateFeedsQuery",
                   "variables": {"principalId": uid, "pcursor": "", "count": 99999},
                   "query": "query privateFeedsQuery($principalId: String, $pcursor: String, $count: Int) {\n  privateFeeds(principalId: $principalId, pcursor: $pcursor, count: $count) {\n    pcursor\n    list {\n      id\n      thumbnailUrl\n      poster\n      workType\n      type\n      useVideoPlayer\n      imgUrls\n      imgSizes\n      magicFace\n      musicName\n      caption\n      location\n      liked\n      onlyFollowerCanComment\n      relativeHeight\n      timestamp\n      width\n      height\n      counts {\n        displayView\n        displayLike\n        displayComment\n        __typename\n      }\n      user {\n        id\n        eid\n        name\n        avatar\n        __typename\n      }\n      expTag\n      __typename\n    }\n    __typename\n  }\n}\n"}
        res = requests.post(DATA_URL, headers=self.__headers_web, json=payload)
        works = json.loads(res.content.decode(encoding='utf-8', errors='strict'))['data']['privateFeeds']['list']

        if works != []:
            if not os.path.exists("../快手data"):
                os.makedirs("../快手data")

            # When the user is live, the first entry is the live stream and has
            # no id — drop it to avoid NoneType errors.
            if works[0]['id'] is None:
                works.pop(0)
            name = re.sub(r'[\\/:*?"<>|\r\n]+', "", works[0]['user']['name'])
            dir = "快手data/" + name + "(" + uid + ")/"
            if not os.path.exists(dir):
                os.makedirs(dir)

            print("开始爬取用户 " + name + "，保存在目录 " + dir)
            print(" 共有" + str(len(works)) + "个作品")
            print("")
            time.sleep(1)

            photos, videos = self.__count_downloaded(dir)
            allnumber = photos + videos
            print("丨当前电脑共有" + "丨" + name + "丨视频:" + str(videos) + "个丨图集:" + str(photos) + "个丨总共作品数:" + str(allnumber) + "丨")
            print("")
            time.sleep(1)

            if allnumber == len(works):
                print("用户 " + name + "视频没有更新!")
                print("")
                time.sleep(1)

            if allnumber != len(works):
                # Resume record format: "Uid or Short chain丨<uid>丨第丨<n>丨个视频"
                with open("list_num.txt", "r+") as l_num:
                    lum_list = l_num.read().split("丨")
                    lum_uid = lum_list[1]
                    lum_num = int(lum_list[3])

                if lum_uid == bakuid:
                    print("上次记录:" + "Uid or Short chain丨" + bakuid + "丨第丨" + str(lum_num + 1) + "丨个视频")
                    allnumber = 0
                    print("")
                else:
                    print("新的开始:" + "Uid or Short chain丨" + bakuid + "丨第丨0丨个视频")
                    with open("list_num.txt", "w") as l_num_w:
                        l_num_w.write("Uid or Short chain丨" + bakuid + "丨第丨0丨个视频")
                    lum_num = 0
                    allnumber = 0
                    print("")

                for j in range(lum_num, len(works)):
                    # Strip the first line of list_num.txt in place.
                    # (bugfix: the original called the nonexistent
                    # fileinput.replace — the current line must be printed back)
                    for line in fileinput.input("list_num.txt", inplace=1):
                        if not fileinput.isfirstline():
                            print(line.replace("\n", ""))

                    photos, videos = self.__count_downloaded(dir)
                    allnumber = photos + videos

                    if allnumber == len(works):
                        break
                    self.__doufu_work(uid, dir, works[j], j + 1)
                    print("丨下载信息如下:丨")
                    print("丨总下载数:" + str(len(works)) + "个丨" + "当前下载数:" + str(j + 1) + "个丨" + "等待下载数:" + str(len(works) - j -1) + "个丨")
                    print("丨当前电脑共有作品数:" + str(allnumber) + "丨视频:" + str(videos) + "个丨图集:" + str(photos) + "个丨")
                    print("")
                    time.sleep(int(latetime))
                print("")

                photos, videos = self.__count_downloaded(dir)
                allnumber = photos + videos
                print("丨当前电脑共有" + "丨" + name + "丨视频:" + str(videos) + "个丨图集:" + str(photos) + "个丨总共作品数:" + str(allnumber) + "丨")
                print("")
                print("用户 " + name + "视频爬取完成!")

            print("")
            time.sleep(1)
            print("等待3秒执行下一个")
            for line in fileinput.input("list_num.txt", inplace=1):
                if not fileinput.isfirstline():
                    print(line.replace("\n", ""))
            time.sleep(1)
            print("等待2秒执行下一个")
            time.sleep(1)
            with open("list_num.txt", "w") as s_w:
                s_w.write("Uid or Short chain丨0丨第丨0丨个视频")
            time.sleep(1)
            print("等待1秒执行下一个")
            # Drop the finished uid's line from uid_log.txt so an aborted run
            # restarts with the remaining users only.
            # (bugfix: escape the uid — short links contain regex metachars)
            with open("uid_log.txt", "r") as f:
                p = re.compile(re.escape(bakuid))
                lines = [line for line in f.readlines() if p.search(line) is None]
            with open("uid_log.txt", "w") as fd:
                fd.writelines(lines)
            time.sleep(1)
            print("")

    def __count_downloaded(self, dir):
        """Return (image-set count, video count) already saved under dir."""
        pic_dir = dir + "图集/"
        vid_dir = dir + "视频/"
        photos = 0
        if os.path.exists(pic_dir):
            photos = len([d for d in os.listdir(pic_dir) if os.path.isdir(os.path.join(pic_dir, d))])
        videos = 0
        if os.path.exists(vid_dir):
            videos = len([f for f in os.listdir(vid_dir) if os.path.isfile(os.path.join(vid_dir, f))])
        return photos, videos




    def __doufu_work(self, uid, dir, work, wdx):
        """Download a single work into dir.

        Image-set types save every picture under "<dir>图集/<date>_<name>/";
        'video' works scrape the srcNoMark url from the mobile share page,
        retrying through xiladaili proxies whenever Kuaishou answers with a
        throttle notice or a captcha page.

        :param uid: principal id of the user (kept for interface stability)
        :param dir: user output directory ("快手data/<name>(<uid>)/")
        :param work: one entry of the privateFeeds GraphQL list
        :param wdx: 1-based index of the work, used for progress/resume
        """
        # Record progress first so an interrupted run resumes at this work.
        with open("list_num.txt", "w") as s_w:
            s_w.write("Uid or Short chain丨" + bakuid + "丨第丨" + str(int(wdx - 1)) + "丨个视频")
        w_type = work['workType']
        w_caption = re.sub(r"\s+", " ", work['caption'])
        w_name = re.sub(r'[\\/:*?"<>|\r\n]+', "", w_caption)[0:24]
        w_time = time.strftime('%Y-%m-%d', time.localtime(work['timestamp'] / 1000))
        # Works published on the same day get a "(n)" suffix to keep names unique.
        w_index = ""
        if self.__date_cache == w_time:
            self.__date_count = self.__date_count + 1
            if self.__date_count > 0:
                w_index = "(%d)" % self.__date_count
        else:
            self.__date_cache = w_time
            self.__date_count = 0

        if w_type in ('vertical', 'multiple', 'single', 'ksong'):
            w_urls = work['imgUrls']
            l = len(w_urls)
            print("  " + str(wdx) + ")图集作品：" + w_caption + "，" + "共有" + str(l) + "张图片")
            # Loop-invariant name/dir computation hoisted out of the loop.
            w_name2 = re.sub(r'[.*?"<>|\r\n]+', "", w_name)
            dir2 = dir + "图集/" + w_time + w_index + "_" + w_name2 + "/"
            if l and not os.path.exists(dir2):
                os.makedirs(dir2)
            for i in range(l):
                p_name = w_time + w_index + "_" + w_name + "_" + str(i + 1) + '.jpg'
                pic = dir2 + p_name
                if not os.path.exists(pic):
                    # Ask the CDN for jpg instead of webp.
                    r = requests.get(w_urls[i].replace("webp", "jpg"))
                    r.raise_for_status()
                    with open(pic, "wb") as f:
                        f.write(r.content)
                    print("    " + str(i + 1) + "/" + str(l) + " 图片 " + p_name + " 下载成功 √")
                else:
                    print("    " + str(i + 1) + "/" + str(l) + " 图片 " + p_name + " 已存在 √")
        elif w_type == 'video':
            v_name = w_time + w_index + "_" + w_name + ".mp4"
            dir2 = dir + "视频/"
            video = dir2 + v_name
            if os.path.exists(video):
                print("    视频 " + v_name + " 已存在 √")
            else:
                res = self.__fetch_share_page(work)
                html = res.text
                # The share page embeds the watermark-free url as
                # ...&#34;srcNoMark&#34;:&#34;<url>&#34;
                waitreplace = work['id'] + '&#34.*?&#34;srcNoMark&#34;:&#34;(.*?)&#34;'

                # Anti-crawler pages: throttle notice first, then captcha.
                html = self.__retry_with_proxies(work, html, '操作太快了，请稍微休息一下', "提示:操作快了!!!")
                html = self.__retry_with_proxies(work, html, '快手验证码', "提示:出现验证码!!!")

                v_url = re.findall(waitreplace, html)
                try:
                    print("  " + str(wdx) + ")视频作品：" + "丨id" + work['id'] + "丨" + w_caption)
                except Exception:
                    # Console may fail to encode some captions; skip the log line.
                    print("  这里似乎有点小错误，已跳过")
                if not os.path.exists(dir2):
                    os.makedirs(dir2)
                if v_url:
                    if not os.path.exists(video):
                        r = requests.get(v_url[0])
                        r.raise_for_status()
                        with open(video, "wb") as f:
                            f.write(r.content)
                        print("    视频 " + v_name + " 下载成功 √")
                    else:
                        print("    视频 " + v_name + " 已存在 √")
                else:
                    print("未找到视频")
                    print(self.restart_program())
        else:
            print("错误的类型")
            print(self.restart_program())

    def __fetch_share_page(self, work, proxies=None):
        """GET the mobile share page of a work (optionally through a proxy)."""
        res = requests.get(WORK_URL + work['id'], headers=self.__headers_mobile,
                           proxies=proxies, timeout=30,
                           params={"fid": int(userid_cook), "cc": "share_copylink",
                                   "shareId": "143108986354"})
        requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
        return res

    def __get_proxy_batch(self):
        """Fetch a batch of https proxies from xiladaili, waiting out the
        service's own rate limit ('调用频率过快')."""
        api = ("http://www.xiladaili.com/api/?uuid=" + xiladaili +
               "&num=100&place=中国&category=1&protocol=2&sortby=2&repeat=1&format=2&position=1")
        str1 = requests.get(api).text
        while str1.find('调用频率过快') != -1:
            time.sleep(3)
            str1 = requests.get(api).text
        return str1.split()

    def __retry_with_proxies(self, work, html, marker, notice):
        """While html still contains marker (throttle/captcha text), re-fetch
        the share page through fresh proxies; re-exec the whole program when a
        full proxy batch fails to get past the block."""
        while html.find(marker) != -1:
            print(notice)
            time.sleep(3)
            proxy = self.__get_proxy_batch()
            print(proxy)
            for i in range(len(proxy)):
                print("当前代理地址:" + proxy[i])
                proxies = {
                    'https': 'https://' + proxy[i],
                }
                try:
                    # Probe the proxy before using it for the real request.
                    requests.get('http://httpbin.org/get', proxies=proxies)
                    print("成功")
                    html = self.__fetch_share_page(work, proxies).text
                    if html.find(marker) == -1:
                        break
                except requests.exceptions.ConnectionError as e:
                    print('失败丨', e.args)
            time.sleep(10)
            if html.find(marker) != -1:
                print("服务器返回操作太快，可能触发反爬机制------程序自动重新运行!!!")
                print(self.restart_program())
        return html





    def __read_preset(self):
        p_path = "uid.txt"
        if not os.path.exists(p_path):
            print("创建预设文件 uid ...")
            open(p_path, "w")
        if not os.path.getsize(p_path):
            print("请在预设文件 uid 中记录需要爬取的用户id，一行一个")
            exit(0)
        if not os.path.exists("uid_log.txt"):
            p_path = "uid.txt"
            r_w = open("uid.txt", "r+")
            s_w = open("uid_log.txt", "w")
            s_w.write(r_w.read())
        else:
            p_path = "uid_log.txt"
            with open("uid_log.txt", "r+") as q:
                if q.read() == "":
                    r_w = open("uid.txt", "r+")
                    q.write(r_w.read())
        if not os.path.exists("list_num.txt"):
            l_n = open("list_num.txt", "w")
            l_n.write("Uid or Short chain丨0丨第丨0丨个视频")
            l_n.close()
        else:
            with open("list_num.txt", "r+") as qq:
                if qq.read() == "":
                    qq.write("Uid or Short chain丨0丨第丨0丨个视频")
        #print(p_path)
        with open("xiladaili.txt", "r+") as xila:
            global xiladaili
            xiladaili = xila.read()
        with open(p_path, "r") as f:
            for line in f:
                if line[0] != "#":
                    self.__doufu_list.append(line.strip())

    def __switch_id(self, uid):
        """Resolve a numeric Kuaishou uid to the principal id used by the
        live.kuaishou.com GraphQL API.

        Runs a SearchOverviewQuery for the uid and returns the id of the
        first user hit (index 1 of the overview list is the user section).
        """
        search_query = "query SearchOverviewQuery($keyword: String, $ussid: String) {\n  pcSearchOverview(keyword: $keyword, ussid: $ussid) {\n    list {\n      ... on SearchCategoryList {\n        type\n        list {\n          categoryId\n          categoryAbbr\n          title\n          src\n          __typename\n        }\n        __typename\n      }\n      ... on SearchUserList {\n        type\n        ussid\n        list {\n          id\n          name\n          living\n          avatar\n          sex\n          description\n          counts {\n            fan\n            follow\n            photo\n            __typename\n          }\n          __typename\n        }\n        __typename\n      }\n      ... on SearchLivestreamList {\n        type\n        lssid\n        list {\n          user {\n            id\n            avatar\n            name\n            __typename\n          }\n          poster\n          coverUrl\n          caption\n          id\n          playUrls {\n            quality\n            url\n            __typename\n          }\n          quality\n          gameInfo {\n            category\n            name\n            pubgSurvival\n            type\n            kingHero\n            __typename\n          }\n          hasRedPack\n          liveGuess\n          expTag\n          __typename\n        }\n        __typename\n      }\n      __typename\n    }\n    __typename\n  }\n}\n"
        payload = {
            "operationName": "SearchOverviewQuery",
            "variables": {"keyword": uid, "ussid": None},
            "query": search_query,
        }
        response = requests.post(DATA_URL, headers=self.__headers_web, json=payload)
        data = json.loads(response.content.decode(encoding='utf-8', errors='strict'))['data']
        print(response.text)
        return data['pcSearchOverview']['list'][1]['list'][0]['id']

    def __openchrome(self):
        """Open Chrome, wait for the user to log in to live.kuaishou.com, then
        harvest the session cookies and hand them to set_did().

        NOTE(review): uses the deprecated selenium find_element_by_xpath API
        and a bundled chromedriver path — confirm both against the installed
        selenium/Chrome versions.
        """
        driver = webdriver.Chrome(executable_path="Google Chrome/chromedriver.exe")
        driver.get("https://live.kuaishou.com/v/hot/")
        # The header login element carries the account name in its title
        # attribute once the user has logged in.
        name = driver.find_element_by_xpath('//*[@id="app"]/div[1]/div[1]/header/div/div[2]/div[3]/a/span')
        name_title = name.get_attribute('title')
        # cookie= driver.get_cookies()
        # Poll every 10 s until the login completes.
        while name_title == "":
            print("你还没有登录,请登录!!!")
            time.sleep(10)
            name = driver.find_element_by_xpath('//*[@id="app"]/div[1]/div[1]/header/div/div[2]/div[3]/a/span')
            name_title = name.get_attribute('title')
        print("你已经登录,登录名:" + name_title)
        cookie = driver.get_cookies()
        # Visit a profile page so the full cookie set gets issued.
        driver.get("https://live.kuaishou.com/u/G338217245/3xjz7rnjnh6hjts")
        cookie = [item['value'] for item in driver.get_cookies()]
        cookie1 = [item['name'] for item in driver.get_cookies()]
        # cookie = [item['name'] + '=' + item['value'] for item in driver.get_cookies()]
        #print(cookie)
        #print(cookie1)
        # NOTE(review): cookies are picked by positional index — presumably
        # 4=did, 1=web_st, 0=web_ph, 7=bfb1s, 2=userId; this is fragile and
        # should be verified against cookie1 (the name list) above.
        param_did = cookie[4]
        #param_did1 = cookie[1]
        #param_did2 = cookie[2]
        #param_did3 = cookie[7]
        param_did2 = cookie[1]
        param_did3 = cookie[0]
        param_did4 = cookie[7]
        param_did5 = cookie[2]
        # Mirror the userId cookie into a module global used by __doufu_work.
        global userid_cook
        userid_cook = cookie[2]
        # param_kuaishou.live.web_st = cookie[1]
        driver.get("https://live.kuaishou.com/v/hot/")
        # param_did = "123"
        #print(driver.title)  # the page title shows up in the terminal
        #driver.quit()  # without quitting, the browser keeps running in the background
        #driverOptions = webdriver.ChromeOptions()
        #driverOptions.add_experimental_option("detach", True)
        self.set_did(param_did, param_did2, param_did3, param_did4, param_did5)

    def restart_program(self):
        """Replace the current process with a fresh run of this script
        (os.execl never returns)."""
        interpreter = sys.executable
        os.execl(interpreter, interpreter, *sys.argv)

    def __intro(self):
        """Print the program banner, then run the Chrome login flow
        (__openchrome) to harvest the session cookies.

        The previous inline copy of the login flow was dead commented-out
        code duplicating __openchrome and has been removed.
        """
        print()
        print("|  %s (v%s %s)" % (INFO["name"], INFO["version"], INFO["publishDate"]))
        print("|  本程序由%s提供, %s, 喜欢的话可以给个star >_<" % (INFO["author"], INFO["repository"]))
        print()
        self.__openchrome()