from bs4 import BeautifulSoup
import requests
import json
import jsonpath
import execjs
import hashlib
import time
import threading
from dbSql import ZHIHUDB

# （1）基本属性信息：用户名、性别、
#           一句话介绍、居住地、
#           所在行业、职业经历、个人简介
def get_base_info(name, cookie):
    #url地址
    url = 'https://www.zhihu.com/people/{}'.format(name)

    headers = {
        'cookie': cookie,
        'referer': 'https://www.zhihu.com/topic/19550228/hot',
        "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
    }

    #获取页面源码
    response = requests.get(url=url, headers=headers)
    content = response.text

    # 爬取用户相关信息，是html中的json部分
    soup = BeautifulSoup(content, 'lxml')
    usrInfo = soup.select('script[id="js-initialData"]')[0].text
    with open('./info_file/user.json', 'w', encoding='utf-8') as fp:
        fp.write(usrInfo)

    #使用jsonpath进行分析
    infoJson = json.load(open('./info_file/user.json', 'r', encoding='utf-8'))

    #获取用户名
    userName = jsonpath.jsonpath(infoJson, '$.initialState.entities.users..name')[0]

    #获取性别
    sex = ['','female', 'male']
    userSex = sex[jsonpath.jsonpath(infoJson, '$.initialState.entities.users..gender')[0]+1]

    #获取一句话介绍
    userOneIntro = jsonpath.jsonpath(infoJson, '$.initialState.entities.users..headline')[0]

    #获取居住地
    userLive = ''
    live_list = jsonpath.jsonpath(infoJson, '$.initialState.entities.users..locations..name')
    if live_list != False:
        userLive = live_list[0]

    #获取所在行业
    userIndus = jsonpath.jsonpath(infoJson, '$.initialState.entities.users..business.name')[0]

    #获取职业经历
    userOccup = ''
    if jsonpath.jsonpath(infoJson, '$.initialState.entities.users..employments') != [[]]:
        if jsonpath.jsonpath(infoJson, '$.initialState.entities.users..employments..company.name') != False and jsonpath.jsonpath(infoJson, '$.initialState.entities.users..employments..job.name') != False:
            userOccup = jsonpath.jsonpath(infoJson, '$.initialState.entities.users..employments..company.name')[0] + '-' + jsonpath.jsonpath(infoJson, '$.initialState.entities.users..employments..job.name')[0]
        elif jsonpath.jsonpath(infoJson, '$.initialState.entities.users..employments..company.name') != False:
            userOccup = jsonpath.jsonpath(infoJson, '$.initialState.entities.users..employments..company.name')[0] + '-'
        elif jsonpath.jsonpath(infoJson, '$.initialState.entities.users..employments..job.name') != False:
            userOccup = ' ' + '-' + jsonpath.jsonpath(infoJson, '$.initialState.entities.users..employments..job.name')[0]

    #获取个人简历
    userIntro = jsonpath.jsonpath(infoJson, '$.initialState.entities.users..description')[0]
    # print('（1）基本属性信息：')
    # print('用户名：' + userName)
    # print('性别：' + userSex)
    # print('一句话介绍：' + userOneIntro)
    # print('居住地：' + userLive)
    # print('所在行业：' + userIndus)
    # print('职业经历：' + userOccup)
    # print('个人简历：' + userIntro)
    return [{'From_name':name, 'Name':userName, 'Sex':userSex, 'One_intro':userOneIntro, 'Live_place':userLive, 'Indus':userIndus, 'Occup':userOccup, 'Full_intro':userIntro}]

# （2）社交关系信息：所有关注人和粉丝（如果关注人数量或者粉丝数量超过10，则只采集前10个），
#           每个人的信息包括
#           用户昵称、链接地址、回答问题数、文章数、关注者人数。
#  2.1 抓取关注人信息
def get_following_info(name, cookie):
    #url地址
    urlFollowing = 'https://www.zhihu.com/people/{}/following'.format(name)

    headers = {
        'cookie': cookie,
        'referer': 'https://www.zhihu.com/topic/19550228/hot',
        "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
    }

    #获取页面源码
    response = requests.get(url=urlFollowing, headers=headers)
    content = response.text

    # 爬取用户相关信息，是html中的json部分
    soup = BeautifulSoup(content, 'lxml')
    usrInfo = soup.select('script[id="js-initialData"]')[0].text
    with open('./info_file/following.json', 'w', encoding='utf-8') as fp:
        fp.write(usrInfo)

    #使用jsonpath进行分析
    infoJson = json.load(open('./info_file/following.json', 'r', encoding='utf-8'))

    #爬取关注者信息
    followingLsit = jsonpath.jsonpath(infoJson, '$.initialState.entities.users.*')
    if followingLsit != False:
        followingNum = len(followingLsit)
        if len(followingLsit) >= 12:
            followingNum = 12
    else:
        followingNum = 0

    #爬取每个人的用户昵称、链接地址、回答问题数、文章数、关注者人数
    #前两个都是用户个人的信息
    result = list()
    for i in range(2, followingNum):
        following_info = dict()
        following_info['From_name'] = name
        following_info['Name'] = followingLsit[i]['name']
        # url = followingLsit[i]['url'] 这个url是访问不到的
        following_info['Url'] = followingLsit[i]['url'].split('api')[0] + followingLsit[i]['url'].split('v4/')[1]
        following_info['Question_num'] = followingLsit[i]['answerCount']
        following_info['Articles_num'] = followingLsit[i]['articlesCount']
        following_info['Follower_num'] = followingLsit[i]['followerCount']
        result.append(following_info)
    return result

#  2.2 抓取粉丝信息
#       难点：此处用了x-zse-96的字段作为随机加密字段，难以破解
#       解决方案：
#           （1）逆向获取x-zse-96的加密方式，得到加密字段 参考：https://blog.csdn.net/weixin_40352715/article/details/123381876
#           （2）采用selenium方式，xpath解析获得（缺点：效率不高、打开浏览器过程慢） 参考：https://blog.csdn.net/qq_45722494/article/details/120079838
def get_followers_info(name, cookie):
    #url地址（便于作为传入参数的值）
    urlFollowers = '/api/v4/members/{}/followers?include=data%5B*%5D.answer_count%2Carticles_count%2Cgender%2Cfollower_count%2Cis_followed%2Cis_following%2Cbadge%5B%3F%28type%3Dbest_answerer%29%5D.topics&offset=0&limit=20'.format(name)

    #构造x-zse-96字段
    f = "+".join(["101_3_2.0", urlFollowers, '"APDccsV2MBOPTj3WXBx_kjFftzj5rntM-ew=|1622449875"'])
    fmd5 = hashlib.new('md5', f.encode()).hexdigest()
    with open('g_encrypt.js', 'r') as f:
        ctx1 = execjs.compile(f.read(), cwd=r'D:\nodejs\node_modules')
    encrypt_str = ctx1.call('b', fmd5)

    headers = {
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36",
        "cookie": cookie,
        "x-api-version": "3.0.91",
        "x-zse-93": "101_3_2.0",
        "x-zse-96": "2.0_%s" % encrypt_str
    }

    # 利用api获取用户数据json文件
    urlFollowers = 'https://www.zhihu.com' + urlFollowers
    response = requests.get(url=urlFollowers, headers=headers)
    content = response.text

    # 保存用户粉丝相关信息
    with open('./info_file/followers.json', 'w', encoding='utf-8') as fp:
        fp.write(content)

    #使用jsonpath进行分析
    infoJson = json.load(open('./info_file/followers.json', 'r', encoding='utf-8'))

    #爬取关注者信息
    followerLsit = jsonpath.jsonpath(infoJson, '$.data.*')
    if followerLsit != False:
        followerNum = len(followerLsit)
        if len(followerLsit) >= 12:
            followerNum = 10
    else:
        followerNum = 0

    #爬取每个人的用户昵称、链接地址、回答问题数、文章数、关注者人数
    result = list()
    for i in range(0, followerNum):
        follower_info = dict()
        follower_info['From_name'] = name
        follower_info['Name'] = followerLsit[i]['name']
        # url = followingLsit[i]['url'] 这个url是访问不到的
        follower_info['Url'] = followerLsit[i]['url'].split('api')[0] + followerLsit[i]['url'].split('v4/')[1]
        #这三个数据的标签名称同关注人信息中的不一样
        follower_info['Question_num'] = followerLsit[i]['answer_count']
        follower_info['Articles_num'] = followerLsit[i]['articles_count']
        follower_info['Follower_num'] = followerLsit[i]['follower_count']
        result.append(follower_info)
    return result

#  （3）动态信息：所有回答和提问（如果回答和提问的总量超过10，则只采集前10条），
#               每个回答或评论的信息包括
#                   发帖时间、发帖内容、
#                   评论次数、点赞次数、
#                   前10条评论（评论人ID、评论人昵称、评论时间、评论内容、点赞次数）。

# 时间戳转时间
def timeStampToDatetime(timeStamp):
    timeArray = time.localtime(timeStamp)
    datetime = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
    return datetime

#获取贴文下的评论信息(贴文来源人（便于数据库存储逻辑性实现）)
def get_question_comment_info(from_name, questionId, cookie):
    #url地址(limit=10, offset=0)
    baseUrl = '/api/v4/questions/{}/answers?include=data%5B%2A%5D.is_normal%2Cadmin_closed_comment%2Creward_info%2Cis_collapsed%2Cannotation_action%2Cannotation_detail%2Ccollapse_reason%2Cis_sticky%2Ccollapsed_by%2Csuggest_edit%2Ccomment_count%2Ccan_comment%2Ccontent%2Ceditable_content%2Cattachment%2Cvoteup_count%2Creshipment_settings%2Ccomment_permission%2Ccreated_time%2Cupdated_time%2Creview_info%2Crelevant_info%2Cquestion%2Cexcerpt%2Cis_labeled%2Cpaid_info%2Cpaid_info_content%2Creaction_instruction%2Crelationship.is_authorized%2Cis_author%2Cvoting%2Cis_thanked%2Cis_nothelp%2Cis_recognized%3Bdata%5B%2A%5D.mark_infos%5B%2A%5D.url%3Bdata%5B%2A%5D.author.follower_count%2Cvip_info%2Cbadge%5B%2A%5D.topics%3Bdata%5B%2A%5D.settings.table_of_content.enabled&limit=10&offset=0&platform=desktop&sort_by=default'.format(questionId)

    #构造x-zse-96字段
    f = "+".join(["101_3_2.0", baseUrl, '"APDccsV2MBOPTj3WXBx_kjFftzj5rntM-ew=|1622449875"'])
    fmd5 = hashlib.new('md5', f.encode()).hexdigest()
    with open('g_encrypt.js', 'r') as f:
        ctx1 = execjs.compile(f.read(), cwd=r'D:\nodejs\node_modules')
    encrypt_str = ctx1.call('b', fmd5)

    headers = {
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36",
        "cookie": cookie,
        "x-api-version": "3.0.91",
        "x-zse-93": "101_3_2.0",
        "x-zse-96": "2.0_%s" % encrypt_str
    }

    # 利用api获取回答数据json文件
    baseUrl = 'https://www.zhihu.com' + baseUrl
    response = requests.get(url=baseUrl, headers=headers)
    content = response.text

    # 保存回答相关信息
    with open('./info_file/questionBaseInfo.json', 'w', encoding='utf-8') as fp:
        fp.write(content)

    # 利用jsonpath对json文件进行分析
    infoJson = json.load(open('./info_file/questionBaseInfo.json', 'r', encoding='utf-8'))

    # 爬取回答信息
    answerLsit = jsonpath.jsonpath(infoJson, '$.data.*')
    if answerLsit != False:
        answerNum = len(answerLsit)
        if len(answerLsit) >= 12:
            answerNum = 10
    else:
        answerNum = 0

    # 抓取 评论人ID、评论人昵称、评论时间、评论内容、点赞次数
    result = list()
    for i in range(0,answerNum):
        post_info = dict()
        post_info['From_name'] = from_name
        post_info['User_id'] = answerLsit[i]['author']['id']
        post_info['Title'] = answerLsit[i]['question']['title']
        post_info['Name'] = answerLsit[i]['author']['name']
        post_info['Comment_time'] = timeStampToDatetime(answerLsit[i]['updated_time'])
        post_info['Comment'] = answerLsit[i]['content'][0:2056]
        post_info['Vote_num'] = answerLsit[i]['voteup_count']
        post_info['Question_id'] = questionId
        result.append(post_info)
    # print(result)
    return result

# 获取回答信息
def get_answer_info(name, cookie):
    #url地址（便于作为传入参数的值）
    urlAnswer = '/api/v4/members/{}/answers?include=data%5B*%5D.is_normal%2Cadmin_closed_comment%2Creward_info%2Cis_collapsed%2Cannotation_action%2Cannotation_detail%2Ccollapse_reason%2Ccollapsed_by%2Csuggest_edit%2Ccomment_count%2Ccan_comment%2Ccontent%2Ceditable_content%2Cattachment%2Cvoteup_count%2Creshipment_settings%2Ccomment_permission%2Cmark_infos%2Ccreated_time%2Cupdated_time%2Creview_info%2Cexcerpt%2Cis_labeled%2Clabel_info%2Crelationship.is_authorized%2Cvoting%2Cis_author%2Cis_thanked%2Cis_nothelp%2Cis_recognized%3Bdata%5B*%5D.vessay_info%3Bdata%5B*%5D.author.badge%5B%3F%28type%3Dbest_answerer%29%5D.topics%3Bdata%5B*%5D.author.vip_info%3Bdata%5B*%5D.question.has_publishing_draft%2Crelationship&offset=0&limit=20&sort_by=created'.format(name)

    #构造x-zse-96字段
    f = "+".join(["101_3_2.0", urlAnswer, '"APDccsV2MBOPTj3WXBx_kjFftzj5rntM-ew=|1622449875"'])
    fmd5 = hashlib.new('md5', f.encode()).hexdigest()
    with open('g_encrypt.js', 'r') as f:
        ctx1 = execjs.compile(f.read(), cwd=r'D:\nodejs\node_modules')
    encrypt_str = ctx1.call('b', fmd5)

    headers = {
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36",
        "cookie": cookie,
        "x-api-version": "3.0.91",
        "x-zse-93": "101_3_2.0",
        "x-zse-96": "2.0_%s" % encrypt_str
    }

    # 利用api获取回答数据json文件
    urlAnswer = 'https://www.zhihu.com' + urlAnswer
    response = requests.get(url=urlAnswer, headers=headers)
    content = response.text

    # 保存回答相关信息
    with open('./info_file/answer.json', 'w', encoding='utf-8') as fp:
        fp.write(content)

    # 利用jsonpath对json文件进行分析
    infoJson = json.load(open('./info_file/answer.json', 'r', encoding='utf-8'))

    # 爬取回答信息
    answerLsit = jsonpath.jsonpath(infoJson, '$.data.*')
    if answerLsit != False:
        answerNum = len(answerLsit)
        if len(answerLsit) >= 12:
            answerNum = 10
    else:
        answerNum = 0

    # 抓取 回答的时间、发帖标题、发表的评论、回答的评论次数、回答的点赞次数信息
    result = list()
    post_result = list()
    for i in range(0,answerNum):
        answer_info = dict()
        answer_info['From_name'] = name
        answer_info['Time'] = timeStampToDatetime(answerLsit[i]['created_time'])
        answer_info['Title'] = answerLsit[i]['question']['title']
        answer_info['Comment'] = answerLsit[i]['content'][0:2056]
        answer_info['Comment_num'] = answerLsit[i]['comment_count']
        answer_info['Vote_num'] = answerLsit[i]['voteup_count']
        answer_info['Question_id'] = answerLsit[i]['question']['id']
        result.append(answer_info)

        # 抓取问题ID作为参数传入，获得前十条评论信息
        # print(title + '=======================================================================================')
        questionId = answerLsit[i]['question']['id']
        post_result = post_result + get_question_comment_info(name, questionId, cookie)
    return result, post_result

# 获取提问信息
def get_quiz_info(name, cookie):
    #url地址（便于作为传入参数的值）
    urlQuiz = '/api/v4/members/{}/questions?include=data%5B*%5D.created%2Canswer_count%2Cfollower_count%2Cauthor%2Cadmin_closed_comment&offset=0&limit=20'.format(name)

    #构造x-zse-96字段
    f = "+".join(["101_3_2.0", urlQuiz, '"APDccsV2MBOPTj3WXBx_kjFftzj5rntM-ew=|1622449875"'])
    fmd5 = hashlib.new('md5', f.encode()).hexdigest()
    with open('g_encrypt.js', 'r') as f:
        ctx1 = execjs.compile(f.read(), cwd=r'D:\nodejs\node_modules')
    encrypt_str = ctx1.call('b', fmd5)

    headers = {
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36",
        "cookie": cookie,
        "x-api-version": "3.0.91",
        "x-zse-93": "101_3_2.0",
        "x-zse-96": "2.0_%s" % encrypt_str
    }

    # 利用api获取回答数据json文件
    urlQuiz = 'https://www.zhihu.com' + urlQuiz
    response = requests.get(url=urlQuiz, headers=headers)
    content = response.text

    # 保存提问相关信息
    with open('./info_file/quiz.json', 'w', encoding='utf-8') as fp:
        fp.write(content)

    # 利用jsonpath对json文件进行分析
    infoJson = json.load(open('./info_file/quiz.json', 'r', encoding='utf-8'))

    # 爬提问信息
    quizLsit = jsonpath.jsonpath(infoJson, '$.data.*')
    if quizLsit != False:
        quizNum = len(quizLsit)
        if len(quizLsit) >= 12:
            quizNum = 10
    else:
        quizNum = 0

    # 抓取 发帖的时间、发帖标题、评论个数、关注该话题的人数
    result = list()
    post_result = list()
    for i in range(0,quizNum):
        quiz_info = dict()
        quiz_info['From_name'] = name
        quiz_info['Time'] = timeStampToDatetime(quizLsit[i]['created'])
        quiz_info['Title'] = quizLsit[i]['title']
        quiz_info['Comment_num'] = quizLsit[i]['answer_count']
        quiz_info['Follow_num'] = quizLsit[i]['follower_count']
        quiz_info['Question_id'] = quizLsit[i]['id']
        result.append(quiz_info)

        # 抓取问题ID作为参数传入，获得前十条评论信息
        questionId = quizLsit[i]['id']
        get_question_comment_info(name, questionId, cookie)
        post_result = post_result + get_question_comment_info(name, questionId, cookie)
    return result, post_result

# 将数据存入数据库中
def to_database(name, cookie):
    # 创建数据库连接
    db = ZHIHUDB('localhost', 'xiongdb', 'root', '280513')
    db.connect()
    db.create_tables()

    # (1)
    personal_info = get_base_info(name,cookie)
    db.populate_personal_table(personal_info)

    # (2)
    following_info = get_following_info(name,cookie)
    if len(following_info) != 0:
        db.populate_following_table(following_info)
    follower_info = get_followers_info(name, cookie)
    if len(follower_info) != 0:
        db.populate_follower_table(follower_info)

    # (3)
    # 持续性获取动态信息
    while(1):
        answer_info, answer_post_info = get_answer_info(name, cookie)
        if len(answer_info) != 0:
            db.populate_answer_table(answer_info)
        if len(answer_post_info) != 0:
            db.populate_post_table(answer_post_info)

        quiz_info, quiz_post_info = get_quiz_info(name, cookie)
        if len(quiz_info) != 0:
            db.populate_quiz_table(quiz_info)
        if len(quiz_post_info) != 0:
            db.populate_post_table(quiz_post_info)
        time.sleep(30)

    db.close()

# 用于持续性获取评论数据
def get_start(name):
    cookie = 'SESSIONID=BggS53z6jatlDPa7wnuEIMdypXalx0CJCULGIUcTA3e; JOID=W1wWC0qL247kQDAtMopuGHC4oNErpf-mzmseCRqg9arMahsDFiNcB4dFOiEwf5A0RxBBaFP1gSPk6E71zEYO7ng=; osd=UVoVBUiB3Y3qQjorMYRsEna7rtMho_yozGEYChSi_6zPZBkJECBSBY1DOS8ydZY3SRJLblD7gyni60D3xkAN4Ho=; _zap=24fffadb-145f-4581-8685-48109d479e20; d_c0="APDccsV2MBOPTj3WXBx_kjFftzj5rntM-ew=|1622449875"; _9755xjdesxxd_=32; YD00517437729195%3AWM_TID=D9KYDF3xOQBABQEEQRM%2Fpfw264clMXqe; _xsrf=02bIf8VlWNPgJA3DmPSqrwvSqStuIkuS; __snaker__id=A7NeAARFUQL8RWPS; q_c1=aee51212ad734f3684169cdaae2eee52|1650956008000|1650956008000; YD00517437729195%3AWM_NIKE=9ca17ae2e6ffcda170e2e6eeccfc34b7978599d04898eb8ab7d45a968f8aadd54aa7e78f90bb3f91b1e196e22af0fea7c3b92ab4b8a496c94481b09dccb8548eb79cacb25b97e79da3d57df3ad99ade54bfc999d89fb50898c9eb5c868ad9cfaa7c653b3b9e594fb218ab08cd4c14baa8d8c8ece54f499a3aac16aa19eaf8de880e9e7bad0e425f6878aa6cf7bf5988c98d066f1aca3d1f63fac8db8a9f26794f0a4bbcd67aa868cd3d76a8fa7fc8bb43ba99c9ea8f637e2a3; YD00517437729195%3AWM_NI=phluJHCpS%2B5XNiTK3QU9SBVw1sWLfA8GShxDKTRTEloD6FGYqg%2Bsx%2BJpD3BAjSDmOX0GmIBbcBalet%2B9v%2Fa%2F6YFELq7HDwMn3tm0w5hSxHsmP0lZNmjZD2TbMy4j11kDRGw%3D; l_cap_id="MDFiZDQwY2ZlMDFiNGZhMjljNjM4ODc2YWQ0M2EwOTU=|1653308621|885d9a9f73f78d2b708077ba7c4b8e28ad06fe04"; r_cap_id="MmQzMGNiZTU5Yzg2NDkyMmExM2FmNzUwNjMwMWY3YWQ=|1653308621|c47a594b7fb0003bb2ee0f3650ee6baeb265ad68"; cap_id="NzA4MjliZThjOWRkNGE0N2JlZDA2ZWQzNTIzYjY5ZjI=|1653308621|2e1aa37d91b475425342dc8e9495a3eda3052748"; capsion_ticket=2|1:0|10:1653308632|14:capsion_ticket|44:MTk4ZTA4MjdiMmE3NDBlMGJlNGYxMWVhYzk2MTIwMzI=|a62d440541377464c8dfb2e0b650640b72e58dec77b51356a2c332e6654a67d7; tst=r; Hm_lvt_98beee57fd2ef70ccdd5ca52b9740c49=1653269889,1653290428,1653307172,1653312213; captcha_session_v2=2|1:0|10:1653312212|18:captcha_session_v2|88:SGFKNTFscUwzdHhadjNZMmRqSjRGRW9wT3NqWktoT2lVTTc4M1N4VXBVamVqMWpBdWxBdzhNMzhqN1dqS3JBYw==|9a1b2878c1512c2cab88792869ce9fd67e596a7dc3f7a3cde26fbe6d2f14f60c; gdxidpyhxdE=1%5ChZZk32KQ%5C83MqVs1vBWOIVlx1OwjnVCIEnP8BkayNdIYRqf%5Cm0G%5CdApIEaCrwVB03AOALo0AQCcpHPrOVXKUDwxM%2BY21OelCNn9YOkCqHTCUBnLs4v%5CW1JIe5zwPhmPMKv4ggHRvOxwKRrz3A6PIS1UoHCGi6hoSNKJYbQ%5Cr0pKHA1%3A1653313114246; captcha_ticket_v2=2|1:0|10:1653312223|17:captcha_ticket_v2|704:eyJ2YWxpZGF0ZSI6IkNOMzFfNUhwNXJMLk9rS3JIcUlTV3ZPTlZCRml2d1lVUHVSNG9mdWtqbHpUOEZ0RHdBTlh5alJnSmNva29zMlFPc2xyTWxHQURjdDVRVEdtSmlpZzdhdklzNUUyZFU2OTRSY0l5U3E3TUtIaEtBcDVJNnRUS3Fsa2U2cHlMMlprdEVkLWhhRjRlVElUOXguMTZTVF93VVdlNE15VFBqREUxajdRbXNTa1cwYmc3WXQ5RUFzZUpGWkhONmQ5Mi44eDdoMWxVXzhqSFl0MTFqTGlla19LRm1TRTJRNWdaRGp5eHdaNk9IcEFSMnRmUUhtUmk5LWNzUHJSdnk4Q0xGMEtlOXgtR21hZFdjbjQycGJoWU13V05XS1hNdy5tTEI1dFI1d2JLbldqQTVvRG5jc2xlcjJqVy5WZmI4czZtQmdkbXJ5QTdEVWtJc2pJbmRMV2lYaVJUUmowRDVjU3VsOE43Mnk0OFRBLk1sSTh5MVNYY2dLb2hLQ3Y4MnhfUUtZSGo2Ul9lVVkwSEF2OWF2eFV1NW5BQzVsQU1wWDVKekpYTXZJLThEeFRrUzJFZE5kNUxidHV6WFRDSGxFLUtsSVpId2phUDlPdmZ6MHppR185aUZNZ3E1d1pLSFNxQUt6VTI5by1fNVhQTnd0cDdMU3RpSFFzRFg3cUJ0SmllZVNtMyJ9|5eab3b03ae02401e1ea8c39b9190ce2bb7f5c6bfaf23b97d72cba96531ea497b; z_c0=2|1:0|10:1653312223|4:z_c0|92:Mi4xeFZGR0h3QUFBQUFBOE54eXhYWXdFeVlBQUFCZ0FsVk4zOWg0WXdDUS1tNVBjUHg3TGFQUmRNOEN0MzQ1NmRoTTF3|65676b7378c3e5db0538830881793bf22f020eb02dd93d561afb8a1d9a49be62; NOT_UNREGISTER_WAITING=1; Hm_lpvt_98beee57fd2ef70ccdd5ca52b9740c49=1653312418; KLBRSID=4843ceb2c0de43091e0ff7c22eadca8c|1653312418|1653312210'
    t = threading.Thread(target=to_database, args=(name, cookie,))
    t.start()

if __name__ == "__main__":
    name = 'liu-peng-cheng-sai-l'
    cookie = 'SESSIONID=BggS53z6jatlDPa7wnuEIMdypXalx0CJCULGIUcTA3e; JOID=W1wWC0qL247kQDAtMopuGHC4oNErpf-mzmseCRqg9arMahsDFiNcB4dFOiEwf5A0RxBBaFP1gSPk6E71zEYO7ng=; osd=UVoVBUiB3Y3qQjorMYRsEna7rtMho_yozGEYChSi_6zPZBkJECBSBY1DOS8ydZY3SRJLblD7gyni60D3xkAN4Ho=; _zap=24fffadb-145f-4581-8685-48109d479e20; d_c0="APDccsV2MBOPTj3WXBx_kjFftzj5rntM-ew=|1622449875"; _9755xjdesxxd_=32; YD00517437729195%3AWM_TID=D9KYDF3xOQBABQEEQRM%2Fpfw264clMXqe; _xsrf=02bIf8VlWNPgJA3DmPSqrwvSqStuIkuS; __snaker__id=A7NeAARFUQL8RWPS; q_c1=aee51212ad734f3684169cdaae2eee52|1650956008000|1650956008000; YD00517437729195%3AWM_NIKE=9ca17ae2e6ffcda170e2e6eeccfc34b7978599d04898eb8ab7d45a968f8aadd54aa7e78f90bb3f91b1e196e22af0fea7c3b92ab4b8a496c94481b09dccb8548eb79cacb25b97e79da3d57df3ad99ade54bfc999d89fb50898c9eb5c868ad9cfaa7c653b3b9e594fb218ab08cd4c14baa8d8c8ece54f499a3aac16aa19eaf8de880e9e7bad0e425f6878aa6cf7bf5988c98d066f1aca3d1f63fac8db8a9f26794f0a4bbcd67aa868cd3d76a8fa7fc8bb43ba99c9ea8f637e2a3; YD00517437729195%3AWM_NI=phluJHCpS%2B5XNiTK3QU9SBVw1sWLfA8GShxDKTRTEloD6FGYqg%2Bsx%2BJpD3BAjSDmOX0GmIBbcBalet%2B9v%2Fa%2F6YFELq7HDwMn3tm0w5hSxHsmP0lZNmjZD2TbMy4j11kDRGw%3D; l_cap_id="MDFiZDQwY2ZlMDFiNGZhMjljNjM4ODc2YWQ0M2EwOTU=|1653308621|885d9a9f73f78d2b708077ba7c4b8e28ad06fe04"; r_cap_id="MmQzMGNiZTU5Yzg2NDkyMmExM2FmNzUwNjMwMWY3YWQ=|1653308621|c47a594b7fb0003bb2ee0f3650ee6baeb265ad68"; cap_id="NzA4MjliZThjOWRkNGE0N2JlZDA2ZWQzNTIzYjY5ZjI=|1653308621|2e1aa37d91b475425342dc8e9495a3eda3052748"; capsion_ticket=2|1:0|10:1653308632|14:capsion_ticket|44:MTk4ZTA4MjdiMmE3NDBlMGJlNGYxMWVhYzk2MTIwMzI=|a62d440541377464c8dfb2e0b650640b72e58dec77b51356a2c332e6654a67d7; tst=r; Hm_lvt_98beee57fd2ef70ccdd5ca52b9740c49=1653269889,1653290428,1653307172,1653312213; captcha_session_v2=2|1:0|10:1653312212|18:captcha_session_v2|88:SGFKNTFscUwzdHhadjNZMmRqSjRGRW9wT3NqWktoT2lVTTc4M1N4VXBVamVqMWpBdWxBdzhNMzhqN1dqS3JBYw==|9a1b2878c1512c2cab88792869ce9fd67e596a7dc3f7a3cde26fbe6d2f14f60c; gdxidpyhxdE=1%5ChZZk32KQ%5C83MqVs1vBWOIVlx1OwjnVCIEnP8BkayNdIYRqf%5Cm0G%5CdApIEaCrwVB03AOALo0AQCcpHPrOVXKUDwxM%2BY21OelCNn9YOkCqHTCUBnLs4v%5CW1JIe5zwPhmPMKv4ggHRvOxwKRrz3A6PIS1UoHCGi6hoSNKJYbQ%5Cr0pKHA1%3A1653313114246; captcha_ticket_v2=2|1:0|10:1653312223|17:captcha_ticket_v2|704:eyJ2YWxpZGF0ZSI6IkNOMzFfNUhwNXJMLk9rS3JIcUlTV3ZPTlZCRml2d1lVUHVSNG9mdWtqbHpUOEZ0RHdBTlh5alJnSmNva29zMlFPc2xyTWxHQURjdDVRVEdtSmlpZzdhdklzNUUyZFU2OTRSY0l5U3E3TUtIaEtBcDVJNnRUS3Fsa2U2cHlMMlprdEVkLWhhRjRlVElUOXguMTZTVF93VVdlNE15VFBqREUxajdRbXNTa1cwYmc3WXQ5RUFzZUpGWkhONmQ5Mi44eDdoMWxVXzhqSFl0MTFqTGlla19LRm1TRTJRNWdaRGp5eHdaNk9IcEFSMnRmUUhtUmk5LWNzUHJSdnk4Q0xGMEtlOXgtR21hZFdjbjQycGJoWU13V05XS1hNdy5tTEI1dFI1d2JLbldqQTVvRG5jc2xlcjJqVy5WZmI4czZtQmdkbXJ5QTdEVWtJc2pJbmRMV2lYaVJUUmowRDVjU3VsOE43Mnk0OFRBLk1sSTh5MVNYY2dLb2hLQ3Y4MnhfUUtZSGo2Ul9lVVkwSEF2OWF2eFV1NW5BQzVsQU1wWDVKekpYTXZJLThEeFRrUzJFZE5kNUxidHV6WFRDSGxFLUtsSVpId2phUDlPdmZ6MHppR185aUZNZ3E1d1pLSFNxQUt6VTI5by1fNVhQTnd0cDdMU3RpSFFzRFg3cUJ0SmllZVNtMyJ9|5eab3b03ae02401e1ea8c39b9190ce2bb7f5c6bfaf23b97d72cba96531ea497b; z_c0=2|1:0|10:1653312223|4:z_c0|92:Mi4xeFZGR0h3QUFBQUFBOE54eXhYWXdFeVlBQUFCZ0FsVk4zOWg0WXdDUS1tNVBjUHg3TGFQUmRNOEN0MzQ1NmRoTTF3|65676b7378c3e5db0538830881793bf22f020eb02dd93d561afb8a1d9a49be62; NOT_UNREGISTER_WAITING=1; Hm_lpvt_98beee57fd2ef70ccdd5ca52b9740c49=1653312418; KLBRSID=4843ceb2c0de43091e0ff7c22eadca8c|1653312418|1653312210'
    to_database(name, cookie)