import math
import os
import time
from selenium.webdriver.common.by import By
from selenium.webdriver import Edge
import json
from selenium.webdriver.edge import webdriver
import numpy as np
import warnings
warnings.filterwarnings('ignore')
import matplotlib.pyplot as plt
import pandas as pd
import requests
import datetime


def get_user_id(name):
    """Search Douyin for *name* and return the user id of the first result.

    Opens a tiny Edge window, injects saved cookies so the search works
    without logging in, then reads the first "search-result-card" link.

    Parameters:
        name: Douyin display name or account id to search for.

    Returns:
        The user-id portion of the first result's profile URL on success,
        or False when the result card could not be located.
    """
    option = webdriver.Options()
    option.add_argument("--window-size=1,1")

    browser = Edge(options=option)
    browser.set_window_size(0, 1)

    # First visit establishes the douyin.com domain so cookies can be added.
    browser.get('https://www.douyin.com/search/' + name + '?type=user')
    # Reuse exported cookies to skip the login flow.  Raw string fixes the
    # invalid "\E" escape in the original path literal (same runtime value).
    with open(r"D:\EdgeDownload\cookies.txt", 'r', encoding='utf8') as f:
        listCookies = json.loads(f.read())
    for cookie in listCookies:
        cookie_dict = {
            # The domain comes from the first field of the exported cookies.
            'domain': '.douyin.com',
            'name': cookie.get('name'),
            'value': cookie.get('value'),
            "expires": '',
            'path': '/',
            'httpOnly': False,
            'HostOnly': False,
            'Secure': False
        }
        browser.add_cookie(cookie_dict)
    # Reload the search page now that the session cookies are attached.
    browser.get('https://www.douyin.com/search/' + name + '?type=user')
    time.sleep(2)
    try:
        user_href = (browser.find_element(By.CLASS_NAME, "search-result-card")
                     .find_element(By.TAG_NAME, "a").get_attribute('href'))
    except Exception:  # narrowed from bare except: don't swallow KeyboardInterrupt
        print("get_user_id()错误")
        return False
    else:
        # Strip the URL prefix (first 28 chars) and the query string,
        # leaving only the user id.  Computed once instead of twice.
        user_id = user_href[28:].split('?')[0]
        print(user_id)
        return user_id


def get_video_arr(user_id, video_total, comment_total):
    """Scrape a Douyin user's profile page for profile and video metadata.

    Opens a headless Edge browser, injects saved cookies, scrolls the
    profile page until at least *video_total* videos are loaded, then
    reads the profile fields and per-video data out of the DOM.

    NOTE(review): the CSS class names used below (BhdsqJgJ, j5WZzJdp,
    sCnO6dhe, ...) are build-generated and break whenever Douyin
    redeploys its front end — expect to refresh them regularly.

    Parameters:
        user_id: Douyin profile id (the path segment after /user/).
        video_total: number of videos to collect.
        comment_total: passed through unchanged in the return tuple.

    Returns:
        A 14-item tuple: (video_total, comment_total, video_ids,
        user_tiktok_id, user_icon, Signatures, stat0, stat1, stat2,
        user_name, img_urls, img_texts, video_urls, like_counts),
        or False when scraping fails.
    """
    # Run the browser hidden.
    option = webdriver.Options()
    option.add_argument('--headless')
    browser = Edge(options=option)
    browser.set_window_size(0, 1)
    # First visit establishes the domain so cookies can be attached.
    browser.get("https://www.douyin.com/user/" + user_id)

    # Reuse exported cookies to skip the login flow.  Raw string fixes the
    # invalid "\E" escape in the original path literal (same runtime value).
    with open(r"D:\EdgeDownload\cookies.txt", 'r', encoding='utf8') as f:
        listCookies = json.loads(f.read())
    for cookie in listCookies:
        cookie_dict = {
            # The domain comes from the first field of the exported cookies.
            'domain': '.douyin.com',
            'name': cookie.get('name'),
            'value': cookie.get('value'),
            "expires": '',
            'path': '/',
            'httpOnly': False,
            'HostOnly': False,
            'Secure': False
        }
        browser.add_cookie(cookie_dict)

    browser.get("https://www.douyin.com/user/" + user_id)
    time.sleep(5)
    # The first page load shows 54 videos; each further scroll loads 36 more.
    episode = 1
    y_episode = video_total - 54
    if y_episode > 0:
        # ceil() already equals exact integer division when it divides
        # evenly, so the original's two-branch computation collapses.
        episode = math.ceil(y_episode / 36)

    for _ in range(episode):
        browser.execute_script("window.scrollTo(0,document.body.scrollHeight)")
        time.sleep(5)
    time.sleep(5)
    try:
        user_icon = browser.find_element(By.CLASS_NAME, 'BhdsqJgJ').find_element(By.TAG_NAME, 'img').get_attribute('src')
        user_name = (
            browser.find_element(By.CLASS_NAME, "j5WZzJdp")
            .find_element(By.TAG_NAME, 'span')
            .find_element(By.TAG_NAME, 'span')
            .find_element(By.TAG_NAME, 'span')
            .find_element(By.TAG_NAME, 'span')
            .get_attribute("textContent"))
        # Profile statistic nodes — presumably likes/follows/fans; the
        # first three are returned below.  TODO confirm against the page.
        user_arr = browser.find_elements(By.CLASS_NAME, "sCnO6dhe")

        user_tiktok_id = browser.find_element(By.CLASS_NAME, 'cOO9eQ6W').find_element(By.CLASS_NAME, 'TVGQz3SI').get_attribute('textContent')

        try:
            Signatures = (browser.find_element(By.CLASS_NAME, 'X45g5WK0')
                          .find_element(By.CLASS_NAME, 'j5WZzJdp')
                          .find_element(By.TAG_NAME, 'span')
                          .find_element(By.TAG_NAME, 'span')
                          .find_element(By.TAG_NAME, 'span')
                          .find_element(By.TAG_NAME, 'span')
                          .get_attribute('textContent')
                          )
        except Exception:
            # Profiles without a signature simply lack the node.
            Signatures = " "

        result_user = []
        img_arr = []
        img_text_arr = []
        url_arr = []
        like_arr = []
        # Keep only the first video_total statistic values.
        vide_num = video_total
        for x in user_arr:
            if vide_num == 0:
                break  # was `continue`: nothing past this point is used
            result_user.append(x.get_attribute('textContent'))
            vide_num -= 1

        li_list = (browser.find_element(By.CLASS_NAME, "LPv6KBIL").find_element(By.TAG_NAME, "ul").find_elements(By.TAG_NAME, "li"))
        result = []
        vide_num = video_total
        for li in li_list:
            if vide_num == 0:
                break  # was `continue`: stop once enough videos collected
            x = (li.find_element(By.TAG_NAME, "a")
                 .get_attribute('href'))

            img_arr.append(li.find_element(By.TAG_NAME, "img")
                           .get_attribute('src'))

            img_text_arr.append(li.find_element(By.CLASS_NAME, "Ja95nb2Z")
                                .get_attribute('textContent'))

            url_arr.append(x)

            like_arr.append(li.find_element(By.CLASS_NAME, 'YzDRRUWc.author-card-user-video-like')
                            .find_element(By.TAG_NAME, 'span')
                            .get_attribute('textContent'))

            # Strip the "https://www.douyin.com/video/" prefix -> bare video id.
            print(x[29:])
            result.append(x[29:])

            vide_num -= 1
    except Exception:  # narrowed from bare except
        print("get_video_arr()错误")
        return False
    else:
        return (video_total, comment_total, result[:video_total], user_tiktok_id,
                user_icon, Signatures, result_user[0], result_user[1], result_user[2],
                user_name, img_arr[:video_total], img_text_arr[:video_total],
                url_arr[:video_total], like_arr[:video_total])





def check_key_exists(json_data, key):
    """Return True when *key* is present in the mapping *json_data*."""
    if key in json_data:
        return True
    return False

# Locate the first CSV file under a folder (recursively)
def find_csv_files(folder_path):
    """Return the path of the first .csv file found under *folder_path*.

    Walks the tree in os.walk order and returns as soon as a match is
    found, instead of collecting every CSV only to index element 0.

    Raises:
        FileNotFoundError: when the tree contains no .csv file (the
        original indexed an empty list and raised a bare IndexError).
    """
    for root, _dirs, files in os.walk(folder_path):
        for file in files:
            if file.endswith(".csv"):
                return os.path.join(root, file)
    raise FileNotFoundError(f"no .csv file found under {folder_path}")


def delete_csv(folder_path):
    """Delete every regular file directly inside *folder_path*.

    Subdirectories are left untouched; a missing folder is a no-op.
    Deletion failures are reported but do not abort the loop.
    """
    if os.path.isdir(folder_path):
        # Walk the folder and remove all plain files.
        for filename in os.listdir(folder_path):
            file_path = os.path.join(folder_path, filename)

            # Only remove regular files, never subdirectories.
            if os.path.isfile(file_path):
                try:
                    os.remove(file_path)
                    # BUG FIX: both messages had lost their {file_path}
                    # placeholder and printed a literal "(unknown)".
                    print(f"文件 {file_path} 已成功删除。")
                except Exception as e:
                    print(f"删除文件 {file_path} 时出错: {e}")


# Crawl the comments of a list of videos
def requerst_tiktok(video_id_arr, file_len, name,count=20):
    """Download up to *file_len* comments per video and save them to CSV.

    Parameters:
        video_id_arr: iterable of Douyin video (aweme) ids to crawl.
        file_len: target number of comments to fetch for each video.
        name: artist name, used only in the output file name.
        count: page size requested from the comment API (default 20).

    Side effect: writes ./linux/date/<name>_comments1.csv. Returns None.
    """
    # Comment-list API endpoint.
    url = 'https://www.douyin.com/aweme/v1/web/comment/list/'
    # Request headers.  NOTE(review): the cookie below was captured from a
    # logged-in browser session and will expire — refresh it when requests
    # start failing.
    h1 = {
        'accept': 'application/json, text/plain, */*',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
        'cookie': 'LOGIN_STATUS=1; store-region=cn-sc; store-region-src=uid; my_rd=2; xgplayer_device_id=63538777348; __live_version__=%221.1.1.5086%22; live_use_vvc=%22false%22; xgplayer_user_id=291379087538; d_ticket=59e249d91121d58b71aaab1b6efa6bd1cd632; n_mh=tUbVBq7lfkY-CrNICBn4eTBXDweUbH0LvOGvXsXISx0; passport_csrf_token=fa0077a193a25c0eced725e7c217f2ee; passport_csrf_token_default=fa0077a193a25c0eced725e7c217f2ee; bd_ticket_guard_client_web_domain=2; passport_assist_user=CkEAxq2-ps5ynnRNnclijyshJh_Vg__QWOfpPGBdnIajXOtvxCqNmp1eklwgG412MpM70Fjb2jkdVKVvAHdxcX51cxpKCjwNAn5arzStUE6yOhVR3jyCQ03-F1LXa2LYLC7f_UXBJeTYsu2R26G9sZJC4B35n85DZXY5_xMT1vml7ugQq93QDRiJr9ZUIAEiAQNeN-Xr; sso_uid_tt=87c0e74a30d890d4b98524d73a13bfe2; sso_uid_tt_ss=87c0e74a30d890d4b98524d73a13bfe2; toutiao_sso_user=c85c3098a241d168b19fe45b21564c02; toutiao_sso_user_ss=c85c3098a241d168b19fe45b21564c02; sid_ucp_sso_v1=1.0.0-KDM4MDA0ZTZmYWIyYjQ2YjE1ZWI3MTg0ZmQ5ZTVhODkwNDk5MGIyOTkKHwj-_cCe2Y3MBxDZpeyxBhjvMSAMMPGA5YYGOAZA9AcaAmhsIiBjODVjMzA5OGEyNDFkMTY4YjE5ZmU0NWIyMTU2NGMwMg; ssid_ucp_sso_v1=1.0.0-KDM4MDA0ZTZmYWIyYjQ2YjE1ZWI3MTg0ZmQ5ZTVhODkwNDk5MGIyOTkKHwj-_cCe2Y3MBxDZpeyxBhjvMSAMMPGA5YYGOAZA9AcaAmhsIiBjODVjMzA5OGEyNDFkMTY4YjE5ZmU0NWIyMTU2NGMwMg; passport_auth_status=56e6820133e226cefb748196d2e42e0e%2C; passport_auth_status_ss=56e6820133e226cefb748196d2e42e0e%2C; uid_tt=a8e0f5fe1c49f56a77405a603df9b8cc; uid_tt_ss=a8e0f5fe1c49f56a77405a603df9b8cc; sid_tt=b23a0f63304f14b94ae5a36102011e47; sessionid=b23a0f63304f14b94ae5a36102011e47; sessionid_ss=b23a0f63304f14b94ae5a36102011e47; s_v_web_id=verify_lvxej01o_dUpSlGV2_VUdp_45Ss_9kBO_IKXdHUcm14Zx; _bd_ticket_crypt_doamin=2; _bd_ticket_crypt_cookie=7da5989eccda82feff45b1b3144bcfff; __security_server_data_status=1; sid_guard=b23a0f63304f14b94ae5a36102011e47%7C1715147671%7C5183813%7CSun%2C+07-Jul-2024+05%3A51%3A24+GMT; 
sid_ucp_v1=1.0.0-KDYxMGI1Mzg4N2FmOWNjYjZjNGIzNTk1ZGY4MDJiNGQxNWJmMDc2N2MKGwj-_cCe2Y3MBxCXp-yxBhjvMSAMOAZA9AdIBBoCaGwiIGIyM2EwZjYzMzA0ZjE0Yjk0YWU1YTM2MTAyMDExZTQ3; ssid_ucp_v1=1.0.0-KDYxMGI1Mzg4N2FmOWNjYjZjNGIzNTk1ZGY4MDJiNGQxNWJmMDc2N2MKGwj-_cCe2Y3MBxCXp-yxBhjvMSAMOAZA9AdIBBoCaGwiIGIyM2EwZjYzMzA0ZjE0Yjk0YWU1YTM2MTAyMDExZTQ3; SEARCH_RESULT_LIST_TYPE=%22single%22; ttwid=1%7Cq-P4iEEa8gs1xv-euUBDZGZN8HUYPGjHTYD7NnMQkTE%7C1715163295%7Ccf43f1ce57730f3fd649a29e98e32947ea11dd641e9b10e08b317a8f90fb37fb; download_guide=%223%2F20240508%2F1%22; volume_info=%7B%22isUserMute%22%3Afalse%2C%22isMute%22%3Afalse%2C%22volume%22%3A0.266%7D; dy_swidth=1707; dy_sheight=1067; publish_badge_show_info=%220%2C0%2C0%2C1715781452948%22; douyin.com; device_web_cpu_core=32; device_web_memory_size=8; architecture=amd64; strategyABtestKey=%221716084646.626%22; csrf_session_id=70400c63d6126f675078dca9ca62f363; passport_fe_beating_status=true; __ac_nonce=06649c5c2006525e4c559; __ac_signature=_02B4Z6wo00f011MVMmgAAIDA-y7avfqpvidTNTbAALKX1gaUu3zAuFaJ5eMoV7fe69flANwBwGESm6YcTkTKzbz2leWIARoEl4BDYSMYZlF0flY0-np0IX.qFBeI6Z4CzmcWAomvFllb36Pf0d; FOLLOW_NUMBER_YELLOW_POINT_INFO=%22MS4wLjABAAAAEZ7KRn9fHUzJ_bB888WmlIxSYpCUOKiDa5F974fC6Pdj9E0vVh6R6MeKOROzAg7N%2F1716134400000%2F0%2F1716111589759%2F0%22; WallpaperGuide=%7B%22showTime%22%3A1716092910447%2C%22closeTime%22%3A0%2C%22showCount%22%3A4%2C%22cursor1%22%3A243%2C%22cursor2%22%3A0%2C%22hoverTime%22%3A1715147776243%7D; pwa2=%220%7C0%7C3%7C1%22; IsDouyinActive=true; stream_recommend_feed_params=%22%7B%5C%22cookie_enabled%5C%22%3Atrue%2C%5C%22screen_width%5C%22%3A1707%2C%5C%22screen_height%5C%22%3A1067%2C%5C%22browser_online%5C%22%3Atrue%2C%5C%22cpu_core_num%5C%22%3A32%2C%5C%22device_memory%5C%22%3A8%2C%5C%22downlink%5C%22%3A10%2C%5C%22effective_type%5C%22%3A%5C%224g%5C%22%2C%5C%22round_trip_time%5C%22%3A50%7D%22; FOLLOW_LIVE_POINT_INFO=%22MS4wLjABAAAAEZ7KRn9fHUzJ_bB888WmlIxSYpCUOKiDa5F974fC6Pdj9E0vVh6R6MeKOROzAg7N%2F1716134400000%2F0%2F1716112228769%2F0%22; 
bd_ticket_guard_client_data=eyJiZC10aWNrZXQtZ3VhcmQtdmVyc2lvbiI6MiwiYmQtdGlja2V0LWd1YXJkLWl0ZXJhdGlvbi12ZXJzaW9uIjoxLCJiZC10aWNrZXQtZ3VhcmQtcmVlLXB1YmxpYy1rZXkiOiJCT3dxVU9VZGtXSndPZVJFQTlic3FJVEtXM0F1clZhVHU1QlZqbVl3ZXd4Q1AvYUlHZTRlQ0FuQ1FxSzNROVV2QldaRXBPRzV5dzZWU3cyWHN4V0o4Nnc9IiwiYmQtdGlja2V0LWd1YXJkLXdlYi12ZXJzaW9uIjoxfQ%3D%3D; home_can_add_dy_2_desktop=%221%22; xg_device_score=7.719683499872079; stream_player_status_params=%22%7B%5C%22is_auto_play%5C%22%3A0%2C%5C%22is_full_screen%5C%22%3A0%2C%5C%22is_full_webscreen%5C%22%3A0%2C%5C%22is_mute%5C%22%3A0%2C%5C%22is_speed%5C%22%3A1%2C%5C%22is_visible%5C%22%3A1%7D%22; msToken=fYx2py12PLPlPIaSi60dsSGcJdQSsBSwmA4BVJuahJ9nRJ-qtqNC070D3M-SEQh3XcwH7tmOwL8wQwyFf3ANfH8hUU5Br1fPwi7qa5Z37N5qwtbBNEKizmibpKuTusc=; odin_tt=71282eba22cac0b70001776cc6ccd10309047d215ef2101bfde629e97fb4b6f6faab2b38ba443dae0fc71ac5990c83811aac998a7a4397390849e200694ab61e',
        'referer': 'https://www.douyin.com/',
        'sec-ch-ua': '"Chromium";v="122", "Not(A:Brand";v="24", "Microsoft Edge";v="122"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36 Edg/122.0.0.0',
    }
    # Accumulators shared by every call of begin(); grown in lockstep.
    ip_list = []  # commenter IP region
    text_list = []  # comment text
    create_time_list = []  # comment timestamp (epoch seconds)
    user_name_list = []  # commenter nickname
    like_count_list = []  # like (digg) count
    tiktok_id = []  # video id

    def begin(video_id_index):
        """Fetch every comment page of one video and extend the lists above."""
        # TODO(review): per-request exception handling would avoid losing
        # everything when the crawler gets blocked mid-run.
        episode = int(file_len / count)
        print(str(video_id_index) + "视频爬取总轮次：", episode)
        for i in range(episode):
            print("第" + str(i + 1) + "轮次")
            # Query-string parameters for this page.
            params = {
                'device_platform': 'webapp',
                'aid': 6383,
                'channel': 'channel_pc_web',
                'aweme_id': video_id_index,  # video id
                'cursor': i * count,
                'count': count,
                'item_type': 0,
                'insert_ids': '',
                'rcFT': '',
                'pc_client_type': 1,
                'version_code': '170400',
                'version_name': '17.4.0',
                'cookie_enabled': 'true',
                'screen_width': 1440,
                'screen_height': 900,
                'browser_language': 'zh-CN',
                'browser_platform': 'MacIntel',
                'browser_name': 'Chrome',
                'browser_version': '109.0.0.0',
                'browser_online': 'true',
                'engine_name': 'Blink',
                'engine_version': '109.0.0.0',
                'os_name': 'Mac OS',
                'os_version': '10.15.7',
                'cpu_core_num': 4,
                'device_memory': 8,
                'platform': 'PC',
                'downlink': 10,
                'effective_type': '4g',
                'round_trip_time': 50,
                'webid': 7335665906297947660,
                'msToken': 'LZ3nJ12qCwmFPM1NgmgYAz73RHVG_5ytxc_EMHr_3Mnc9CxfayXlm2kbvRaaisoAdLjRVPdLx5UDrc0snb5UDyQVRdGpd3qHgk64gLh6Tb6lR16WG7VHZQ==',
            }

            proxies = {
                'http': 'http://202.117.115.6:80'
            }

            # Send the request; the proxy plus paced paging are light
            # anti-crawl countermeasures.
            r = requests.get(url, headers=h1, params=params,proxies=proxies)
            # Decode the JSON body.
            json_data = r.json()
            if not check_key_exists(json_data,"comments"):
                continue
            comment_len = len(list(json_data['comments']))

            if comment_len != 0:
                # Walk the comments actually returned in this page.
                for i2 in range(int(comment_len)):
                    # Skip records without an IP region so all six lists
                    # stay the same length.
                    if not check_key_exists(json_data['comments'][i2],"ip_label"):
                        continue
                    ip_list.append(json_data['comments'][i2]['ip_label'])
                    text_list.append(str(json_data['comments'][i2]['text']).replace('\n', '').replace('\r', ''))
                    create_time_list.append(json_data['comments'][i2]['create_time'])
                    user_name_list.append(json_data['comments'][i2]['user']['nickname'])
                    like_count_list.append(json_data['comments'][i2]['digg_count'] if check_key_exists(json_data['comments'][i2],'digg_count') else 0)
                    tiktok_id.append(str(video_id_index))
                if comment_len != count:
                    print("comment_len != count")
                    # NOTE(review): `continue` at the tail of the loop body
                    # is a no-op — a short page signals the last page, so
                    # `break` was probably intended here.  Confirm before
                    # changing.
                    continue

    for i in video_id_arr:
        try:
            begin(i)
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt;
            # `except Exception` would be safer.
            print("本轮次有错误，直接跳过该轮次")
            continue
    print("准备写入comments1.csv，请稍后")


    # All lists grow in lockstep, so the [:load_arr_len] slices below are
    # purely a defensive guard against length drift.
    load_arr_len = len(tiktok_id)
    print(len(tiktok_id), len(user_name_list), len(create_time_list), len(ip_list), len(like_count_list), len(text_list))
    # Collect the columns into a DataFrame.
    df = pd.DataFrame(
        {
            'video_id': tiktok_id[:load_arr_len],
            'nickname': user_name_list[:load_arr_len],
            'time': create_time_list[:load_arr_len],
            'ip': ip_list[:load_arr_len],
            'like': like_count_list[:load_arr_len],
            'text': text_list[:load_arr_len],
        }
    )
    result_file = './linux/date/' + name + '_comments1.csv'
    df.to_csv(result_file, mode='w', index=False, encoding='utf_8_sig')



# Check whether this id has already been analysed (its file exists)
def check_file_exists(folder_path, target_string):
    """Return True when a file named *target_string* exists directly in *folder_path*.

    Parameters:
        folder_path: directory to inspect (not recursive).
        target_string: exact file name to look for.
    """
    # Membership test replaces the original manual scan loop.
    return target_string in os.listdir(folder_path)


# Data preprocessing
def data_preprocessing(name):
    """Clean the scraped comments and convert timestamps to readable UTC.

    Reads ./linux/date/<name>_comments1.csv, strips @mentions and stop
    characters from the text column, drops empty rows, rewrites the epoch
    'time' column as 'YYYY-mm-dd HH:MM:SS' (UTC) and saves the result to
    ./linux/date/<name>_comments2.csv.
    """
    data = pd.read_csv('./linux/date/' + name + '_comments1.csv', header=0)
    df_clean_copy = data.copy()
    # Characters removed *literally* from the comment text.
    stoparr = [r'@', r'[', r']', r'\s', r'“', r'”', r'。', r'\n', r'\t', r'，',
               r',', r'.', r'》', r'《', r'>', r'<', r' ', r'.', r'    '
               ]
    # Remove @mentions first (this one really is a regex)...
    df_clean_copy['text'] = df_clean_copy['text'].str.replace(r'@[^\s]+', '', regex=True)
    # ...then the literal stop characters.  regex=False is now explicit:
    # the old calls relied on the pandas>=2 default, and tokens such as
    # '[' are invalid regular expressions under older pandas defaults.
    for i in stoparr:
        df_clean_copy['text'] = df_clean_copy['text'].str.replace(i, '', regex=False)
    df_clean_copy = df_clean_copy.dropna(subset=['text'], inplace=False)
    # Turn whitespace-only cells into NaN so the final dropna removes them.
    df_clean_copy.replace(to_replace=r'^\s*$', value=np.nan, regex=True, inplace=True)
    df_clean_copy = df_clean_copy.dropna()
    data = df_clean_copy.copy()

    # Vectorized epoch -> "YYYY-mm-dd HH:MM:SS" (UTC).  Replaces the
    # per-row loop over the deprecated datetime.utcfromtimestamp().
    data['time'] = (pd.to_datetime(data['time'].astype(int), unit='s')
                    .dt.strftime("%Y-%m-%d %H:%M:%S"))

    # Save the cleaned data back to CSV.
    data.to_csv('./linux/date/' + name + '_comments2.csv', index=False, encoding='utf_8_sig')



def drew_wordcloud(name):
    """Build a TF-IDF keyword word cloud from the cleaned comments.

    Reads ./linux/date/<name>_comments2.csv, extracts the top-50 keywords
    with jieba's TF-IDF and renders them to two PNG files (visualization
    directory and the front-end photo directory).
    """
    from jieba import analyse
    import jieba

    tfidf = analyse.extract_tags
    data = pd.read_csv('./linux/date/' + name + '_comments2.csv', header=0)
    '''
    sentence: the text to extract keywords from
    topK: number of highest TF/IDF-weighted keywords to return (default 20)
    withWeight: whether to also return the keyword weights (default False)
    allowPOS: only include the given parts of speech (default empty = no filter)
    '''
    # Flatten the text column into one string for keyword extraction.
    result = data['text'].str.join('')
    result = result.to_string(index=False)
    # BUG FIX: str.replace returns a new string — the original call
    # discarded its result, so spaces were never actually removed.
    result = result.replace(" ", "")
    keywords = tfidf(result, topK=50, withWeight=False, allowPOS=())
    keywords = ','.join(keywords)
    from wordcloud import WordCloud
    wcd = WordCloud(background_color='white', max_words=50, repeat=True, max_font_size=100,
                    font_path='./fonts/STXINGKA.TTF', height=600, width=800).generate(keywords)
    wcd.to_file('./linux/Data_visualization/wordcloud_img/' + name + '.png')
    wcd.to_file('./files_fast/photo/' + name + '.png')


# Draw a donut (pie) chart of comment counts per province
def drew_pie(df, title, name, hide_percentage=0.02):
    """Draw and save a donut chart of per-province comment counts.

    Parameters:
        df: DataFrame with 'ip' (province) and 'count' columns.
        title: label fragment ("白子"/"黑子") used in the title and file name.
        name: artist name used in the title and file name.
        hide_percentage: slices below this share of the total are dropped
            so tiny labels stay legible (default 2%).
    """
    # Drop slices too small to label, then sort largest-first.
    df = df[df['count'] > hide_percentage * df['count'].sum()]
    df = df.sort_values(by='count', ascending=False)
    plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK-capable default font
    plt.rcParams['axes.unicode_minus'] = False  # keep '-' rendering in saved images
    plt.figure(figsize=(10, 10))

    plt.pie(df['count'], labels=df['ip'], autopct='%1.1f%%', startangle=90)
    # Overlay a white circle on the centre to turn the pie into a donut,
    # pushing the labels to the outside.
    centre_circle = plt.Circle((0, 0), 0.90, fc='white')
    fig = plt.gcf()
    fig.gca().add_artist(centre_circle)
    plt.gca().set_aspect('equal')
    plt.title(name + '评论区' + title + '所属ip饼图')
    fig.savefig('./linux/Data_visualization/pie/' + name + '_' + title + '.png')
    # Release the figure: without this, repeated calls accumulate open
    # figures and leak memory.
    plt.close(fig)


def get_label(name):
    """Run SnowNLP sentiment over the cleaned comments and label each row.

    Reads ./linux/date/<name>_comments2.csv, appends a 'label' column
    (1 = positive "white fan", 0 = negative "black fan"), writes a backup
    CSV and the HDFS input CSV, and hands the labelled frame to the
    training-data converter.

    Returns:
        (withe_dict, black_dict): province -> positive / negative counts.
    """
    # Reminder ------------------------------!!!!!!!!--------------------------------------------
    test_data = pd.read_csv('./linux/date/' + name + '_comments2.csv', header=0)
    #test_data = pd.read_csv('./date/' + name + '_comments.csv', header=0)
    provinces = []
    # Dicts keyed by province name (without the "省" suffix), values start at 0.
    # NOTE(review): `provinces` is empty here, so both dicts start empty;
    # entries are added lazily inside the loop below.
    province_dict = {province: 0 for province in provinces}
    province_dict2 = {province: 0 for province in provinces}
    # SnowNLP is really tuned for e-commerce reviews — not ideal for
    # trending-topic comment sentiment, but used here anyway.
    from snownlp import SnowNLP
    withe_count = 0
    withe_dict = province_dict  # alias, not a copy
    black_count = 0
    black_dict = province_dict2  # alias, not a copy

    for index, word in test_data.iterrows():
        if word['ip'] not in provinces:
            provinces.append(word['ip'])
        if word['ip'] not in withe_dict:
            withe_dict[word['ip']] = 0
        if word['ip'] not in black_dict:
            black_dict[word['ip']] = 0
        s = SnowNLP(word['text'])
        sentiments = s.sentiments
        # print(sentiments)
        if sentiments > 0.45:
            # Positive comment ("white fan"); 0.45 is the sentiment cutoff.
            withe_dict[str(word['ip'])] += 1
            withe_count = withe_count + 1
            test_data.loc[index, 'label'] = 1
        else:
            # Negative comment ("black fan").
            black_dict[str(word['ip'])] += 1
            black_count = black_count + 1
            test_data.loc[index, 'label'] = 0

    # Back up the labelled file.
    test_data.to_csv('./linux/date/' + name + '_comments3.csv', index=False, encoding='utf_8_sig')
    # Build the CSV that gets uploaded to HDFS.
    hdfs_input = pd.DataFrame([], columns=['video_id', 'nickname', 'time', 'ip', 'like', 'text', 'label'])
    hdfs_input['video_id'] = test_data['video_id']
    hdfs_input['nickname'] = test_data['nickname']
    hdfs_input['time'] = test_data['time']
    hdfs_input['ip'] = test_data['ip']
    hdfs_input['like'] = test_data['like']
    hdfs_input['text'] = test_data['text']
    hdfs_input['label'] = test_data['label']
    hdfs_input.to_csv('./linux/date/demo_input.csv', index=False, encoding='utf_8_sig')

    from utils.csv_table_to_train_date import csv_table_to_train_date
    csv_table_to_train_date(name, test_data)

    # analysis_dataframe1 = {'省份': withe_dict.keys(), '白子数': withe_dict.values(), '黑子数': black_dict.values()}
    # pd.DataFrame(analysis_dataframe1).to_csv('./date/' + name + '_数据分析表.csv', index=False, encoding='utf_8_sig')

    # provinces_df = pd.DataFrame([], columns=['provinces', 'count', 'count_0','count_1'])
    # provinces_df['provinces'] = provinces
    # provinces_df['count'].fillna(value=0, inplace=True)
    # provinces_df['count_0'].fillna(value=0, inplace=True)
    # provinces_df['count_1'].fillna(value=0, inplace=True)
    # provinces_df.to_csv('./date/provinces.csv', index=False, encoding='utf_8_sig')
    print("get_label()执行完成")
    return withe_dict, black_dict




# Data visualization: render the positive/negative per-province pies
def data_visualization_drew(withe_dict, black_dict, name=''):
    """Render the positive/negative per-province pie charts via drew_pie.

    Parameters:
        withe_dict: province -> positive ("white fan") comment count.
        black_dict: province -> negative ("black fan") comment count.
        name: artist name forwarded to drew_pie for titles and file names.
            BUG FIX: drew_pie requires a `name` argument, so the original
            two-argument calls raised TypeError; the new defaulted
            parameter keeps the old call signature working.
    """
    df = pd.DataFrame(list(withe_dict.items()), columns=['ip', 'count'])
    df2 = pd.DataFrame(list(black_dict.items()), columns=['ip', 'count'])
    drew_pie(df, "白子", name)
    drew_pie(df2, "黑子", name)
    print(df['count'].sum(), df2['count'].sum())


def load_linux_spark():
    """Ship demo_input.csv into the Docker master, run the Spark job there,
    and pull the out1/out2 result directories back to the local tree."""
    # Clear results left over from a previous run.
    for stale_dir in (
        r"D:\PycharmProjects\tik-tok-comment-analysis\tik-tok_-comment_-analysis\linux\linux_load_to_dir\out1",
        r"D:\PycharmProjects\tik-tok-comment-analysis\tik-tok_-comment_-analysis\linux\linux_load_to_dir\out2",
    ):
        delete_csv(stale_dir)
    import subprocess, sys
    # Copy the input CSV into the container.
    os.system("docker cp D:/PycharmProjects/tik-tok-comment-analysis/tik-tok_-comment_-analysis/linux/date/demo_input.csv master://root")
    time.sleep(2)
    # Launch the Spark job inside the container and wait for it to finish.
    spark_job = subprocess.Popen(["powershell.exe", """docker exec -i master bash -c '//root/run.sh'"""], stdout=sys.stdout)
    spark_job.communicate()
    time.sleep(2)
    # Pull both result directories back out of the container.
    for pull_cmd in (
        "docker cp master://root/out1 D:\\PycharmProjects\\tik-tok-comment-analysis\\tik-tok_-comment_-analysis\\linux\\linux_load_to_dir",
        "docker cp master://root/out2 D:\\PycharmProjects\\tik-tok-comment-analysis\\tik-tok_-comment_-analysis\\linux\\linux_load_to_dir",
    ):
        os.system(pull_cmd)
        time.sleep(2)


# Aggregate the Spark "out" results into the values the front end needs
def out_to_front():
    """Aggregate the Spark job outputs into the tuple the front end renders.

    Reads out1/out2 CSVs plus demo_input.csv and economy.csv from fixed
    local paths and derives pie-chart, weekday, hourly, yearly, heat-map
    and per-province economic statistics.

    Returns a 17-item tuple; see the comment above the return statement.
    """
    # Read the result CSVs produced by the Spark job.
    out1 = pd.read_csv(find_csv_files(r"./linux/linux_load_to_dir/out1"), header=0)
    out2 = pd.read_csv(find_csv_files(r"./linux/linux_load_to_dir/out2"), header=0)

    # Drop rows whose province is "未知" (unknown).
    df_clean = out1[~out1['ip'].str.contains('未知')]
    # Positive ("white fan") / negative ("black fan") totals for the pie chart.
    pie_value1 = out1['count_1'].sum()
    pie_value2 = out1['count_0'].sum()

    # Top-7 provinces; already sorted by the PySpark job, so just take the head.
    province_top7 = [word['ip'] for index, word in df_clean[:7].iterrows()]
    # Positive count per top-7 province.
    province_top7_value1 = [word['count_1'] for index, word in df_clean[:7].iterrows()]
    # Negative count per top-7 province.
    province_top7_value2 = [word['count_0'] for index, word in df_clean[:7].iterrows()]
    print(province_top7)
    print(pie_value1, pie_value2)
    # Share (%) of the province with the most comments, truncated to 2 dp.
    province_max = (int((out1[:1]['count'].sum() / out1['count'].sum()) * 10000) / 100)
    # Name of that busiest province.
    province = out1[:1]['ip'].values
    print(province)

    # Convert the CSV date column to a workable datetime dtype.
    out2['date'] = pd.to_datetime(out2['date'])
    # print(type(out2['date']))
    # Yearly positive/negative totals, collected into Series.
    # NOTE(review): freq='Y' is deprecated in newer pandas ('YE' replaces
    # it) — confirm against the installed version.
    result1 = out2.groupby(pd.Grouper(key='date', freq='Y'))['count_1'].sum()
    result2 = out2.groupby(pd.Grouper(key='date', freq='Y'))['count_0'].sum()

    # result5 = out2.groupby(pd.Grouper(key='date', freq='H'))['count'].sum()
    # result5.sort_values(ascending=False, inplace=True)
    # print(result5)

    # Work out which weekday has the most comments.
    out2_copy = out2.copy()
    out2_copy['date'] = pd.to_datetime(out2_copy['date'])
    out2_copy['week_day'] = out2_copy['date'].dt.weekday
    result_week = out2_copy.groupby(pd.Grouper(key='week_day'))[['count_1', 'count_0']].sum()
    result_week['count'] = result_week['count_1'] + result_week['count_0']
    result_week.sort_values(by='count', ascending=False, inplace=True)
    first_row_weekday = result_week.index[0]
    try:
        max_weekday_pair = (int((result_week['count'][0] / result_week['count'].sum()) * 10000) / 100)
    except:
        # NOTE(review): bare except defaults the share to 100% on any
        # failure (e.g. missing weekday-0 row) — consider except Exception.
        max_weekday_pair=100
    # Busiest weekday's share of all comments (%).
    #print(result_week['count'][0] / result_week['count'].sum())
    print(first_row_weekday)
    week_arr = ["星期一", "星期二", "星期三", "星期四", "星期五", "星期六", "星期天"]
    max_weekday = week_arr[int(first_row_weekday)]
    # Human-readable name of the busiest weekday.
    print("max_weekday", max_weekday)

    # Work out which hour of the day has the most comments.
    out3 = pd.read_csv(r'D:\PycharmProjects\tik-tok-comment-analysis\tik-tok_-comment_-analysis\linux\date\demo_input.csv', header=0)
    # out3.dropna(subset=['hour'], inplace=False)
    # print("out3",out3[1:])
    # hour_max=str(out3[1:].iloc[0]['hour'])[11:16]
    # hour_max_count=out3[1:].iloc[0]['count']
    out3['time'] = pd.to_datetime(out3['time'], errors='coerce')
    hour_counts_pd = out3.groupby(out3['time'].dt.hour).size().reset_index(name='counts')
    max_count_hour = hour_counts_pd.loc[hour_counts_pd['counts'].idxmax()]
    hour_max = str(max_count_hour['time']) + ":00"
    hour_max_count = int(max_count_hour['counts'])
    print(hour_max_count)

    # Heat-map data: daily comment counts for 2024.
    year_arr = [*out2['date'].dt.year.drop_duplicates().dropna()][::-1]
    year_arr = map(lambda x: str(x), year_arr)
    year_arr = [str(i)[:4] for i in year_arr]
    print(year_arr)
    result3 = out2[out2['date'].dt.year == 2024]
    result4 = pd.DataFrame([], columns=['date', 'count'])
    result4['date'] = result3['date']
    result4['count'] = result3['count_1'] + result3['count_0']
    # print(result4)
    result4_date_count = []
    for index, word in result4.iterrows():
        result4_date_count.append([str(word['date'])[:10], word['count']])
    print(result4_date_count)

    # Revenue/expenditure curves for the top-7 provinces.
    province_top7_revenue = []
    province_top7_expenditures = []
    # Per-province economic data.
    economy_data = pd.read_csv(r'D:\PycharmProjects\tik-tok-comment-analysis\tik-tok_-comment_-analysis\linux\date\economy.csv', header=0)
    # NOTE(review): `input` shadows the builtin input() within this scope.
    input = province_top7
    for i in input:
        revenue = economy_data.loc[economy_data['ip'] == i].iloc[0]['revenue']
        province_top7_revenue.append(int(revenue))
        expenditures = economy_data.loc[economy_data['ip'] == i].iloc[0]['expenditures']
        province_top7_expenditures.append(int(expenditures))
        print(revenue, expenditures)
    # print(result,result2,result3)

    # Returns: busiest-hour comment count, busiest hour, busiest weekday,
    # busiest-weekday share, busiest province, its share, years, yearly
    # positive counts, yearly negative counts, daily heat-map data,
    # pie values 1/2, top-7 provinces with their positive/negative counts,
    # and their revenue/expenditure figures.
    return hour_max_count, hour_max, max_weekday, max_weekday_pair, province, province_max, year_arr, result1, result2, result4_date_count, pie_value1, pie_value2, province_top7, province_top7_value1, province_top7_value2, province_top7_revenue, province_top7_expenditures





if __name__ == "__main__":
    # Entry point intentionally disabled; the commented-out lines below
    # document the original end-to-end demo pipeline (crawl -> preprocess
    # -> word cloud -> sentiment labels -> Docker/Spark -> visualisation).
    pass
    # print("示例演示，只爬取前20条视频，每条视频取五千条评论")
    # name=input("请输入你想要分析的艺人的抖音号或抖音名:")
    # (video_arr,user_fs)=get_video_arr(get_user_id(name))
    # requerst_tiktok(video_id_demo=video_arr, count=50, file_len=5000)
    # data_preprocessing()
    # drew_wordcloud()
    # withe_dict, black_dict = get_label()
    # data_analysis()
    # name="蔡徐坤"
    # get_label()
    # out_to_front()
    # # Run the shell commands to automate the Docker/Spark steps
    # import os
    # import subprocess, sys
    # os.system("docker cp D:/PycharmProjects/tik-tok_-comment_-analysis/linux/date/demo_input.csv master://root")
    # time.sleep(2)
    # p = subprocess.Popen(["powershell.exe", """docker exec -i master bash -c '//root/run.sh'"""], stdout=sys.stdout)
    # p.communicate()
    # time.sleep(2)
    # os.system("docker cp master://root/out1 D:/PycharmProjects/tik-tok_-comment_-analysis/linux/linux_load_to_dir")
    #
    # os.system("docker cp master://root/out2 D:/PycharmProjects/tik-tok_-comment_-analysis/linux/linux_load_to_dir")
    #
    #
    #
    #
    # # from utils.Big_data_processing import big_data_processing
    # # big_data_processing(name)
    # data_visualization_drew(withe_dict, black_dict)
