import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
import os

# Load the TED talks dataset from the sibling ``data`` directory.
data = pd.read_csv(os.path.join(os.path.abspath('..'), 'data', 'ted_main.csv'))
data_c = data.comments
data_v = data.views

# Reshape each 1-D series into the (n_samples, 1) array sklearn's KMeans expects,
# instead of building a Python list of one-element lists.
data_c2 = np.asarray(data_c).reshape(-1, 1)
data_v2 = np.asarray(data_v).reshape(-1, 1)

# Cluster comments and views independently into 5 levels each.
# NOTE(review): no random_state is set, so cluster assignments (and hence the
# derived level columns) may differ between runs — consider pinning one.
jl_c = KMeans(n_clusters=5).fit(data_c2)
jl_v = KMeans(n_clusters=5).fit(data_v2)
c_cc = jl_c.cluster_centers_
v_cc = jl_v.cluster_centers_
c_cc1 = sorted(c_cc[:, 0])  # cluster centers in ascending order
v_cc1 = sorted(v_cc[:, 0])
c_level = jl_c.labels_  # per-row cluster index for comments
v_level = jl_v.labels_  # per-row cluster index for views


def fenji_c(x):
    """Return the label "comments_level_1".."comments_level_5" whose number is
    the rank of the cluster-center value *x* among the sorted centers
    (1 = smallest). Returns None if *x* matches no center."""
    for rank, center in enumerate(c_cc1, start=1):
        if x == center:
            return f"comments_level_{rank}"


# Add a comments_level column: map each row's cluster label to its center
# value, then to that center's rank among the sorted centers.
data['comments_level'] = [fenji_c(center) for center in c_cc[c_level]]


def fenji_v(x):
    """Return the label "views_level_1".."views_level_5" whose number is
    the rank of the cluster-center value *x* among the sorted centers
    (1 = smallest). Returns None if *x* matches no center."""
    for rank, center in enumerate(v_cc1, start=1):
        if x == center:
            return f"views_level_{rank}"


# Add a views_level column, built the same way as comments_level.
data['views_level'] = [fenji_v(center) for center in v_cc[v_level]]

# Persist the augmented table next to the input file.
data.to_csv(os.path.join(os.path.abspath('..'), 'data', 'ted_main_new1.csv'))
