import tensorflow as tf
from random import choice, shuffle
from numpy import array


def TFKMeansCluster(vectors, noofclusters):
    """
    K-Means clustering using the TensorFlow 1.x graph-mode API.

    Parameters
    ----------
    vectors : n*k 2-D NumPy array
        n data points of dimensionality k.
    noofclusters : int
        Number of clusters; must be smaller than n.

    Returns
    -------
    (centroids, assignments)
        ``centroids`` is a list of k-dimensional centroid arrays and
        ``assignments[i]`` is the cluster index assigned to ``vectors[i]``.
    """

    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Dimensionality of each input vector.
    dim = len(vectors[0])

    # Shuffled indices, used to pick random data points as initial centroids.
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # A fresh graph per call ensures that repeated invocations don't keep
    # crowding the default graph with unused ops and Variables.
    graph = tf.Graph()

    with graph.as_default():

        # Session bound to this function's private graph.
        sess = tf.Session()

        ## CONSTRUCTING THE ELEMENTS OF COMPUTATION

        # One Variable per centroid, each initialized to a randomly chosen
        # data point.
        centroids = [tf.Variable((vectors[vector_indices[i]]))
                     for i in range(noofclusters)]

        # Op that writes a fed value into a centroid Variable.
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        # One cluster-assignment Variable per input vector (initially 0).
        assignments = [tf.Variable(0) for i in range(len(vectors))]

        # Op that writes a fed value into an assignment Variable.
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment,
                                             assignment_value))

        # Mean along axis 0 of a batch of vectors — yields a new centroid
        # location from the members of a cluster.
        mean_input = tf.placeholder("float", [None, dim])
        mean_op = tf.reduce_mean(mean_input, 0)

        # Euclidean distance between two k-dimensional vectors.
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(
            v1, v2), 2)))

        # Given the distances from one vector to every centroid, pick the
        # index of the nearest centroid.  NOTE: the fed list below must NOT
        # be named 'centroid_distances', since that is this placeholder.
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ## INITIALIZING STATE VARIABLES
        # The initializer must be created after ALL Variables above have
        # been constructed, so that every one of them is covered by it.
        init_op = tf.initialize_all_variables()
        sess.run(init_op)

        ## CLUSTERING ITERATIONS
        # Run a fixed number of Expectation-Maximization iterations instead
        # of a convergence-based stopping criterion, to keep things simple.
        noofiterations = 100
        for iteration_n in range(noofiterations):

            ## EXPECTATION STEP
            # Based on the centroid locations from the previous iteration,
            # compute the expected centroid assignment of each vector.
            # BUG FIX: the original iterated over the undefined name 'vect'
            # (and then clobbered it) — iterate over 'vectors' instead.
            for vector_n, vect in enumerate(vectors):
                # Distance from this vector to each current centroid.
                distances = [sess.run(euclid_dist, feed_dict={
                    v1: vect, v2: sess.run(centroid)})
                             for centroid in centroids]

                # Index of the nearest centroid for this vector.
                assignment = sess.run(cluster_assignment, feed_dict={
                    centroid_distances: distances})

                # Store the assignment in the corresponding state Variable.
                sess.run(cluster_assigns[vector_n], feed_dict={
                    assignment_value: assignment})

            ## MAXIMIZATION STEP
            # Based on the expected assignments, move each centroid to the
            # mean of its members, minimizing within-cluster sum-of-squares.
            for cluster_n in range(noofclusters):
                # Collect all vectors assigned to this cluster.
                # BUG FIX: the original indexed the undefined name 'vect'.
                assigned_vects = [vectors[i] for i in range(len(vectors))
                                  if sess.run(assignments[i]) == cluster_n]
                if not assigned_vects:
                    # Empty cluster: keep the previous centroid rather than
                    # feeding an empty array into the mean op (NaN result).
                    continue
                # Compute and store the new centroid location.
                new_location = sess.run(mean_op, feed_dict={
                    mean_input: array(assigned_vects)})
                sess.run(cent_assigns[cluster_n], feed_dict={
                    centroid_value: new_location})

        # Materialize the final centroids and assignments as NumPy values.
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments


"""
错误做法：
for i in range(100):
    x = sess.run(tf.assign(variable1, placeholder))
乍一看似乎没有什么害处，但是每次您初始化操作时，（例如甚至，您都将新的操作实例添加到默认图中。
相反，如代码中所示，为每个任务定义一个特定的操作（但是专用代码），然后在每次迭代中调用所需的节点。
要检查您是否在图上挤满了不必要的操作，只需在每次迭代中打印出值，看看它是否在增加。
实际上，这应该是您在每次迭代中与图进行交互的唯一方式
（这里所说的操作指 tf.assign、tf.zeros 之类的节点；图中操作的数量可以用 len(graph.get_operations()) 查看。）
如第138和139行所示，sess.run 可以接受一个操作/变量的列表，并以相同顺序返回输出列表；
sess.run 应该是每次迭代中与图交互的唯一方式。

该代码没有涉及很多TensorFlow的复杂性，例如将设备分配给节点，Graph集合，依赖项等。
那部分是因为我仍在逐一理解这些方面。
"""


if "__name__" == "__main__":
    print(TFKMeansCluster(vectors, noofclusters))