# coding=utf8

import networkx as nx
import logging
logger = logging.getLogger('debug')


def graph_cluster(G, node_to_sentence_num, node_word, word_to_vector, type_):
    '''
    Graph clustering: take the minimum spanning tree of G, then repeatedly
    cut the heaviest edge of any sub-tree that is still "impure", i.e. that
    contains two (or more) different words coming from the same training
    sentence.

    Args:
        G: weighted networkx graph whose nodes index into node_word.
        node_to_sentence_num: node id -> index of the sentence it came from.
        node_word: node id -> the word string of that node.
        word_to_vector: unused here; kept for interface compatibility.
        type_: label used only in the summary printout.

    Returns:
        A list of networkx graphs, one per final cluster.
    '''

    def _needs_split(graph):
        '''
        True if some training sentence contributes two different words to
        this graph — the cluster is not yet pure and must be split further.
        Each sentence is compared against the word of its first-seen node,
        matching the original list/index-based scan.
        '''
        first_word = {}  # sentence num -> word of the first node seen for it
        for node in graph.nodes():
            sent = node_to_sentence_num[node]
            word = node_word[node]
            if sent not in first_word:
                first_word[sent] = word
            elif first_word[sent] != word:
                return True
        return False

    def _heaviest_edge(graph):
        '''
        Return (u, v) of the maximum-weight edge of graph. Iterates the
        graph's own edge list (O(E)) instead of probing every node pair
        with has_edge (O(V^2)) as the original did.
        '''
        u, v, _weight = max(graph.edges(data='weight'), key=lambda e: e[2])
        return u, v

    graphs = [nx.minimum_spanning_tree(G)]
    res_graphs = []

    # Keep splitting until every remaining sub-tree is pure.
    while graphs:
        pending = []
        for graph in graphs:
            if _needs_split(graph):
                u, v = _heaviest_edge(graph)
                graph.remove_edge(u, v)
                # A tree minus one edge has exactly two components.
                # nx.connected_component_subgraphs was removed in
                # networkx 2.4; rebuild the pieces from
                # connected_components instead.
                for comp in nx.connected_components(graph):
                    pending.append(graph.subgraph(comp).copy())
            else:
                res_graphs.append(graph)
        graphs = pending

    # Division is safe: even an empty MST yields one (pure) cluster above.
    print(type_ + '\t:\t' + '一共 %d 个cluster，%d 个word， 平均每个cluster %.2f 个word'
          % (len(res_graphs), len(node_word), 1.0 * len(node_word) / len(res_graphs)))
    logger.info('word的分布是：')
    logger.info(','.join([str(graph.number_of_nodes()) for graph in res_graphs]))

    return res_graphs
