# -*- coding:utf-8 -*-

"""

由  Feature_title_extration 得到 titleCluster_list

titleCluster_vec

然后 agg 聚类
根据 idx titleCluster 字典 替换 idx

得到 最终的 聚类 列表
"""
from feature_data.XmlData2DictData import Xml2Dict
from feature_data.Dict2OrgVenue_Graphfeature import Feature_org_venue_extration
from feature_data.answer_extration import Answer_extration
from feature_data.Dict2Title_VSMfeature import Feature_title_extration
from nd_utils.networkx_util import *
from sklearn.cluster import AgglomerativeClustering
import numpy as np
from nd_utils.write_csv import write_csv_f
class Clust_title_stage():
    """Final clustering stage: agglomerative clustering of title-cluster vectors.

    Takes the title clusters produced by ``Feature_title_extration``, groups
    them with average-linkage agglomerative clustering on cosine distance
    (the number of clusters is taken from the ground-truth answer data),
    and maps each clustered titleCluster index back to its paper indices.
    """

    def __init__(self, feature_title_extration, answer_extration):
        self.feature_title_extration = feature_title_extration

        self.init_graph = self.feature_title_extration.init_graph

        # Ground truth: cluster count and the paper-index partition.
        self.true_cluster_num = answer_extration.true_cluster_num
        self.true_paperidx_cluster_list = answer_extration.true_paperidx_cluster_list

        # In fact, this subgraph_list is the cluster list (one connected
        # component of the initial graph per cluster).
        self.subgraph_list = self._get_subgraph_list()
        self.previous_subgraph_nodes_list = self._get_last_subgraph_nodes_list()

        # idx -> titleCluster object, and idx -> titleCluster feature vector.
        self.idx_titleCluster_dict = self.feature_title_extration.idx_titleCluster_dict
        self.idx_titleCluster_vec_dict = self.feature_title_extration.idx_titleCluster_vec_dict

        # list() is a no-op copy on Python 2 (values() is already a list)
        # and keeps this working with Python 3 dict views.
        self.titleCluster_vec_list = list(self.idx_titleCluster_vec_dict.values())

        self.predict_titleCluster_clustering_list = self._get_titleCluster_vec_clustering_list()

        self.predict_paperidx_cluster_list = self._get_predict_paperidx_cluster_list()

    def _get_subgraph_list(self):
        """Return the connected-component subgraphs of the initial graph."""
        return get_subgraphs(self.init_graph)

    def _get_last_subgraph_nodes_list(self):
        """Return the node lists of the previous stage's subgraphs."""
        return get_subgraph_nodes_list(self.subgraph_list)

    def _get_titleCluster_vec_clustering_list(self):
        """Agglomeratively cluster the titleCluster vectors.

        Returns a list of clusters, each a list of titleCluster indices
        (positions in ``titleCluster_vec_list``).
        """
        # Convert once; the previous version built the array twice and
        # computed unused row/column counts.
        vec_arr = np.asarray(self.titleCluster_vec_list)
        y_pred = AgglomerativeClustering(n_clusters=self.true_cluster_num,
                                         linkage="average",
                                         affinity="cosine").fit_predict(vec_arr)
        predict_label_dict = self.get_predict_label_dict(y_pred)
        return list(predict_label_dict.values())

    def _get_predict_paperidx_cluster_list(self):
        """Expand each titleCluster cluster into its underlying paper indices.

        NOTE(review): this assumes the positional indices produced by the
        clustering line up with the keys of ``idx_titleCluster_dict`` —
        i.e. both dicts iterate in the same order with keys 0..n-1;
        verify against Feature_title_extration.
        """
        predict_paperidx_cluster_list = []
        for titleCluster_clustering in self.predict_titleCluster_clustering_list:
            paperidx_cluster = []
            for titleCluster_idx in titleCluster_clustering:
                paperidx_cluster.extend(self.idx_titleCluster_dict[titleCluster_idx].paperidx_list)
            predict_paperidx_cluster_list.append(paperidx_cluster)
        return predict_paperidx_cluster_list

    def get_predict_label_dict(self, predict_label_list):
        """Invert a label sequence into an inverted index {label: [indices]}."""
        predict_label_dict = {}
        for idx, pred_lbl in enumerate(predict_label_list):
            predict_label_dict.setdefault(pred_lbl, []).append(idx)
        return predict_label_dict

from model_metric import Model_metric
from feature_data.const_data import *
from nd_utils.str_util import get_name

if __name__ == '__main__':
    # Evaluate the title-based agglomerative clustering on every experiment
    # XML file: print pairwise precision / recall / F1 per file, then the
    # average over all files, and (optionally) write the rows to CSV.

    cluster_score_list = []   # per-file [precision, recall, f1] for averaging
    new_csv_row_list = []     # per-file CSV rows: [name, precision, recall, f1]
    for  xml_file_name in xml_experiment_file_name_list:
        new_csv_row = []
        score_list = []
        xml_file_path = xml_dir + xml_file_name
        # xml_file_path = xml_dir + xml_file_name2
        xml2dict = Xml2Dict(xml_file_path)

        # Build title features (stemmed tokens, tf-idf vectors), load the
        # ground-truth answers, and run the final clustering stage.
        feature_title_extration = Feature_title_extration(xml2dict, title_param='stem', vectorizer='tfidf')
        answer_extration = Answer_extration(xml2dict)
        last_stage_clust = Clust_title_stage(feature_title_extration, answer_extration)

        # print first_stage_clust.cluster_list

        # print_subgraphs(second_stage_clust)

        # print_subgraphs(first_stage_clust)
        # print len(last_stage_clust.predict_paperidx_cluster_list), last_stage_clust.true_cluster_num
        # paperidx_pairset_list = get_paperidx_pairset_list(last_stage_clust.true_paperidx_cluster_list)
        #
        # predict_paperidx_pairset_list = get_paperidx_pairset_list(last_stage_clust.predict_paperidx_cluster_list)
        #

        # model_metric = Model_metric(paperidx_pairset_list, predict_paperidx_pairset_list)
        # Score predicted clusters against the ground-truth clusters.
        model_metric = Model_metric(last_stage_clust.true_paperidx_cluster_list, last_stage_clust.predict_paperidx_cluster_list)

        print model_metric.pairwise_precision, model_metric.pairwise_recall, model_metric.pairwise_f1

        score_list.append( model_metric.pairwise_precision )
        score_list.append( model_metric.pairwise_recall )
        score_list.append( model_metric.pairwise_f1 )
        new_csv_row.append( get_name( xml_file_name) )
        # new_csv_row.append( get_name(xml_file_name2) )

        # new_csv_row.append( str(model_metric.pairwise_precision))
        # new_csv_row.append( str(model_metric.pairwise_recall))
        # new_csv_row.append( str(model_metric.pairwise_f1))

        new_csv_row.append( model_metric.pairwise_precision )
        new_csv_row.append( model_metric.pairwise_recall )
        new_csv_row.append( model_metric.pairwise_f1 )

        cluster_score_list.append( score_list )
        new_csv_row_list.append( new_csv_row )
    # Column-wise mean over all files: [avg precision, avg recall, avg f1].
    arr = np.asarray( cluster_score_list )
    print np.mean( arr, axis=0 )
    new_csv_row = []
    new_csv_row.append('avg')
    new_csv_row.extend(np.mean( arr, axis=0 ))
    new_csv_row_list.append( new_csv_row )

    # write_csv_f('name_disam', new_csv_row_list, fileHeader1)
"""
Feature_org_venue_extration
[ 0.86716759  0.80810362  0.8278691 ]
"""