# -*- coding:utf-8 -*-
from feature_data.const_data import *
from feature_data.XmlData2DictData import Xml2Dict

from feature_data.Dict2OrgVenue_Graphfeature import Feature_org_venue_extration
from feature_data.answer_extration import Answer_extration

from nd_utils.Graph_Cluster_util import *
from nd_utils.networkx_util import *


class Graph_org_venue_stage():
    """Second clustering stage driven by shared org/venue evidence.

    Overlays the org/venue co-occurrence pairs on the initial paper graph,
    then decides -- by comparing the resulting connected-component count with
    the ground-truth cluster count -- whether to keep the merged graph or
    fall back to the initial one.
    """

    def __init__(self, feature_org_venue_extration, answer_extration):
        self.feature_org_venue_extration = feature_org_venue_extration

        self.init_graph = feature_org_venue_extration.init_graph
        self.cluster_co_org_venue_pair_set_list = \
            feature_org_venue_extration.cluster_co_org_venue_pair_set_list

        self.second_cluster_graph = self._get_cluster_graph()

        self.true_cluster_num = answer_extration.true_cluster_num
        self.true_paperidx_cluster_list = answer_extration.true_paperidx_cluster_list

        # Must be computed before _get_subgraph_list(), which reads the flag.
        self.simulation_occur_flag = self._get_simulation_occur_flag()
        # NOTE: despite the name, subgraph_list is effectively the cluster list.
        self.subgraph_list = self._get_subgraph_list()
        self.previous_subgraph_nodes_list = self._get_second_subgraph_nodes_list()

    def _get_cluster_graph(self):
        # Overlay the org/venue co-occurrence pairs (edge weight 2) on a
        # deep copy of the initial graph, leaving the original untouched.
        graph_copy = deepcopy_graph(self.init_graph)
        return add_pair_set_to_graph(
            self.cluster_co_org_venue_pair_set_list, graph_copy, _weight=2)

    def _get_simulation_occur_flag(self):
        # Side effect: records the predicted cluster count for later inspection.
        self.predict_cluster_num = get_subgraph_num(self.second_cluster_graph)
        return self.predict_cluster_num >= self.true_cluster_num

    def _get_subgraph_list(self):
        # When merging collapsed below the true cluster count, fall back to
        # the unmerged initial graph.
        source = self.second_cluster_graph if self.simulation_occur_flag \
            else self.init_graph
        return get_subgraphs(source)

    def _get_second_subgraph_nodes_list(self):
        return get_subgraph_nodes_list(self.subgraph_list)

def print_clust_total(cluster_list):
    paper_pair_set_list = cluster_list
    sum = 0

    union_set = []

    set_length_list = []
    for paper_pair_set in paper_pair_set_list:
        # print list(paper_pair_set), len(paper_pair_set)
        print sorted(list(paper_pair_set)), len(paper_pair_set)
        set_length_list.append(len(paper_pair_set))
        sum += len(paper_pair_set)

        union_set.extend(list(paper_pair_set))
    print "sum: ", sum
    # print sorted(list(union_set)), len(list(union_set))
    print sorted(list(set(union_set))) , len(set(union_set))
    print set_length_list

def print_subgraphs(first_stage_clust):
    for subgraph in first_stage_clust.subgraph_list:
        print subgraph.nodes(), len(subgraph.nodes())

from nd_utils.trans_clusteridxlist_paperidx_pairset import get_paperidx_pairset_list
from model_metric import Model_metric
from nd_utils.str_util import get_name
from nd_utils.write_csv import write_csv_f

if __name__ == '__main__':
    # For each experiment XML file: run the second (org/venue) clustering
    # stage, score its clusters against the ground truth with pairwise
    # precision/recall/F1, and collect one CSV row per file.
    new_csv_row_list = []
    for xml_file_name in xml_experiment_file_name_list:
        xml2dict = Xml2Dict(xml_dir + xml_file_name) # 1.0 0.345161290323 0.513189448441
        feature_org_venue_extration = Feature_org_venue_extration(xml2dict)
        answer_extration = Answer_extration(xml2dict)
        second_stage_clust = Graph_org_venue_stage(feature_org_venue_extration, answer_extration)

        # print second_stage_clust.simulation_occur_flag, second_stage_clust.predict_cluster_num , second_stage_clust.true_cluster_num
        # print first_stage_clust.cluster_list

        # print_subgraphs(second_stage_clust)

        # print_subgraphs(first_stage_clust)

        # from nd_utils.matplotlib_helper import *
        #
        # plt_networkx_main_second(second_stage_clust.second_cluster_graph)

        # paperid_pairset_list = get_paperidx_pairset_list(second_stage_clust.true_paperidx_cluster_list)
        #
        # predict_paperid_pairset_list = get_paperidx_pairset_list(second_stage_clust.previous_subgraph_nodes_list)

        # Compare predicted clusters (subgraph node lists) with the ground
        # truth; Model_metric exposes pairwise precision/recall/F1.
        model_metric = Model_metric(second_stage_clust.true_paperidx_cluster_list, second_stage_clust.previous_subgraph_nodes_list )
        # CSV row layout: name, pairwise precision, recall, F1.
        new_csv_row = []
        new_csv_row.append( get_name(xml_file_name) )
        new_csv_row.append( model_metric.pairwise_precision )
        new_csv_row.append( model_metric.pairwise_recall )
        new_csv_row.append( model_metric.pairwise_f1 )

        print model_metric.pairwise_precision, model_metric.pairwise_recall, model_metric.pairwise_f1
        new_csv_row_list.append(new_csv_row)
    # fileHeader1 is imported via feature_data.const_data star import --
    # presumably the CSV header row; TODO confirm.
    write_csv_f('second_stage_prf', new_csv_row_list, fileHeader1)
    """
org 和 venue 获得 更高的 精确率
然后 通过 title VSM 提高 召回率
True
0.9836400818 0.529735682819 0.688618468146
True
1.0 0.345161290323 0.513189448441
True
1.0 0.803069053708 0.890780141844

True        这个 精确率较低， 说明存在 同一单位，同一领域 投了同一种会议的人 数据集非常复杂。 或 没有标记单位的 没有消歧
0.748659003831 0.818257956449 0.781912765106

True
1.0 0.817848410758 0.899798251513
True
0.984334203655 0.319220999153 0.482097186701
True
1.0 0.525672371638 0.689102564103
True
1.0 0.491525423729 0.659090909091
True
1.0 0.977375565611 0.988558352403
True
1.0 0.649103139013 0.787219578518
"""

"""
单独 org
True
0.860613810742 0.741189427313 0.796449704142
True
0.985994397759 0.378494623656 0.547008547009
True
1.0 0.842710997442 0.914642609299
True
0.562266167825 0.881072026801 0.686460032626
False
1.0 0.773838630807 0.87250172295
True
0.986301369863 0.487722269263 0.65269121813
True
1.0 0.933985330073 0.965865992415
True
0.995412844037 0.612994350282 0.758741258741
True
1.0 0.986425339367 0.993166287016
True
1.0 0.727578475336 0.842310188189
"""