import sys
import codecs
import json
import time
from multiprocessing import Pool, Lock, Value
from collections import defaultdict, OrderedDict
import copy
from stanfordcorenlp import StanfordCoreNLP
import os
import re

# Python 2 only: force UTF-8 as the process-wide default string encoding so
# implicit str<->unicode conversions don't raise UnicodeDecodeError.
# `reload` is a builtin only on Python 2; unguarded, these two lines make the
# whole script crash with NameError/AttributeError on Python 3, where the
# default encoding is already UTF-8 and the hack is unnecessary.
if sys.version_info[0] == 2:
    reload(sys)
    sys.setdefaultencoding('utf8')


def extract_coref_each_story(_tokens, _corefs):
    """Collect coreference clusters for one story.

    Args:
        _tokens: unused (kept for signature compatibility with callers).
        _corefs: iterable of clusters; each cluster is a list of mention
            dicts carrying at least a 'text' key, with the head mention
            first (CoreNLP-style output).

    Returns:
        Dict mapping a 1-based running index to the list of lower-cased
        mention texts of each kept cluster. Clusters whose head mention
        is longer than three space-separated tokens are dropped.
    """
    clusters = {}
    kept = 0
    for chain in _corefs:
        head = chain[0]
        # Discard clusters headed by long mentions (more than 3 tokens).
        if len(head['text'].split(' ')) > 3:
            continue
        kept += 1
        clusters[kept] = [mention['text'].lower() for mention in chain]
    return clusters


def extract_coref(hypos):
    """Parse JSONL hypothesis lines and gather per-story coref clusters.

    Args:
        hypos: iterable of JSON strings, each with a 'corefs' mapping
            (cluster id -> list of mention dicts).

    Returns:
        List of non-empty cluster dicts, one per story that retained at
        least one cluster after filtering.
    """
    results = []
    for line in hypos:
        record = json.loads(line.strip())
        clusters = extract_coref_each_story(None, record['corefs'].values())
        # Stories where every cluster was filtered out are skipped.
        if clusters:
            results.append(clusters)
    return results


def calc_coref_statstics(coref_data):
    """Compute corpus-level coreference statistics.

    Args:
        coref_data: list of per-story cluster dicts (index -> list of
            mention strings), as produced by extract_coref. Each dict is
            expected to be non-empty (extract_coref filters empties).

    Returns:
        OrderedDict with keys 'avg_coref_chains', 'avg_cluster_len',
        'avg_cluster_unique_len' (all rounded to 6 places) and
        'total_coref_chains'. All zeros when coref_data is empty.
    """
    metrics = OrderedDict()
    total_example = len(coref_data)
    # Guard against empty input: the original divided by total_example
    # unconditionally and raised ZeroDivisionError on an empty corpus.
    if total_example == 0:
        metrics['avg_coref_chains'] = 0.0
        metrics['avg_cluster_len'] = 0.0
        metrics['avg_cluster_unique_len'] = 0.0
        metrics['total_coref_chains'] = 0
        return metrics

    len_each_examples = [len(coref_item) for coref_item in coref_data]
    total_coref_chains = sum(len_each_examples)
    avg_coref_chains = total_coref_chains * 1.0 / total_example

    avg_cluster_len_each_examples = []
    avg_cluster_unique_len_each_examples = []
    for coref_item in coref_data:
        # Per-story averages; each dict is non-empty by construction.
        cluster_number = len(coref_item)
        len_each_cluster = [len(coref_item[e]) for e in coref_item]
        avg_cluster_len_each_examples.append(
            sum(len_each_cluster) * 1.0 / cluster_number)

        # Unique mentions measure lexical diversity within a cluster.
        unique_len_each_cluster = [len(set(coref_item[e])) for e in coref_item]
        avg_cluster_unique_len_each_examples.append(
            sum(unique_len_each_cluster) * 1.0 / cluster_number)

    avg_cluster_len = sum(avg_cluster_len_each_examples) * 1.0 / total_example
    avg_cluster_unique_len = sum(avg_cluster_unique_len_each_examples) * 1.0 / total_example

    metrics['avg_coref_chains'] = round(avg_coref_chains, 6)
    metrics['avg_cluster_len'] = round(avg_cluster_len, 6)
    metrics['avg_cluster_unique_len'] = round(avg_cluster_unique_len, 6)
    metrics['total_coref_chains'] = total_coref_chains
    return metrics


def main():
    """Read a JSONL hypothesis file (argv[1]), compute coreference
    statistics, print them, and write them to '<hyp_path>.coref'.
    """
    hyp_path = sys.argv[1]

    # Use a context manager so the file handle is closed even on error
    # (the original leaked the handle returned by codecs.open).
    with codecs.open(hyp_path, 'r', encoding='utf8') as f:
        hypos = f.readlines()
    print('total number of test example', len(hypos))

    coref_data = extract_coref(hypos)
    metrics = calc_coref_statstics(coref_data)

    # Relies on OrderedDict preserving insertion order of the four metrics.
    print_distinct = 'avg_coref_chains={:.6f}, avg_cluster_len={:.6f}, avg_cluster_unique_len={:.6f}, total_coref_chains={:d}'.format(*metrics.values())
    print('=' * 50)
    print(print_distinct)
    print('=' * 50)

    # write to file (single-argument os.path.join was a no-op; dropped)
    with open(hyp_path + '.coref', 'w') as f:
        f.write(print_distinct)


if __name__ == '__main__':
    main()