#!/usr/bin/env python
# -*- coding: utf8 -*-
# Filename: src\evaluation_rule\evaluation_rule.py
# Author: xul - 294739212@qq.com
# Create: 2017-03-12 16:20:02
# Description: evaluate rule


import networkx as nx
import word2vec
import re
import numpy as np
import collections
import cPickle as pickle
import stopwords
import logging
logger = logging.getLogger('debug')

import src.global_var

EPS = 1e-6
vector_file = 'data/vector_data.txt'
test_positive_file = 'data/test_p.txt'
test_negative_file = 'data/test_n.txt'
rule_dir = 'data/rule/'
cluster_graph_dir = 'data/cluster_graph/'


def get_all_test_data(lock, type_):
    '''
    Load the positive and negative test sentences for one relation type.

    The raw test files are parsed only once: the cleaned per-type word
    lists are cached in src.global_var (guarded by ``lock``), and later
    calls just read the cache.

    :param lock: lock protecting the shared src.global_var cache
    :param type_: relation-type key selecting which cached lines to return
    :return: tuple (test_positive_data, test_negative_data); each is a
             list of sentences, each sentence a list of words
    '''

    def connect_entity_words(train_data):
        '''
        Take one line of data and keep multi-word entities intact by
        joining their words with underscores.

        Entities marked ====entity@tag++++ are replaced by the joined
        entity text; entities marked ____entity---- are re-inserted with
        a '-i-' prefix so the position marker survives tokenization.
        '''
        m_list = re.findall(r'====(.+?)@.+?\+\+\+\+', train_data)
        m_list = [m_i.replace(' ', '_') for m_i in m_list]
        m_list = [m_i.replace('\\', '') for m_i in m_list]
        for i in xrange(len(m_list)):
            # Substitute one occurrence at a time so the i-th match gets
            # the i-th underscored replacement.
            train_data = re.sub(r'====(.+?)@.+?\+\+\+\+', m_list[i], train_data, 1)
        m_list = re.findall(r'____(.+?)----', train_data)
        m_list = [m_i.replace(' ', '_') for m_i in m_list]
        m_list = [m_i.replace('\\', '') for m_i in m_list]
        for i in xrange(len(m_list)):
            train_data = re.sub(r'____(.+?)----', '-i-'+m_list[i], train_data, 1)
        return train_data

    def del_irrelevant_char(line):
        '''
        Remove irrelevant characters from the string (punctuation,
        digits, and the manually inserted ==== / ++++ markers) and
        return the remaining words as a list.
        '''
        word_list_res = []
        punctuation = '!"#$%&\'()*+,./:;<=>?@[\\]^`{|}~'    # adapted from string.punctuation
        word_list = line.translate(None, punctuation).split()
        for word in word_list:
            if word.isdigit():
                # Pure numbers carry no signal for the word vectors.
                pass
            elif word.startswith('===='):
                word_list_res.append(word[4:])
            elif word.endswith('++++'):
                word_list_res.append(word[:-4])
            else:
                word_list_res.append(word)
        return word_list_res

    def filter_nonprintable_char(src_word):
        '''
        The word2vec model is trained on English text, so strip every
        character whose ordinal falls outside the ASCII range 0-127.
        '''
        # ord(c) is always >= 0, so only the upper bound needs checking
        # (the original ``ord(c) >= 0`` test was dead code).
        return ''.join(c for c in src_word if ord(c) < 128)

    def clean_sentence_field(raw_field):
        '''
        Run one raw sentence field through entity joining, punctuation
        stripping and ASCII filtering; drop words that become empty.
        '''
        printable_word_list = []
        for word in del_irrelevant_char(connect_entity_words(raw_field)):
            printable_word = filter_nonprintable_char(word)
            if len(printable_word) != 0:
                printable_word_list.append(printable_word)
        return printable_word_list

    def storage_test_positive_data_in_dict(test_lines):
        '''
        Group the cleaned positive test sentences by relation type.

        :param test_lines: raw lines of the positive test file
        :return: dict mapping relation type -> list of word lists
        '''
        type_to_test_lines = collections.defaultdict(list)
        for line in test_lines:
            word_list = line.split('\t:\t')
            # Positive file layout: field 1 is the type, field 3 the text.
            type_to_test_lines[word_list[1]].append(clean_sentence_field(word_list[3]))
        return type_to_test_lines

    def storage_test_negative_data_in_dict(test_lines):
        '''
        Group the cleaned negative test sentences by relation type.

        :param test_lines: raw lines of the negative test file
        :return: dict mapping relation type -> list of word lists
        '''
        type_to_test_lines = collections.defaultdict(list)
        for line in test_lines:
            word_list = line.split('\t:\t')
            # Negative file layout: field 0 is the type, field 4 the text.
            type_to_test_lines[word_list[0]].append(clean_sentence_field(word_list[4]))
        return type_to_test_lines

    with lock:
        one_test_positive_file_lines = src.global_var.get_value(type_+'type_to_test_positive_file_lines')
        if one_test_positive_file_lines is None:
            # First caller parses the whole file and populates the cache
            # for every type it contains.
            with open(test_positive_file) as f:  # was left unclosed before
                test_positive_file_lines = f.readlines()
            type_to_test_positive_file_lines = storage_test_positive_data_in_dict(test_positive_file_lines)
            for type__ in type_to_test_positive_file_lines:
                src.global_var.set_value(type__+'type_to_test_positive_file_lines',
                                         type_to_test_positive_file_lines[type__])
            one_test_positive_file_lines = type_to_test_positive_file_lines[type_]
    test_positive_data = one_test_positive_file_lines

    with lock:
        one_test_negative_file_lines = src.global_var.get_value(type_+'type_to_test_negative_file_lines')
        if one_test_negative_file_lines is None:
            with open(test_negative_file) as f:  # was left unclosed before
                test_negative_file_lines = f.readlines()
            type_to_test_negative_file_lines = storage_test_negative_data_in_dict(test_negative_file_lines)
            for type__ in type_to_test_negative_file_lines:
                src.global_var.set_value(type__+'type_to_test_negative_file_lines',
                                         type_to_test_negative_file_lines[type__])
            one_test_negative_file_lines = type_to_test_negative_file_lines[type_]
    test_negative_data = one_test_negative_file_lines

    # NOTE(review): the return value of stopwords.clean() is discarded
    # here; if clean() returns a new list instead of mutating in place,
    # these calls are no-ops -- confirm against the stopwords API before
    # relying on stopword removal.
    for positive_data in test_positive_data:
        stopwords.clean(positive_data, 'english')
    for negative_data in test_negative_data:
        stopwords.clean(negative_data, 'english')

    return test_positive_data, test_negative_data


def get_test_vectors(lock, test_positive_data, test_negative_data):
    '''
    利用训练好的word2vec model将正负例测试数据中的每一个词替换为向量
    '''
    def transform_model_to_dict(model):
        vocab = model.vocab
        length = len(vocab)
        model_dict = {vocab[i]: model.get_vector(vocab[i]) for i in xrange(length)}
        return model_dict

    with lock:
        model_dict = src.global_var.get_value('model_dict')
        if model_dict is None:
            model = word2vec.load(vector_file)
            model_dict = transform_model_to_dict(model)
            src.global_var.set_value('model_dict', model_dict)

    print 'load model complete'

    test_positive_vectors_list = []
    test_negative_vectors_list = []

    for test_positive_data_i in test_positive_data:
        test_positive_vectors = []
        position = 'left'
        for word in test_positive_data_i:
            if word.startswith('-i-'):
                position = 'right'
                continue
            try:
                vector = model_dict[word]
                test_positive_vectors.append((vector, position))
            except KeyError:
                logger.debug(word + ' not in vector file')
        test_positive_vectors_list.append(test_positive_vectors)

    for test_negative_data_i in test_negative_data:
        test_negative_vectors = []
        position = 'left'
        for word in test_negative_data_i:
            if word.startswith('-i-'):
                position = 'right'
                continue
            try:
                vector = model_dict[word]
                test_negative_vectors.append((vector, position))
            except KeyError:
                logger.debug(word + ' not in vector file')
        test_negative_vectors_list.append(test_negative_vectors)

    return test_positive_vectors_list, test_negative_vectors_list


def get_test_clusters(cluster_graph, test_positive_vectors_list, test_negative_vectors_list):
    '''
    Map each (vector, position) pair of every test sentence to the
    cluster_graph nodes whose cluster covers the vector, i.e. whose
    center lies within the node's radius of the vector (up to EPS).

    :param cluster_graph: graph whose nodes carry 'center' and 'radius'
    :param test_positive_vectors_list: positive sentences as vector lists
    :param test_negative_vectors_list: negative sentences as vector lists
    :return: (positive, negative) lists; each sentence becomes a list of
             [(node, position), ...] groups, one group per covered word
    '''
    center = nx.get_node_attributes(cluster_graph, 'center')
    radius = nx.get_node_attributes(cluster_graph, 'radius')

    def euclidean(point_a, point_b):
        return np.sqrt(np.sum((point_a - point_b)**2))

    def covering_nodes(vector):
        # NOTE(review): assumes graph nodes are integers 0..len-1 so the
        # attribute dicts can be indexed positionally -- confirm upstream.
        return [idx for idx in xrange(len(center))
                if euclidean(vector, center[idx]) - radius[idx] < EPS]

    def sentence_to_clusters(sentence_vectors):
        '''Translate one sentence's vectors into tagged cluster groups.'''
        clusters = []
        for vector, position in sentence_vectors:
            nodes = covering_nodes(vector)
            if nodes:
                tagged = [(node, position) for node in nodes]
                logger.debug(tagged)
                clusters.append(tagged)
        return clusters

    test_positive_clusters_list = [sentence_to_clusters(vectors)
                                   for vectors in test_positive_vectors_list]
    test_negative_clusters_list = [sentence_to_clusters(vectors)
                                   for vectors in test_negative_vectors_list]

    return test_positive_clusters_list, test_negative_clusters_list


def count_classfied_result(rules, test_positive_clusters_list, test_negative_clusters_list):
    '''
    Classify every test sentence against the rule paths and tally the
    confusion-matrix counts.

    A sentence is classified positive when at least one rule path is an
    ordered subsequence of its cluster groups.

    :param rules: list of rule paths (each a list of path elements)
    :param test_positive_clusters_list: cluster groups of positive sentences
    :param test_negative_clusters_list: cluster groups of negative sentences
    :return: (pos->pos, pos->neg, neg->pos, neg->neg, per-positive 0/1 list)
    '''
    def is_sentence_over_path(path, sentence):
        '''
        Return True when ``path`` threads through ``sentence`` in order,
        each path element appearing in the matching sentence group.

        example:
            sentence = [[1], [2,3], [5]]
            path = [1,2,5]
            -> True
        '''
        if len(path) > len(sentence):
            return False
        matched = 0
        for group in sentence:
            if matched == len(path):
                break
            if path[matched] in group:
                matched += 1
        return matched == len(path)

    def is_classified_positive(sentence):
        # Any single matching rule is enough.
        return any(is_sentence_over_path(path, sentence) for path in rules)

    p_classfied_to_p_nums = 0
    n_classfied_to_p_nums = 0
    p_classfied_to_n_nums = 0
    n_classfied_to_n_nums = 0
    test_positive_clusters_type_detail_list = []

    for sentence in test_positive_clusters_list:
        if is_classified_positive(sentence):
            test_positive_clusters_type_detail_list.append(1)
            p_classfied_to_p_nums += 1
        else:
            test_positive_clusters_type_detail_list.append(0)
            p_classfied_to_n_nums += 1

    for sentence in test_negative_clusters_list:
        if is_classified_positive(sentence):
            n_classfied_to_p_nums += 1
        else:
            n_classfied_to_n_nums += 1

    return p_classfied_to_p_nums, p_classfied_to_n_nums, n_classfied_to_p_nums, n_classfied_to_n_nums,test_positive_clusters_type_detail_list


def calc_precision(rules, test_positive_clusters_list, test_negative_clusters_list, type_):
    '''
    compute precision
    '''
    p_classfied_to_p_nums, p_classfied_to_n_nums, n_classfied_to_p_nums, n_classfied_to_n_nums, test_positive_clusters_type_detail_list= \
        count_classfied_result(rules, test_positive_clusters_list, test_negative_clusters_list)

    print type_+'\t:\t'+'TP:', p_classfied_to_p_nums
    print type_+'\t:\t'+'FP:', n_classfied_to_p_nums
    print type_+'\t:\t'+'TN:', p_classfied_to_n_nums
    print type_+'\t:\t'+'FN:', n_classfied_to_n_nums

    if p_classfied_to_p_nums + n_classfied_to_p_nums == 0:
        precision = 0.0
    else:
        precision = float(p_classfied_to_p_nums) / float(p_classfied_to_p_nums + n_classfied_to_p_nums)

    if p_classfied_to_p_nums + n_classfied_to_n_nums == 0:
        recall = 0.0
    else:
        recall = float(p_classfied_to_p_nums) / float(p_classfied_to_p_nums + p_classfied_to_n_nums)

    if precision + recall == 0.0:
        F1 = 0.0
    else:
        F1 = precision * recall * 2 / (precision + recall)

    return precision, recall, F1, test_positive_clusters_type_detail_list


def evaluate(lock, type_):
    type_to_rules = pickle.load(open(rule_dir+type_.replace(' ', '_'), 'rb'))
    print 'load rules complete'

    cluster_graph = pickle.load(open(cluster_graph_dir+type_.replace(' ', '_'), 'rb'))
    print 'load cluster graph complete'

    test_positive_data, test_negative_data = get_all_test_data(lock, type_)
    # print test_negative_data
    print 'load test data complete'

    test_positive_vectors_list, test_negative_vectors_list = get_test_vectors(lock, test_positive_data, test_negative_data)
    test_positive_clusters_list, test_negative_clusters_list = get_test_clusters(cluster_graph, \
        test_positive_vectors_list, test_negative_vectors_list)

    print 'start calculating precision, recall, F1'
    precision, recall, F1, test_positive_clusters_type_detail_list = calc_precision(
        type_to_rules[type_], test_positive_clusters_list, test_negative_clusters_list, type_)
    print 'precision is:', precision
    print 'recall is:', recall
    print 'F1 score is:', F1

    for i in xrange(len(test_positive_data)):
        print type_+'\t:\t'+str(test_positive_clusters_type_detail_list[i])+'\t:\t'+str(' '.join(test_positive_data[i]))

