# coding=utf8

import networkx as nx
import word2vec
import re
import numpy as np
import collections
import cPickle as pickle
import stopwords
import logging
logger = logging.getLogger('debug')

import src.global_var

from sys import argv

# Command-line arguments: script name, an unused second arg, a precision
# threshold, and the positive-case-count threshold.
# NOTE(review): this raises ValueError at import time unless argv has exactly
# 4 entries; `a`, `c` and `pmin` are never read in this file — confirm no
# other module relies on them (mine() takes precision_min as a parameter).
a,c,pmin,positive_case_num1 = argv

# Minimum number of positive validation cases a candidate path extension
# must cover to be accepted in mine().
POSITIVE_CASE_NUM = int(positive_case_num1)
# Tolerance for the "vector inside cluster sphere" distance test.
EPS = 1e-6
# Trained word2vec vectors (text format) loaded by get_validate_vectors().
vector_file = 'data/vector_data.txt'
# Positive / negative validation example files read by get_all_validate_data().
validate_positive_file = 'data/validate_p.txt'
validate_negative_file = 'data/validate_n.txt'


def get_all_validate_data(lock, type_):
    '''
    Load and preprocess the positive and negative validation data for one
    relation type.

    Parsed file contents are cached per-type in src.global_var (guarded by
    *lock*) so each file is read and parsed at most once across concurrent
    callers; later callers of the same type fetch the cached word lists.

    :param lock: lock protecting the src.global_var shared cache
    :param type_: relation type key selecting which sentences to return
    :return: (validate_positive_data, validate_negative_data); each is a
        list of sentences, each sentence a list of cleaned word strings
    '''

    def connect_entity_words(train_data):
        '''
        Keep multi-word entities intact in one raw line: replace each marked
        entity span with a single underscore-joined token.
        '''
        # Entities written as ====words@type++++ become words_with_underscores.
        m_list = re.findall(r'====(.+?)@.+?\+\+\+\+', train_data)
        m_list = [m_i.replace(' ', '_') for m_i in m_list]
        m_list = [m_i.replace('\\', '') for m_i in m_list]
        # Substitute one occurrence at a time (count=1) so the i-th match
        # receives the i-th prepared replacement, in order.
        for i in xrange(len(m_list)):
            train_data = re.sub(r'====(.+?)@.+?\+\+\+\+', m_list[i], train_data, 1)
        # Entities written as ____words---- become '-i-' + joined token; the
        # '-i-' prefix later marks the left/right boundary when vectors are
        # built in get_validate_vectors().
        m_list = re.findall(r'____(.+?)----', train_data)
        m_list = [m_i.replace(' ', '_') for m_i in m_list]
        m_list = [m_i.replace('\\', '') for m_i in m_list]
        for i in xrange(len(m_list)):
            train_data = re.sub(r'____(.+?)----', '-i-'+m_list[i], train_data, 1)
        return train_data


    def del_irrelevant_char(line):
        '''
        Strip irrelevant characters from a line: punctuation, pure-digit
        tokens, and leftover ==== / ++++ entity markers.

        :return: list of cleaned word strings
        '''
        word_list_res = []
        # Adapted from string.punctuation; note '-' and '_' are deliberately
        # kept so entity joins and '-i-' markers survive.
        punctuation = '!"#$%&\'()*+,./:;<=>?@[\\]^`{|}~'    # 借鉴了string.punctuation -> adapted from string.punctuation
        # Python 2 str.translate(None, deletechars): delete punctuation, then split.
        word_list = line.translate(None, punctuation).split()
        for word in word_list:
            if word.isdigit():
                pass  # pure numbers carry no signal for the word2vec lookup
            elif word.startswith('===='):
                word_list_res.append(word[4:])
            elif word.endswith('++++'):
                word_list_res.append(word[:-4])
            else:
                word_list_res.append(word)
        return word_list_res

    def filter_nonprintable_char(src_word):
        '''
        The word2vec model is English-only, so drop every character whose
        ordinal falls outside ASCII 0-127.  Offending characters are simply
        removed, not replaced.
        '''
        dst_word = []
        for c in src_word:
            if ord(c) >= 0 and ord(c) < 128:
                dst_word.append(c)
        return ''.join(dst_word)

    def storage_validate_positive_data_in_dict(validate_lines):
        '''
        Group cleaned positive validation sentences by relation type.

        Fields are separated by '\\t:\\t'; in the positive file the type is
        field 1 and the sentence text field 3.

        :param validate_lines: raw lines of the positive validation file
        :return: dict mapping type string -> list of word lists
        :rtype: dict(str) -> list
        '''
        type_to_validate_lines = collections.defaultdict(list)
        for line in validate_lines:
            word_list = line.split('\t:\t')
            pure_word_list = del_irrelevant_char(connect_entity_words(word_list[3]))
            printable_word_list = []
            for word in pure_word_list:
                printable_word = filter_nonprintable_char(word)
                if len(printable_word) != 0:
                    printable_word_list.append(printable_word)
            type_to_validate_lines[word_list[1]].append(printable_word_list)
        return type_to_validate_lines

    def storage_validate_negative_data_in_dict(validate_lines):
        '''
        Group cleaned negative validation sentences by relation type.

        Same as the positive variant, except the negative file keeps its
        type in field 0 and its sentence text in field 4.

        :param validate_lines: raw lines of the negative validation file
        :return: dict mapping type string -> list of word lists
        :rtype: dict(str) -> list
        '''
        type_to_validate_lines = collections.defaultdict(list)
        for line in validate_lines:
            word_list = line.split('\t:\t')
            pure_word_list = del_irrelevant_char(connect_entity_words(word_list[4]))
            printable_word_list = []
            for word in pure_word_list:
                printable_word = filter_nonprintable_char(word)
                if len(printable_word) != 0:
                    printable_word_list.append(printable_word)
            type_to_validate_lines[word_list[0]].append(printable_word_list)
        return type_to_validate_lines

    # Fetch (or, for the first caller, build and publish) the per-type
    # positive sentence cache.
    with lock:
        one_validate_positive_file_lines = src.global_var.get_value(type_+'type_to_validate_positive_file_lines')
        if one_validate_positive_file_lines is None:
            # Parse the whole file once and publish every type's slice.
            validate_positive_file_lines = open(validate_positive_file).readlines()
            type_to_validate_positive_file_lines = storage_validate_positive_data_in_dict(validate_positive_file_lines)
            for type__ in type_to_validate_positive_file_lines:
                src.global_var.set_value(type__+'type_to_validate_positive_file_lines',
                                         type_to_validate_positive_file_lines[type__])
            one_validate_positive_file_lines = type_to_validate_positive_file_lines[type_]
    validate_positive_data = one_validate_positive_file_lines

    # Same caching scheme for the negative sentences.
    with lock:
        one_validate_negative_file_lines = src.global_var.get_value(type_+'type_to_validate_negative_file_lines')
        if one_validate_negative_file_lines is None:
            validate_negative_file_lines = open(validate_negative_file).readlines()
            type_to_validate_negative_file_lines = storage_validate_negative_data_in_dict(validate_negative_file_lines)
            for type__ in type_to_validate_negative_file_lines:
                src.global_var.set_value(type__+'type_to_validate_negative_file_lines',
                                         type_to_validate_negative_file_lines[type__])
            one_validate_negative_file_lines = type_to_validate_negative_file_lines[type_]
    validate_negative_data = one_validate_negative_file_lines

    # NOTE(review): the return value of stopwords.clean is discarded, so this
    # pass only has an effect if clean() mutates its list argument in place —
    # confirm against the stopwords module.
    for positive_data in validate_positive_data:
        stopwords.clean(positive_data, 'english')
    for negative_data in validate_negative_data:
        stopwords.clean(negative_data, 'english')

    return validate_positive_data, validate_negative_data


def get_validate_vectors(lock, validate_positive_data, validate_negative_data):
    '''
    利用训练好的word2vec model将正负例测试数据中的每一个词替换为向量
    :param lock:
    '''

    def transform_model_to_dict(model):
        vocab = model.vocab
        length = len(vocab)
        model_dict = {vocab[i]: model.get_vector(vocab[i]) for i in xrange(length)}
        return model_dict

    with lock:
        model_dict = src.global_var.get_value('model_dict')
        if model_dict is None:
            model = word2vec.load(vector_file)
            model_dict = transform_model_to_dict(model)
            src.global_var.set_value('model_dict', model_dict)

    print 'load model complete'

    validate_positive_vectors_list = []
    validate_negative_vectors_list = []

    for validate_positive_data_i in validate_positive_data:
        validate_positive_vectors = []
        position = 'left'
        for word in validate_positive_data_i:
            if word.startswith('-i-'):
                position = 'right'
                continue
            try:
                vector = model_dict[word]
                validate_positive_vectors.append((vector, position))
            except KeyError:
                logger.debug(word + ' not in vector file')
        validate_positive_vectors_list.append(validate_positive_vectors)

    for validate_negative_data_i in validate_negative_data:
        validate_negative_vectors = []
        position = 'left'
        for word in validate_negative_data_i:
            if word.startswith('-i-'):
                position = 'right'
                continue
            try:
                vector = model_dict[word]
                validate_negative_vectors.append((vector, position))
            except KeyError:
                logger.debug(word + ' not in vector file')
        validate_negative_vectors_list.append(validate_negative_vectors)

    return validate_positive_vectors_list, validate_negative_vectors_list


def get_validate_clusters(cluster_graph, validate_positive_vectors_list, validate_negative_vectors_list):
    '''
    For each positive/negative validation sentence, compute which
    cluster_graph nodes every one of its word vectors falls into.

    :param cluster_graph: networkx graph whose nodes carry 'center' and
        'radius' attributes describing cluster spheres
    :param validate_positive_vectors_list: per-sentence lists of
        (vector, position) tuples for the positive data
    :param validate_negative_vectors_list: same for the negative data
    :return: (positive, negative) lists; each sentence becomes a list of
        [(node, position), ...] groups, one group per word that landed in
        at least one cluster
    '''
    def distance(point1, point2):
        '''Euclidean distance between two vectors.'''
        return np.sqrt(np.sum((point1 - point2)**2))

    def find_cluster_cover_vector(vector, center, radius):
        '''
        Return the indices of every cluster whose sphere, given by
        (center[i], radius[i]), contains *vector* within EPS tolerance.
        '''
        covering = []
        for i in xrange(len(center)):
            if distance(vector, center[i]) - radius[i] < EPS:
                covering.append(i)
        return covering

    def node_position_identify(cluster_graph_nodes, position):
        '''Tag every covering node with the word's left/right position.'''
        res = [(node, position) for node in cluster_graph_nodes]
        logger.debug(res)
        return res

    def vectors_to_clusters(vectors_list, center, radius):
        '''Map each sentence's (vector, position) list to its cluster groups.'''
        clusters_list = []
        for vectors in vectors_list:
            clusters = []
            for vector, position in vectors:
                covering = find_cluster_cover_vector(vector, center, radius)
                if len(covering) != 0:
                    clusters.append(node_position_identify(covering, position))
            clusters_list.append(clusters)
        return clusters_list

    center = nx.get_node_attributes(cluster_graph, 'center')
    radius = nx.get_node_attributes(cluster_graph, 'radius')

    # Both sides get identical treatment; the original duplicated this loop.
    validate_positive_clusters_list = vectors_to_clusters(
        validate_positive_vectors_list, center, radius)
    validate_negative_clusters_list = vectors_to_clusters(
        validate_negative_vectors_list, center, radius)

    return validate_positive_clusters_list, validate_negative_clusters_list


def mine(lock, type_, cluster_graph, node_to_sentence_num, precision_min):
    '''
    挖掘规则rule(path)
    '''
    def get_areas(cluster_graph):
        '''
        获取图中所有的点的标号
        '''
        return cluster_graph.nodes()
    
    def is_sentence_over_path(path, sentence):
        '''
        example:
            sentence = [[1], [2,3], [5]]
            path = [1,2,5]

            is_path(path, sentence) return True
        '''
        # logger.debug('path')
        # logger.debug(path)
        # logger.debug('sentence')
        # logger.debug(sentence)
        if len(path) > len(sentence):
            return False

        i=0
        j=0
        while True:
            if i == len(path):
                return True
            if j == len(sentence):
                return False
            if path[i] in sentence[j]:
                i = i + 1
                j = j + 1
            else:
                j = j + 1

    def get_positive_case_num(validate_positive_clusters_list, path, point=None, position=None):
        '''
        获取经过当前path的正例的条数
        '''
        if point is not None and position is not None:
            path = path + [(point, position)]
        positive_case_num = 0
        for validate_positive_clusters in validate_positive_clusters_list:
            if is_sentence_over_path(path, validate_positive_clusters):
                positive_case_num += 1
        return positive_case_num

    def get_negative_case_num(validate_negative_clusters_list, path, point=None, position=None):
        '''
        获取经过当前path的负例的条数
        '''
        if point is not None and position is not None:
            path = path + [(point, position)]
         
        negative_case_num = 0
        for validate_negative_clusters in validate_negative_clusters_list:
            if is_sentence_over_path(path, validate_negative_clusters):
                negative_case_num += 1
        return negative_case_num

    def calc_precision(validate_positive_clusters_list, validate_negative_clusters_list, \
        path, point=None, position=None):
        '''
        计算经过当前path（在position位置加上point点）的正例/（正例+负例）
        '''
        positive_case_num = get_positive_case_num(validate_positive_clusters_list, path, point, position)
        negative_case_num = get_negative_case_num(validate_negative_clusters_list, path, point, position)
        logger.debug('positive_case_num is %d, negative_case_num is %d' %(positive_case_num, negative_case_num))

        if positive_case_num + negative_case_num == 0:
            return 0.0
        else:
            return float(positive_case_num)/float(positive_case_num + negative_case_num)

    def all_adjacent_point():
        '''
        获取每个点的邻接点和被邻接点
        '''
        def filter_adjacent_point(point_to_adjacent_points):
            '''
            filter adjacent point that don't have a node with 
            point's node in the same sentence
            add node mention fore-mentiond
            '''
            filter_dict = collections.defaultdict(list)
            d = collections.defaultdict(tuple)

            for point, adjacent_points in point_to_adjacent_points.iteritems():
                all_nodes_in_point = cluster_graph.node[point]['left'] + cluster_graph.node[point]['right']
                sentence_num_of_all_1 = [node_to_sentence_num[node] for node in all_nodes_in_point]

                for i in xrange(len(adjacent_points)):
                    all_nodes_in_adjacent_point = cluster_graph.node[adjacent_points[i]]['left'] + \
                        cluster_graph.node[adjacent_points[i]]['right']
                    sentence_num_of_all_2 = [node_to_sentence_num[node] for node in all_nodes_in_adjacent_point]

                    sentence_num_intersection = set(sentence_num_of_all_1).intersection(set(sentence_num_of_all_2))
                    if not sentence_num_intersection:
                        filter_dict[point].append(i)
                    else:
                        point_node = -1
                        adjacent_point_node = -1
                        sentence_num = list(sentence_num_intersection)[0] # only find one, not all
                        for node in all_nodes_in_point:
                            if node_to_sentence_num[node] == sentence_num:
                                point_node = node
                                break
                        for node in all_nodes_in_adjacent_point:
                            if node_to_sentence_num[node] == sentence_num:
                                adjacent_point_node = node
                                break
                        d[(point, adjacent_points[i])] = (point_node, adjacent_point_node)

            # filter and add point_node adjacent_point_node
            for point, filter_list in filter_dict.iteritems():
                for i in sorted(filter_list, reverse=True):
                    del point_to_adjacent_points[point][i]
            # logger.debug('point_to_adjacent_points')
            # logger.debug(point_to_adjacent_points)
            for point, adjacent_points in point_to_adjacent_points.iteritems():
                for i in xrange(len(adjacent_points)):
                    adjacent_point = adjacent_points[i]
                    point_node, adjacent_point_node = d[(point, adjacent_point)]
                    point_to_adjacent_points[point][i] = (adjacent_point, point_node, adjacent_point_node)
            return point_to_adjacent_points

        point_to_adjacent_points = collections.defaultdict(list)
        adjacency_list = cluster_graph.adjacency_list()
        for i in xrange(len(adjacency_list)):
            point_to_adjacent_points[i] = adjacency_list[i]
        return filter_adjacent_point(point_to_adjacent_points)

    def get_adjacent_point(path, point_to_adjacent_points):
        '''
        获取path首尾的邻接点
        '''
        if len(path) == 0:
            return []
        else:
            # logger.debug('point_to_adjacent_points')
            # logger.debug(point_to_adjacent_points)
            adjacent_points_right = point_to_adjacent_points[path[-1][0]]
            return adjacent_points_right

    def add_point_to_path(path, point_need_add_to_path, adjacent_point_position):
        '''
        将point_position位置的point_need_add_to_path加入到path中
        '''
        path.append((point_need_add_to_path, adjacent_point_position))
        
    def get_node_to_position():
        '''
        node : 'left'/'right'
        '''
        node_to_position = {}
        points = cluster_graph.nodes()
        for point in points:
            left_nodes = cluster_graph.node[point]['left']
            for node in left_nodes:
                node_to_position[node] = 'left'
            right_nodes = cluster_graph.node[point]['right']
            for node in right_nodes:
                node_to_position[node] = 'right'
        return node_to_position

    if True: 
        validate_positive_data, validate_negative_data = get_all_validate_data(lock, type_)
        validate_positive_vectors_list, validate_negative_vectors_list = \
            get_validate_vectors(lock, validate_positive_data, validate_negative_data)
        validate_positive_clusters_list, validate_negative_clusters_list = \
            get_validate_clusters(cluster_graph, validate_positive_vectors_list, \
            validate_negative_vectors_list)
        pickle.dump(validate_negative_clusters_list, open('data/validate_negative_clusters_list.txt', 'wb'))
        pickle.dump(validate_positive_clusters_list, open('data/validate_positive_clusters_list.txt', 'wb'))
    else:
        validate_positive_clusters_list = pickle.load(open('data/validate_positive_clusters_list.txt', 'rb'))
        validate_negative_clusters_list = pickle.load(open('data/validate_negative_clusters_list.txt', 'rb'))
    print 'load validate data complete'
    
    point_to_adjacent_points = all_adjacent_point()

    paths = []
    areas = get_areas(cluster_graph)
    logger.debug('area length is %s' %(len(areas)))

    node_to_position = get_node_to_position()

    for point_a in areas:
        if len(cluster_graph.node[point_a]['left']) != 0:
            path = [(point_a, 'left')]
            precision_last = 0.0

            accuracy_improving = True
            while accuracy_improving:
                adjacent_points = get_adjacent_point(path, point_to_adjacent_points)
                precision_cur = 0.0
                flag = False
                for i in xrange(len(adjacent_points)):
                    point_b = adjacent_points[i][0]
                    adjacent_point_node = adjacent_points[i][2]
                    point_position = node_to_position[adjacent_point_node]
                    if node_to_position[adjacent_points[i][1]]=='left' and path[-1][1]=='right':
                        continue
                    if (point_b, point_position) in path:
                        continue
                    precision_tmp = calc_precision(validate_positive_clusters_list, validate_negative_clusters_list,\
                        path, point_b, point_position)
                    positive_case_num = get_positive_case_num(validate_positive_clusters_list, path, point_b, point_position)
                    if precision_tmp >= precision_cur and positive_case_num >= POSITIVE_CASE_NUM:
                        logger.debug('precision is %f' %(precision_tmp))
                        precision_cur = precision_tmp
                        point_need_add_to_path = point_b
                        point_need_add_position = point_position
                        flag = True
                if  flag and (point_need_add_to_path, point_need_add_position) not in path and precision_cur > precision_last:
                    add_point_to_path(path, point_need_add_to_path, point_need_add_position)
                    if precision_cur >= precision_min:
                        accuracy_improving = False
                    precision_last = precision_cur
                else:
                    accuracy_improving = False

            logger.debug(path)
            if precision_last > precision_min:
                paths.append(path)

            logger.debug('repeat...')

        if len(cluster_graph.node[point_a]['right']) != 0:
            path = [(point_a, 'right')]
            precision_last = 0.0

            accuracy_improving = True
            while accuracy_improving:
                adjacent_points = get_adjacent_point(path, point_to_adjacent_points)
                precision_cur = 0.0
                flag = False
                for i in xrange(len(adjacent_points)):
                    point_b = adjacent_points[i][0]
                    adjacent_point_node = adjacent_points[i][2]
                    point_position = node_to_position[adjacent_point_node]
                    if node_to_position[adjacent_points[i][1]]=='left' and path[-1][1]=='right':
                        continue
                    if (point_b, point_position) in path:
                        continue
                    precision_tmp = calc_precision(validate_positive_clusters_list, validate_negative_clusters_list,\
                        path, point_b)
                    positive_case_num = get_positive_case_num(validate_positive_clusters_list, path, point_b)
                    if precision_tmp >= precision_cur and positive_case_num >= POSITIVE_CASE_NUM:
                        logger.debug('precision is %f' %(precision_tmp))
                        precision_cur = precision_tmp
                        point_need_add_to_path = point_b
                        point_need_add_position = point_position
                        flag = True
                if flag and (point_need_add_to_path, point_need_add_position) not in path and precision_cur > precision_last:
                    add_point_to_path(path, point_need_add_to_path, point_need_add_position) 
                    if precision_cur >= precision_min:
                        accuracy_improving = False
                    precision_last = precision_cur
                else:
                    accuracy_improving = False

            logger.debug(path)
            if precision_last > precision_min:
                paths.append(path)

            logger.debug('repeat...')

        type_to_rules = {type_ : paths}
        pickle.dump(type_to_rules, open('data/rule/' + type_.replace(' ', '_'), 'wb'))

    return paths

