# coding=utf8

import networkx as nx
import word2vec
import numpy as np
import stopwords
import re
import collections
import logging
logger = logging.getLogger('debug')

import src.global_var

vector_file = 'data/vector_data.txt'
cluster_train_file = 'data/train_p.txt'

def get_train_data(lock, type_):
    '''
    input type
    output all train data of the type
    :param lock:
    '''
    
    def connect_entity_words(train_data):
        '''
        接受一条训练数据，保留实体的完整性，在多个词语之间加上下划线
        '''
        m_list = re.findall(r'====(.+?)@.+?\+\+\+\+', train_data)
        m_list = [m_i.replace(' ', '_') for m_i in m_list]
        m_list = [m_i.replace('\\', '') for m_i in m_list]
        for i in xrange(len(m_list)):
            train_data = re.sub(r'====(.+?)@.+?\+\+\+\+', m_list[i], train_data, 1)
        m_list = re.findall(r'____(.+?)----', train_data)
        m_list = [m_i.replace(' ', '_') for m_i in m_list]
        m_list = [m_i.replace('\\', '') for m_i in m_list]
        for i in xrange(len(m_list)):
            train_data = re.sub(r'____(.+?)----', '-i-'+m_list[i], train_data, 1)
        return train_data


    def del_irrelevant_char(line):
        '''
        取出字符串无关的字符，包括（标点符号、数字、人为插入的====和++++）
        '''
        word_list_res = []
        punctuation = '!"#$%&\'()*+,./:;<=>?@[\\]^`{|}~'    # 借鉴了string.punctuation
        word_list = line.translate(None, punctuation).split()
        for word in word_list:
            if word.isdigit():
                pass
            elif word.startswith('===='):
                word_list_res.append(word[4:])
            elif word.endswith('++++'):
                word_list_res.append(word[:-4])
            else:
                word_list_res.append(word)
        return word_list_res

    def filter_nonprintable_char(src_word):
        '''
        因为做的是英文的word2vec，所以过滤ascii不在0-127之间的字符
        此处的做法是直接将这些字符去掉
        '''
        dst_word = []
        for c in src_word:
            if ord(c) >= 0 and ord(c) < 128:
                dst_word.append(c)
        return ''.join(dst_word)

    def storage_train_data_in_dict(lines):
        '''
        将训练数据以type-train data的形式存储
        :param lines: 训练数据
        :return: 将训练数据以type-train data的形式存储的字典
        :rtype: dict(str) -> list
        '''
        type_to_lines = collections.defaultdict(list)
        for line in lines:
            word_list = line.split('\t:\t')
            pure_word_list = del_irrelevant_char(connect_entity_words(word_list[3]))
            printable_word_list = []
            for word in pure_word_list:
                printable_word = filter_nonprintable_char(word)
                if len(printable_word) != 0:
                    printable_word_list.append(printable_word)
            type_to_lines[word_list[1]].append(printable_word_list)

        return type_to_lines

    with lock:
        one_type_to_lines = src.global_var.get_value(type_ + 'type_to_lines')
        if one_type_to_lines is None:
            lines = open(cluster_train_file).readlines()
            type_to_lines = storage_train_data_in_dict(lines)
            for type__ in type_to_lines:
                src.global_var.set_value(type__ + 'type_to_lines', type_to_lines[type__])
        one_type_to_lines = type_to_lines[type_]
    print 'load train data complete'
    # print 'one_type_to_lines', len(one_type_to_lines)
    return one_type_to_lines


def get_train_vectors(lock, train_data):
    '''
    根据训练数据从word2vec中抽取出其对应的所有词向量
    :param lock:
    '''

    def transform_model_to_dict(model):
        vocab = model.vocab
        length = len(vocab)
        model_dict = {vocab[i]: model.get_vector(vocab[i]) for i in xrange(length)}
        return model_dict

    with lock:
        model_dict = src.global_var.get_value('model_dict')
        if model_dict is None:
            model = word2vec.load(vector_file)
            model_dict = transform_model_to_dict(model)
            src.global_var.set_value('model_dict', model_dict)
    
    print 'load model complete'

    word_to_vector = {}

    for train_data_i in train_data:
        for word in train_data_i:
            if word.startswith('-i-'):
                continue
            try:
                vector = model_dict[word]
                word_to_vector[word] = vector
            except KeyError:
                logger.debug(word + ' not in vector file')

    return word_to_vector


def create(lock, type_):
    '''
    Build a complete weighted graph over every word (that has a vector)
    in the training data of *type_*.  Edge weight is the Euclidean
    distance between the two word vectors.

    :param lock: mutex passed through to the data/model loaders
    :param type_: type key whose training data is used
    :return: (graph, node->sentence-index map, node word list,
              word->vector dict)
    '''
    def distance(vector1, vector2):
        # Euclidean distance between two word vectors.
        return np.sqrt(sum((vector1 - vector2) ** 2))

    node_to_sentence_num = {}
    node_word = []
    node_position = []

    train_data = get_train_data(lock, type_)
    word_to_vector = get_train_vectors(lock, train_data)

    # Lower-case every token and drop English stopwords.
    cleaned = []
    for sentence in train_data:
        lowered = [w.lower() for w in sentence]
        cleaned.append(stopwords.clean(lowered, 'english'))
    train_data = cleaned

    num = 0
    for sent_idx, sentence in enumerate(train_data):
        position = 'left'
        for word in sentence:
            if word.startswith('-i-'):
                # Instance marker: everything after it is right context.
                position = 'right'
                continue
            if word in word_to_vector:
                node_to_sentence_num[num] = sent_idx
                node_word.append(word)
                node_position.append(position)
                num += 1

    G = nx.Graph()
    for idx in xrange(len(node_word)):
        # property position means before/after the instance
        G.add_node(idx, position=node_position[idx])

    for i in xrange(len(node_word)):
        for j in xrange(i + 1, len(node_word)):
            G.add_edge(i, j, weight=distance(word_to_vector[node_word[i]],
                                             word_to_vector[node_word[j]]))

    return G, node_to_sentence_num, node_word, word_to_vector
