# encoding=utf8
import pickle
import numpy as np


def meanLength(ll):
    """Return the average length of the sequences in *ll*."""
    lengths = [len(seq) for seq in ll]
    return np.mean(lengths)


def transferMatrixMeanSimilarity(a, b):
    """Mean element-wise ratio similarity of two same-shaped matrices.

    Each element becomes min(r, 1/r) for r = a/b, so a value of 1 means
    identical and 0 means maximally different:
      - x/0 (inf) is mapped to 0 (completely dissimilar);
      - 0/0 (nan) is mapped to 1 (both empty -> identical).
    """
    assert a.shape == b.shape
    ratio = a / b
    ratio[np.isinf(ratio)] = 0  # x/0 -> treat as no similarity
    ratio[np.isnan(ratio)] = 1  # 0/0 -> treat as identical
    # Fold ratios > 1 down to their reciprocal; ratios < 1 are kept as-is.
    result = np.array(ratio)
    large = ratio >= 1
    result[large] = 1 / ratio[large]
    return result.mean()


def transferMatrixMeanSimilarity_diff_p(a, b):
    """Mean relative absolute difference |a-b| / (a+b) of two same-shaped matrices.

    A 0/0 element (both entries zero) is treated as zero difference.
    """
    assert a.shape == b.shape
    rel_diff = np.abs(a - b) / (a + b)
    rel_diff[np.isnan(rel_diff)] = 0  # both zero -> identical -> no difference
    return rel_diff.mean()


def texts2transferMatrix(self=None, texts=None):
    """Build one character "transfer" (co-occurrence) matrix per text.

    For every character, all characters within a forward window of `interval`
    positions (including the character itself at offset 0) get their
    (row=char, col=neighbor) count incremented.

    :param self: optional Node-like object; if truthy, texts are pulled from
                 self.sunNodes[0].getData()  # NOTE(review): 'sunNodes' looks
                 like a typo for 'supportNodes' — confirm against callers.
    :param texts: iterable of strings (used when self is falsy)
    :return: list of scipy.sparse.csr_matrix, one per text
    """
    if self:
        texts = self.sunNodes[0].getData()
    from gensim import corpora
    from scipy.sparse import csr_matrix
    texts = [list(text) for text in texts]
    dictionary = corpora.Dictionary(texts)
    keys = dictionary.token2id
    print('length of dictionary:', len(keys), dictionary.token2id, sep='\n')
    tMs = []  # tM: transferMatrix
    interval = 5  # forward window size
    for index, text in enumerate(texts):
        tm = np.zeros((len(keys), len(keys)))
        n = len(text)
        for i, char in enumerate(text):
            row = keys[char]
            # BUG FIX: the original used a bare `except: break` to stop at the
            # end of the text, which also silenced any unrelated error; bound
            # the window explicitly instead.
            # NOTE(review): offset 0 counts a char's transition to itself —
            # confirm this self-loop is intended.
            for j in range(i, min(i + interval, n)):
                tm[row][keys[text[j]]] += 1
        tMs.append(csr_matrix(tm))
        if index % 10 == 0:
            print(index)  # coarse progress indicator
    return tMs


# #test
# texts = ['我是小天使，他是小天鹅','我是小天鹅，他是小天使']
# tMs = texts2transferMatrix(texts=texts)
# print(tMs)
# print(transferMatrixMeanSimilarity(tMs[0],tMs[1]))


def groupVectorSimilarity(a, b):
    """Collapse each vector group to its mean vector, then return their cosine.

    Simplest group-similarity scheme: average over axis 0, then cosine.
    """
    centroid_a = a.mean(0)
    centroid_b = b.mean(0)
    return vectorCos(centroid_a, centroid_b)


def log_Matrix(CountMatrix):
    """Turn a count matrix into a pairwise pseudo cross-entropy matrix.

    Each row is smoothed with a tiny background count (so log2 never sees 0),
    normalized into a probability distribution, and the result is
    -(P . log2(P).T), i.e. entry (i, j) is the cross entropy of row i's
    distribution against row j's log-probabilities.

    :param CountMatrix: 2-D array of non-negative counts
    :return: 2-D array of shape (rows, rows)
    """
    sp = CountMatrix.shape
    meanWords = np.sum(CountMatrix) / sp[0]  # average total count per row
    CountMatrix = CountMatrix.astype(np.float64)  # copy; original input is untouched
    # Add a tiny uniform background so zero counts don't produce log2(0) = -inf.
    CountMatrix += meanWords * 0.00001 / sp[1]
    # BUG FIX: the original `np.tile(rowsums, (1, n)).reshape(sp)` laid the
    # row-sum vector out as each ROW, so element (i, j) was divided by row j's
    # sum instead of row i's. keepdims broadcasting normalizes each row by its
    # own sum, giving proper per-row probability distributions.
    divisor_V = np.sum(CountMatrix, axis=1, keepdims=True)
    probabilize_CountMatrix = CountMatrix / divisor_V
    log2pMatrix = np.log2(probabilize_CountMatrix)
    crossEntropy = -(probabilize_CountMatrix.dot(log2pMatrix.transpose(1, 0)))
    return crossEntropy


def vectorCos(a, b, norm_a=None, norm_b=None):
    """Cosine similarity: cos(a, b) = a.b / (|a| * |b|).

    Pre-computed norms may be passed in to avoid recomputing them on
    repeated calls with the same vectors.
    """
    if norm_a is None:
        norm_a = np.linalg.norm(a)
    if norm_b is None:
        norm_b = np.linalg.norm(b)
    return np.dot(a, b) / (norm_a * norm_b)


def MatrixCos(a, b=None, isTrimZeros=True):
    """Pairwise cosine similarity between the rows of *a* and the rows of *b*.

    :param a: 2-D array, rows are vectors
    :param b: optional 2-D array with the same column count; defaults to a copy of *a*
    :param isTrimZeros: drop trailing all-zero rows from both matrices first
    :return: matrix C with C[i, j] = cos(a[i], b[j])
    """
    if b is None:
        b = np.array(a)
    assert a.shape[1] == b.shape[1]
    if isTrimZeros:
        # Keep rows up to (and including) the last row containing a nonzero entry.
        a = a[:a.nonzero()[0][-1] + 1]
        b = b[:b.nonzero()[0][-1] + 1]
    dots = a.dot(b.transpose(1, 0))
    # Row norms of each matrix; the outer product gives every |a_i|*|b_j| at once.
    row_norms_a = np.apply_along_axis(np.linalg.norm, 1, a)
    row_norms_b = np.apply_along_axis(np.linalg.norm, 1, b)
    return dots / np.outer(row_norms_a, row_norms_b)


def MatrixMeanCos(a, b, isTrimZeros=True):
    """Average of all pairwise row-cosine similarities between *a* and *b*."""
    cos_matrix = MatrixCos(a, b, isTrimZeros)
    return cos_matrix.mean()


def MatrixMeanPower(a, b):
    """Mean of all pairwise (un-normalized) dot products between rows of *a* and *b*."""
    return a.dot(b.transpose(1, 0)).mean()


def vectorOSdist(a, b):
    """Euclidean (L2) distance between two vectors."""
    diff = a - b
    return np.linalg.norm(diff)


def MatrixOSMeanDist(a, b=None, isTrimZeros=True):
    """Mean Euclidean distance over all row pairs of *a* and *b*.

    :param a: 2-D array, rows are vectors
    :param b: optional 2-D array with the same column count; defaults to a copy of *a*
    :param isTrimZeros: drop trailing all-zero rows from both matrices first
    """
    if b is None:
        b = np.array(a)
    assert a.shape[1] == b.shape[1]
    if isTrimZeros:
        # Keep rows up to (and including) the last row containing a nonzero entry.
        a = a[:a.nonzero()[0][-1] + 1]
        b = b[:b.nonzero()[0][-1] + 1]
    dists = np.zeros((a.shape[0], b.shape[0]))
    for i in range(a.shape[0]):
        for j in range(b.shape[0]):
            dists[i][j] = np.linalg.norm(a[i] - b[j])
    return dists.mean()


def singel_model_similarity(self=None, cid=555, order='+', data=None):
    """Return row *cid* of a cross-entropy matrix rescaled into [0, 1].

    After rescaling, larger values mean more similar, matching the direction
    of the other similarity functions in this module.

    :param self: optional Node-like object; its sunNodes[0].getData() supplies the matrix
    :param cid: row index to extract
    :param order: '-' flips the sign first (cross entropy: smaller = more similar)
    :param data: the matrix itself, used when self is None
    :raises Exception: when neither self nor data is provided
    """
    if self is None and data is not None:
        crossEntropy = data
    elif self is not None:
        crossEntropy = self.sunNodes[0].getData()
    else:
        raise Exception("single_model_similarity")
    row = crossEntropy[cid]
    if order == '-':
        row = -row  # smaller cross entropy = more similar, so invert
    # Shift to zero minimum, then scale so the maximum is exactly 1.
    row = row - row.min()
    return row / row.max()


def strContainSub(trivial, label):
    """True if any string in *trivial* contains *label* or is contained in it."""
    return any(t in label or label in t for t in trivial)


def is_number(s):
    """Return True if *s* represents a number.

    Accepts anything float() can parse, plus strings made entirely of
    Unicode numeric characters (e.g. CJK numerals such as '三'); iterating
    the string character by character avoids unicodedata.numeric's
    single-character restriction.

    :param s: value to test (typically a string)
    :return: bool
    """
    try:
        float(s)
        return True
    # BUG FIX: also catch TypeError — float(None) used to propagate out uncaught.
    except (TypeError, ValueError):
        pass
    try:
        import unicodedata  # stdlib Unicode character database
        # BUG FIX: the empty string used to fall through the loop and return True.
        if not s:
            return False
        for ch in s:
            unicodedata.numeric(ch)  # raises ValueError on any non-numeric char
        return True
    except (TypeError, ValueError):
        pass
    return False


def superParameterSpace(start, end, varianceDownthresh, upPropotion, format_f=int):
    """Generate a 1-D hyper-parameter search grid from *start* to *end*.

    The grid grows additively by varianceDownthresh while values are small
    (so each step never changes the value by less than the threshold), then
    geometrically by a factor of (1 + upPropotion) — so no step exceeds a
    relative change of upPropotion. The last point is clamped to *end*.

    :param format_f: applied to every grid point before returning (default int)
    :return: list of formatted grid points
    """
    space = [start]
    # Additive phase: while a relative step would be smaller than the threshold.
    while space[-1] * upPropotion < varianceDownthresh:
        space.append(space[-1] + varianceDownthresh)
    # Geometric phase: grow by the allowed proportion until we pass *end*.
    while space[-1] < end:
        space.append(space[-1] * (1 + upPropotion))
    if space[-1] > end:
        space[-1] = end  # clamp the overshoot
    return [format_f(point) for point in space]


def default_write_func(data, f_path):
    """Default persistence writer: pickle *data* to the file at *f_path*."""
    with open(f_path, 'wb') as fh:
        pickle.dump(data, fh)


def default_read_func(f_path):
    """Default persistence reader: unpickle and return the object stored at *f_path*."""
    with open(f_path, 'rb') as fh:
        return pickle.load(fh)


# Pool of already-instantiated Node objects, keyed by node_id
# (name + persistence_path); lets get_node() reuse an existing instance.
cached_node_pool = dict()


def get_node(**init_params):
    """Return a Node for *init_params*, reusing a cached instance when one exists.

    A fresh Node is always constructed to derive the node_id; if an instance
    with that id is already pooled, the cached one is returned (with the newly
    requested forceUpdate flag applied), otherwise the fresh one is pooled.
    """
    node = Node(**init_params)
    if node.node_id in cached_node_pool:
        cached = cached_node_pool[node.node_id]
        # BUG FIX: the original line was `self.forceUpdate = self.forceUpdate`
        # (a no-op); propagate the freshly requested flag onto the cached node.
        cached.forceUpdate = node.forceUpdate
        return cached
    cached_node_pool[node.node_id] = node
    return node


class Node:
    """A lazily-computed, disk-cached computation node.

    A Node wraps a compute function plus the nodes it depends on
    (supportNodes). getData() returns cached in-memory data, else tries to
    load a persisted result from persistence_path, else runs the compute
    function (and persists the result when save is True).
    """

    # Sentinel distinguishing "argument omitted" from an explicit None
    # (None is meaningful for superParams: it means "call compute with no extra args").
    _MISSING = object()

    def __init__(self, name='anonymous-common_node', compute=lambda x: x, dirCompute=lambda x: 0,
                 superParams=_MISSING, supportNodes=_MISSING, cache_dir='cache/',
                 persistence_path='None', save=True, forceUpdate=False,
                 isSingleFile=True, isLeafNode=False,
                 write_func=default_write_func, read_func=default_read_func):
        """Configure the node; no computation or I/O happens here.

        :param compute: single-file compute function; receives self unless isLeafNode
        :param dirCompute: directory-style compute function (isSingleFile=False path)
        :param superParams: [args_list, kwargs_dict] forwarded to the compute
                            function, or None for a bare call
        :param supportNodes: upstream Nodes this node's compute may read
        :param persistence_path: cache file path; only an explicit None falls
                                 back to cache_dir + compute name + node name
        :param forceUpdate: when True, getData() recomputes even if a cache file exists
        :param isLeafNode: when True, compute is called without self
        """
        # BUG FIX: superParams/supportNodes used shared mutable default
        # arguments ([[], {}] and []); the sentinel gives each instance its
        # own fresh objects while preserving the original default values.
        if superParams is Node._MISSING:
            superParams = [[], {}]
        if supportNodes is Node._MISSING:
            supportNodes = []
        self.isLeafNode = isLeafNode
        self.write_func = write_func
        self.read_func = read_func
        self.name = name
        self.data = None  # computed lazily by getData()
        # NOTE(review): the default persistence_path is the *string* 'None',
        # so this fallback only triggers when None is passed explicitly —
        # confirm that default is intended.
        self.persistence_path = persistence_path if persistence_path is not None else cache_dir + compute.__name__ + '_' + name
        self.save = save
        self.forceUpdate = forceUpdate
        self.supportNodes = supportNodes
        self.compute = compute
        self.superParams = superParams  # [positional args, keyword args] or None
        self.dirCompute = dirCompute
        self.isSingleFile = isSingleFile
        self.node_id = self.name + self.persistence_path  # identity key used by get_node()

    def getData(self):
        """Return this node's data, computing / loading / persisting as needed.

        Resolution order: in-memory cache, then (single-file mode) the
        persisted file, then the compute function. forceUpdate skips the
        persisted file by raising into the except branch.
        """
        if self.data is None:
            if not self.isSingleFile:
                # Directory-style (multi-file) persistence: the compute function
                # itself is responsible for caching and for honoring forceUpdate.
                if self.superParams is None:
                    self.data = self.dirCompute(self)
                else:
                    self.data = self.dirCompute(self, *self.superParams[0], **self.superParams[1])
                print('dirCompute', self.name, 'over', sep=' ')
            elif self.isSingleFile:
                try:
                    if self.forceUpdate:
                        # Deliberately jump to the compute branch below.
                        raise Exception("forceUpdate")
                    self.data = self.read_func(f_path=self.persistence_path)
                except Exception as e:
                    # Reached on forceUpdate or any read failure (e.g. missing file).
                    print(self.name, f'entering except {e}: self.forceUpdate =', self.forceUpdate, sep=' ')
                    if self.superParams is None:
                        if self.isLeafNode:
                            self.data = self.compute()
                        else:
                            self.data = self.compute(self)
                    else:
                        if self.isLeafNode:
                            self.data = self.compute(*self.superParams[0], **self.superParams[1])
                        else:
                            self.data = self.compute(self, *self.superParams[0], **self.superParams[1])
                    print('compute', self.name, 'over', sep=' ')
                    if self.save:
                        self.preserveSideEffect()
        print(self.name, 'getData() over')
        return self.data

    def preserveSideEffect(self):
        """Persist self.data to persistence_path via the configured write_func."""
        self.write_func(data=self.data, f_path=self.persistence_path)


def compute1(self):
    """Demo compute function: always returns the constant 1."""
    return 1


def compute2(self, *args, **kwargs):
    """Demo compute function: echoes its hyper-parameters and returns 2."""
    print('superParams: ', args, kwargs)
    return 2


def compute3(self):
    """Demo aggregate compute: sum of every support node's data."""
    return sum(node.getData() for node in self.supportNodes)


# Demo node graph (built at import time): node1 and node2 are independent
# computations; node3 sums the data of its supports (node1 + node2).
node1 = Node(name="node1", compute=compute1, persistence_path='/tmp/node1')
node2 = Node(name="node2", compute=compute2, supportNodes=[], persistence_path='/tmp/node2',
             superParams=([1, 2, 3], {'a': 1, 'b': 2, 'Rn': 'color'}))
node3 = Node(name="node3", compute=compute3, supportNodes=[node1, node2], persistence_path='/tmp/node3')
# get_node pools by node_id (name + persistence_path); node33 shares node3's id.
node33 = get_node(name="node3", persistence_path='/tmp/node3')

node4 = get_node(name="node4", persistence_path='/tmp/node4')

if __name__ == '__main__':
    # Demo run: force the selected nodes to recompute (bypassing any persisted
    # cache file) and print their resulting data.
    wanted_Nodes = [node2, node3]
    for node in wanted_Nodes:
        node.forceUpdate = True  # makes getData() skip the cache-file read
        node.getData()
        print(node.data)


