import numpy as np

def clip_grads(grads, max_norm):
    """Rescale all gradients in place so their combined L2 norm stays within max_norm.

    parameters
    -----------
    grads: list of gradient arrays (modified in place)
    max_norm: maximum allowed global L2 norm
    """
    # Global norm across every gradient array taken together.
    squared_sum = sum(np.sum(g * g) for g in grads)
    total_norm = np.sqrt(squared_sum)

    # A scale factor below 1 means the global norm exceeded max_norm,
    # so shrink every gradient proportionally.
    scale = max_norm / (total_norm + 1e-6)
    if scale < 1:
        for g in grads:
            g *= scale
#------NLP-------
def preprocess(text):
    """Build a corpus (word-ID sequence) and word<->ID mappings from raw text.

    parameters
    -----------
    text: input text; lowercased, with '.' split off as its own token

    returns
    -----------
    corpus: np.ndarray of word IDs, one per token
    word_to_id: dict mapping word -> ID
    id_to_word: dict mapping ID -> word
    """
    tokens = text.lower().replace(".", " .").split(" ")

    word_to_id = {}
    id_to_word = {}
    for token in tokens:
        if token in word_to_id:
            continue
        # IDs are assigned in first-appearance order.
        idx = len(word_to_id)
        word_to_id[token] = idx
        id_to_word[idx] = token

    corpus = np.array([word_to_id[t] for t in tokens])
    return corpus, word_to_id, id_to_word

def create_co_matrix(corpus, vocab_size, window_size=1):
    """Build a co-occurrence matrix from a corpus.

    paramters
    -----------
    corpus: corpus (list/array of word IDs)
    vocab_size: vocabulary size
    window_size: context window (1 means one word on each side)

    returns
    -----------
    (vocab_size, vocab_size) int32 co-occurrence count matrix
    """
    n = len(corpus)
    co_matrix = np.zeros((vocab_size, vocab_size), dtype=np.int32)

    for pos, word_id in enumerate(corpus):
        for offset in range(1, window_size + 1):
            left = pos - offset
            right = pos + offset

            # Count each in-bounds neighbor within the window.
            if left >= 0:
                co_matrix[word_id, corpus[left]] += 1
            if right < n:
                co_matrix[word_id, corpus[right]] += 1

    return co_matrix

# 向量相似度计算
def cos_similarity(x, y, eps=1e-8):
    """Compute the cosine similarity of two vectors.

    parameters
    -----------
    x, y: vectors
    eps: tiny value guarding against division by zero

    returns
    ----------
    cosine similarity (dot product of the unit vectors)
    """
    x_norm = np.sqrt(np.sum(x ** 2)) + eps
    y_norm = np.sqrt(np.sum(y ** 2)) + eps
    # Dot product of unit vectors equals the cosine of the angle between them.
    return np.dot(x / x_norm, y / y_norm)

def most_similar(query, word_to_id, id_to_word, word_matrix, top=5):
    """Print the words most similar to a query word.

    parameters
    ----------
    query: query word
    word_to_id: dict mapping word -> word ID
    id_to_word: dict mapping word ID -> word
    word_matrix: matrix of word vectors, one row per word ID
    top: how many results to show
    """
    if query not in word_to_id:
        print(f"{query} is not found")
        return

    print(f"\n[query] {query}")
    query_vec = word_matrix[word_to_id[query]]

    # Similarity of every vocabulary word to the query vector.
    vocab_size = len(id_to_word)
    similarity = np.zeros(vocab_size)
    for wid in range(vocab_size):
        similarity[wid] = cos_similarity(word_matrix[wid], query_vec)

    # Ascending argsort on the negated scores gives descending similarity.
    shown = 0
    for wid in (-1 * similarity).argsort():
        # The query itself is trivially most similar; skip it.
        if id_to_word[wid] == query:
            continue
        print(' %s: %s' % (id_to_word[wid], similarity[wid]))

        shown += 1
        if shown >= top:
            return

def ppmi(C, verbose=False, eps=1e-8):
    """Compute the PPMI (positive pointwise mutual information) matrix.

    parameters
    ----------
    C: co-occurrence matrix
    verbose: whether to print progress
    eps: tiny value keeping log2 away from zero

    returns
    ---------
    PPMI matrix (float32, same shape as C)
    """
    M = np.zeros_like(C, dtype=np.float32)
    N = np.sum(C)             # total co-occurrence count
    S = np.sum(C, axis=0)     # per-word marginal counts
    rows, cols = C.shape
    total = rows * cols
    done = 0

    for i in range(rows):
        for j in range(cols):
            # Log-space form of log2(C[i,j] * N / (S[i] * S[j])):
            # avoids the divide/log-of-zero warnings the direct formula raises.
            pmi = (np.log2(C[i, j] + eps) + np.log2(N + eps)
                   - np.log2(S[j] + eps) - np.log2(S[i] + eps))
            M[i, j] = max(0, pmi)  # clamp negatives to zero ("positive" PMI)

            if verbose:
                done += 1
                if done % (total // 100 + 1) == 0:
                    print('%.1f%% done' % (100 * done / total))
    return M

# word2vec-------------------
def create_contexts_target(corpus, window_size=1):
    """Produce (contexts, targets) pairs for word2vec training.

    parameters
    -----------
    corpus: corpus (array of word IDs)
    window_size: context window (1 means one word on each side)

    returns
    -----------
    contexts: array of shape (len(targets), 2*window_size)
    target: array of the center words (edges without a full window are dropped)
    """
    # Only positions with a complete window on both sides become targets.
    target = corpus[window_size:-window_size]

    contexts = []
    for center in range(window_size, len(corpus) - window_size):
        # All words within the window except the center itself.
        window = [corpus[center + off]
                  for off in range(-window_size, window_size + 1)
                  if off != 0]
        contexts.append(window)

    return np.array(contexts), np.array(target)

def convert_one_hot(corpus, vocab_size):
    """Convert word IDs to a one-hot representation.

    parameters
    -----------
    corpus: word-ID array (1-D or 2-D NumPy array)
    vocab_size: vocabulary size (one-hot vector length)

    returns
    ----------
    one-hot representation (2-D or 3-D int32 NumPy array)

    raises
    ----------
    ValueError: if corpus is not 1-D or 2-D
    """
    N = corpus.shape[0]

    if corpus.ndim == 1:
        # (N,) IDs -> (N, vocab_size) one-hot rows.
        one_hot = np.zeros((N, vocab_size), dtype=np.int32)
        for idx, word_id in enumerate(corpus):
            one_hot[idx, word_id] = 1
    elif corpus.ndim == 2:
        # (N, C) IDs (e.g. context windows) -> (N, C, vocab_size).
        C = corpus.shape[1]
        one_hot = np.zeros((N, C, vocab_size), dtype=np.int32)
        for idx_0, word_ids in enumerate(corpus):
            for idx_1, word_id in enumerate(word_ids):
                one_hot[idx_0, idx_1, word_id] = 1
    else:
        # Previously any other ndim fell through to an UnboundLocalError on
        # `return one_hot`; fail with a clear message instead.
        raise ValueError("corpus must be a 1-D or 2-D array")
    return one_hot

if __name__ == '__main__':
    # Smoke test: build a tiny corpus and one-hot encode it.
    text = "You say goodbye and I say hello."
    corpus, word_to_id, id_to_word = preprocess(text)

    # Reuse convert_one_hot instead of the previous hand-rolled loop with
    # hard-coded dimensions np.zeros((8, 7)); preprocess already returns an
    # ndarray, so no extra np.array(corpus) conversion is needed.
    one_hot = convert_one_hot(corpus, len(word_to_id))
    print(one_hot)
