from data_util import  TextData
import numpy as np
import math
import random
from collections import Counter
import tensorflow as tf
from tensorflow import keras
import time
from args import Args


class Glove(keras.Model):
    """GloVe model: center/context embedding tables plus per-word scalar biases."""

    def __init__(self, vocab_size, embedding_dim):
        super(Glove, self).__init__()
        # An Embedding layer maps integer word indices to dense vectors.
        # ev: vector of a word when it acts as the *center* word.
        self.ev = keras.layers.Embedding(vocab_size, embedding_dim)
        # eu: vector of a word when it acts as a *context* word.
        self.eu = keras.layers.Embedding(vocab_size, embedding_dim)
        # Scalar bias for each center word.
        self.b = keras.layers.Embedding(vocab_size, 1)
        # Scalar bias for each context word.
        self.c = keras.layers.Embedding(vocab_size, 1)

    def call(self, centers, contexts):
        """Score every (center, context) pair: u·v + b_center + c_context."""
        # Permute to [batch, dim, 1] so the matmul below contracts over dim.
        center_vecs = tf.transpose(self.ev(centers), [0, 2, 1])
        context_vecs = self.eu(contexts)
        scores = context_vecs @ center_vecs
        center_bias = self.b(centers)
        context_bias = self.c(contexts)
        return scores + center_bias + context_bias

    # Final word vectors: sum of the two embedding tables.
    def get_uaddv_weights(self):
        """Return [ev + eu] — the combined word-vector matrix."""
        # Call each layer on an empty int batch so its weights exist
        # before reading .weights (layers build lazily on first call).
        self.ev(tf.zeros(0, dtype="int32"))
        self.eu(tf.zeros(0, dtype="int32"))
        return [self.ev.weights[0] + self.eu.weights[0]]

def loss_function(res, labels):
    """Weighted GloVe loss.

    res: model scores, shape [batch, num_contexts, 1].
    labels: co-occurrence values X_ij, shape [batch, num_contexts].

    Returns mean( h(X_ij) * (score - log(1 + X_ij))^2 ), where
    h(x) = min((x / 100)^0.75, 1) is the GloVe weighting function
    with cutoff x_max = 100.
    """
    # Fix: the original referenced the misspelled name `lables` here,
    # which raised NameError on the first call.
    labels = tf.reshape(labels, [res.shape[0], res.shape[1], 1])
    # Weighting function h with x_max = 100, exponent 0.75, clipped to 1.
    h = tf.math.pow(labels / 100, 0.75)
    h = tf.clip_by_value(h, 0, 1)
    # Squared error against log(1 + X_ij); log1p keeps X_ij = 0 finite.
    res -= tf.math.log1p(labels)
    res = tf.math.pow(res, 2)
    res *= h
    loss_ = tf.reduce_mean(res)
    return loss_

    

#把Xij作为label放到dataset，否则numpy数组无法参与tf的训练
# X_ij is passed in as `labels` through the dataset so the numpy array can
# participate in TF training.
@tf.function
def train_step(centers, contexts, labels):
    """Run one optimization step on a batch and return the batch loss tensor.

    Relies on module-level `glove` (the model) and `optimizer`.
    """
    # GradientTape records operations for automatic differentiation.
    with tf.GradientTape() as tape:
        res = glove(centers, contexts)
        loss = loss_function(res, labels)
    variables = glove.trainable_variables
    gradients = tape.gradient(loss, variables)
    # Applies gradients only to `variables`; anything else is untouched.
    optimizer.apply_gradients(zip(gradients, variables))
    # Fix: the original did `float(loss)`, which fails while tracing a
    # @tf.function (a symbolic tensor cannot become a Python float), and the
    # caller invokes `.numpy()` on the result, which a float lacks.
    # Returning the tensor satisfies both uses.
    return loss


if __name__ == '__main__':
    # Load the corpus and stack the question/answer sides into one sentence list.
    args = Args()
    textData = TextData(args)
    inp = np.array(textData.train_samples)[:, 0]
    targ = np.array(textData.train_samples)[:, 1]
    dataset = np.concatenate((inp, targ), axis=0)
    num_tokens = sum([len(st) for st in dataset])

    # Flatten the sentence list and count token frequencies.
    df = [x for tup in dataset for x in tup]
    counter = Counter(df)

    # Subsampling: randomly drop frequent tokens
    # (probability 1 - sqrt(t / f(w)) with t = 1e-4).
    def discard(idx):
        return random.uniform(0, 1) < 1 - math.sqrt(
            1e-4 / counter[idx] * num_tokens)

    subsampled_dataset = [[tk for tk in st if not discard(tk)] for st in dataset]

    # Slide a randomly-sized window to produce center and context words.
    def get_centers_and_contexts(dataset, max_window_size):
        centers, contexts, n_contexts = [], [], []
        for st in dataset:
            if len(st) < 2:  # need at least 2 words for a center/context pair
                continue
            centers += st
            for center_i in range(len(st)):
                window_size = random.randint(1, max_window_size)
                indices = list(range(max(0, center_i - window_size),
                                     min(len(st), center_i + 1 + window_size)))
                contexts.append([st[idx] for idx in indices])
                indices.remove(center_i)  # exclude the center word itself
                n_contexts.append([st[idx] for idx in indices])
        # contexts keeps the center word so distances can be measured when
        # filling the co-occurrence matrix; n_contexts (center removed) is
        # what training consumes.
        return centers, contexts, n_contexts

    all_centers, all_contexts, contexts = get_centers_and_contexts(subsampled_dataset, 5)

    # Initialize the co-occurrence matrix.
    vocab_size = textData.vocab_size
    cooccurrence = np.zeros([vocab_size, vocab_size], dtype="float32")

    for i in range(0, len(all_centers)):
        # Position of the center word's first occurrence in its window.
        aim = all_contexts[i].index(all_centers[i])
        for j in range(0, len(all_contexts[i])):
            if j != aim:
                x = all_centers[i]
                y = all_contexts[i][j]
                # Weight each co-occurrence by 1 / distance to the center
                # instead of a raw count.
                cooccurrence[x][y] += 1 / abs(j - aim)

    # Pad the context lists to a uniform length with trailing zeros.
    contexts = keras.preprocessing.sequence.pad_sequences(contexts, padding='post')
    # Convert all_centers to a tensor shaped [num_pairs, 1].
    centers = tf.convert_to_tensor(all_centers)
    centers = tf.reshape(centers, [-1, 1])
    # Build the label matrix X_ij from the co-occurrence matrix.
    labels = np.zeros([centers.shape[0], contexts.shape[1]], dtype="float32")
    for i in range(0, centers.shape[0]):
        center = centers[i]
        context = contexts[i]
        for j in range(0, context.shape[0]):
            labels[i][j] = cooccurrence[int(context[j])][int(center)]
    labels = tf.convert_to_tensor(labels)

    # Batch size, embedding dimension, and steps per epoch.
    BATCH_SIZE = 1024
    units = 64
    # Fix: the original read `len(lables)` — a NameError for the misspelled
    # name. The intended quantity is the number of training pairs.
    steps_per_epoch = len(labels) // BATCH_SIZE

    dataset = tf.data.Dataset.from_tensor_slices((centers, contexts, labels))
    dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)

    glove = Glove(vocab_size, units)

    optimizer = keras.optimizers.Adam()

    # TensorBoard logging.
    log_dir = 'logs/glove_log'
    summary_writer = tf.summary.create_file_writer(log_dir)

    EPOCHS = 20
    for epoch in range(EPOCHS):
        start = time.time()
        total_loss = 0

        # Distinct loop names so the full tensors built above are not
        # shadowed by the per-batch slices.
        for (batch, (batch_centers, batch_contexts, batch_labels)) in enumerate(dataset):
            batch_loss = train_step(batch_centers, batch_contexts, batch_labels)
            total_loss += batch_loss
            # Record the per-batch loss.
            with summary_writer.as_default():
                tf.summary.scalar('loss_mse', data=batch_loss,
                                  step=steps_per_epoch * epoch + batch)

            if batch % 100 == 0:
                print('Epoch {} Batch {} Loss {:.8f}'.format(epoch + 1,
                                                             batch,
                                                             batch_loss.numpy()))

        print('Epoch {} Loss {:.8f}'.format(epoch + 1,
                                          total_loss / steps_per_epoch))
        print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
    # Save the model weights.
    checkpoint = tf.train.Checkpoint(glove=glove)
    checkpoint.save('./save_model/glove/glove.ckpt')