import os
import time

import numpy as np
import pandas as pd
import tensorflow as tf
from keras.preprocessing import sequence
import matplotlib.pyplot as plt

"""
=======================================================================================
                                数据获取和处理
=======================================================================================
"""


# Fetch the feature paths of the training data together with the matching labels (English descriptions).
# video_data_path: the video caption (label) file
# video_feat_path: the folder holding the extracted video features
def get_video_data(video_data_path, video_feat_path):
    """Load the caption CSV and pair each English caption with its feature file.

    Args:
        video_data_path: path to the caption CSV (video_corpus.csv).
        video_feat_path: directory containing the per-video ``.npy`` feature files.

    Returns:
        (DataFrame, ndarray): the filtered rows (columns include 'VideoID',
        'Start', 'End', 'WorkerID', 'Source', 'AnnotationTime', 'Language',
        'Description', 'video_path') and the caption strings.  Because one
        video has several captions, the same 'video_path' appears on
        multiple rows.
    """
    video_data = pd.read_csv(video_data_path, sep=',')
    # Keep only the English captions.
    video_data = video_data[video_data['Language'] == 'English']
    # Feature files are named "<VideoID>_<Start>_<End>.avi.npy".
    video_data['video_path'] = video_data.apply(
        lambda row: row['VideoID'] + '_' + str(int(row['Start'])) + '_' + str(int(row['End'])) + '.avi.npy', axis=1)
    # Turn the file name into a full path inside the feature directory.
    video_data['video_path'] = video_data['video_path'].map(lambda x: os.path.join(video_feat_path, x))
    # Drop rows whose feature file is missing or whose caption is not a
    # string (pandas reads empty cells as NaN).
    video_data = video_data[video_data['video_path'].map(os.path.exists)]
    video_data = video_data[video_data['Description'].map(lambda x: isinstance(x, str))]
    # NOTE: the original also filtered the frame by membership in its own
    # unique 'video_path' values -- a no-op, removed here.
    return video_data, video_data['Description'].values


# Returns one batch of data of size batch_size:
# current_feats: video features
# current_video_masks: marks the true length of each video's features
# current_caption_matrix: encoded video captions
# current_caption_masks: marks the true length of each caption
def get_batch_data(start, batch, train_data, wordtoix):
    """Assemble one mini-batch of video features and encoded captions.

    Args:
        start: row offset into ``train_data`` where the batch begins.
        batch: batch size.
        train_data: DataFrame with 'video_path' and 'Description' columns.
        wordtoix: dict mapping word -> integer id ('<unk>' must be present).

    Returns:
        current_feats: (batch, n_video_lstm_step, dim_image) zero-padded features.
        current_video_masks: (batch, n_video_lstm_step), 1 on real frames, 0 on padding.
        current_caption_matrix: (batch, n_caption_lstm_step + 1) int word ids
            starting with <bos>, 0-padded (<pad>) after <eos>.
        current_caption_masks: same shape, 1 on positions the loss should count.
    """
    end = start + batch

    # Slice out the batch rows.
    current_batch = train_data[start:end]
    current_videos = current_batch['video_path'].values

    # Padded feature tensor (batch, 80, 4096) and frame-validity mask (batch, 80).
    current_feats = np.zeros((batch, n_video_lstm_step, dim_image))
    # Load every clip's saved feature array from its .npy file.
    current_feats_vals = list(map(lambda vid: np.load(vid), current_videos))
    current_video_masks = np.zeros((batch, n_video_lstm_step))

    # Copy each clip's frames in.  Clips shorter than n_video_lstm_step are
    # zero-padded; BUG FIX: clips longer than n_video_lstm_step are truncated
    # instead of crashing the broadcast assignment.
    for ind, feat in enumerate(current_feats_vals):
        n_frames = min(len(feat), n_video_lstm_step)
        current_feats[ind][:n_frames] = feat[:n_frames]
        current_video_masks[ind][:n_frames] = 1

    # Strip punctuation from the captions and prepend the <bos> marker.
    current_captions = current_batch['Description'].values
    current_captions = list(map(lambda x: '<bos> ' + (x.replace('.', '').replace(',', '').replace('"', '')
                                                      .replace('\n', '').replace('?', '').replace('!', '')
                                                      .replace('\\', '').replace('/', '')), current_captions))

    # Append <eos>.  Captions of fewer than n_caption_lstm_step tokens get
    # <eos> at the end; longer ones are cut to n_caption_lstm_step - 1 tokens
    # with <eos> as the final token.
    for idx, each_cap in enumerate(current_captions):
        word = each_cap.lower().split(' ')
        if len(word) < n_caption_lstm_step:
            current_captions[idx] = current_captions[idx] + ' <eos>'
        else:
            new_word = word[:n_caption_lstm_step - 1]
            new_word.append('<eos>')
            current_captions[idx] = ' '.join(new_word)
    # Each caption now reads "<bos> w1 w2 ... <eos>".

    # Encode each caption as a list of word ids; unknown words map to <unk>.
    current_caption_ind = []
    for cap in current_captions:
        current_word_ind = []
        for word in cap.lower().split(' '):
            if word in wordtoix:
                current_word_ind.append(wordtoix[word])
            else:
                current_word_ind.append(wordtoix['<unk>'])
        current_caption_ind.append(current_word_ind)

    # Right-pad with 0 (<pad>) to a (batch, n_caption_lstm_step) matrix.
    current_caption_matrix = sequence.pad_sequences(current_caption_ind, padding='post',
                                                    maxlen=n_caption_lstm_step)

    # Append one extra zero column -> (batch, n_caption_lstm_step + 1): the
    # LSTM consumes <bos> plus n_caption_lstm_step target tokens.
    current_caption_matrix = np.hstack(
        [current_caption_matrix, np.zeros([len(current_caption_matrix), 1])]).astype(int)

    # Loss mask: 1 on each real token plus one extra position (the <bos>
    # token is input-only, so targets are shifted by one), 0 on padding.
    current_caption_masks = np.zeros((current_caption_matrix.shape[0], current_caption_matrix.shape[1]))
    nonzeros = np.array(list(map(lambda x: (x != 0).sum() + 1, current_caption_matrix)))
    for ind, row in enumerate(current_caption_masks):
        row[:nonzeros[ind]] = 1

    return current_feats, current_video_masks, current_caption_matrix, current_caption_masks


# captions: the collection of all video captions, punctuation already removed.
# word_count_threshold: frequency threshold; words occurring less often than this are dropped.
def build_wordvocab(captions, word_count_threshold=5):
    """Build the vocabulary and its word<->index mappings from all captions.

    Args:
        captions: iterable of caption strings (punctuation is stripped here).
        word_count_threshold: words occurring fewer times than this are
            dropped from the vocabulary (they will later map to <unk>).

    Returns:
        wordtoix: dict word -> index; indices 0..3 are reserved for
            <pad>/<bos>/<eos>/<unk>.
        ixtoword: dict index -> word, inverse of ``wordtoix``.
        bias_init_vector: log relative-frequency vector (shifted so its max
            is 0), used to initialize the decoder's output bias.

    Side effects:
        Saves wordtoix, ixtoword and bias_init_vector as .npy files in ./data.
    """
    print('开始处理单词数量并创建vocab，保存单词的频率阈值为： %d' % word_count_threshold)

    # Strip punctuation.
    captions = map(lambda x: x.replace('.', '').replace(',', '').replace('"', '').replace('\n', '').replace('?', '')
                   .replace('!', '').replace('\\', '').replace('/', ''), captions)

    word_counts = {}  # word -> number of occurrences
    nsents = 0  # number of captions seen
    for sent in captions:
        nsents += 1
        for w in sent.lower().split(' '):
            word_counts[w] = word_counts.get(w, 0) + 1

    # Keep only the words that occur at least word_count_threshold times.
    vocab = [w for w in word_counts if word_counts[w] >= word_count_threshold]
    print('在经过阈值筛选后单词数量由 %d 变成 %d' % (len(word_counts), len(vocab)))

    # Two mirror mappings; indices 0..3 are reserved for the special tokens.
    ixtoword = {0: '<pad>', 1: '<bos>', 2: '<eos>', 3: '<unk>'}
    wordtoix = {'<pad>': 0, '<bos>': 1, '<eos>': 2, '<unk>': 3}
    for idx, w in enumerate(vocab):
        wordtoix[w] = idx + 4
        ixtoword[idx + 4] = w

    # Pretend each special token appears once per caption so its bias term
    # is well defined.
    word_counts['<pad>'] = nsents
    word_counts['<bos>'] = nsents
    word_counts['<eos>'] = nsents
    word_counts['<unk>'] = nsents

    # bias = log(relative frequency), shifted so max(bias) == 0, i.e.
    # y = log(x / sum) - max(log(x / sum)).
    # BUG FIX: np.float was removed in NumPy 1.24 -- use float64 explicitly.
    bias_init_vector = np.array([word_counts[ixtoword[i]] for i in ixtoword], dtype=np.float64)
    bias_init_vector /= np.sum(bias_init_vector)
    bias_init_vector = np.log(bias_init_vector)
    bias_init_vector -= np.max(bias_init_vector)

    # Persist the mappings so test-time code can reuse them directly.
    print('保存wordtoix、ixtoword、bias_init_vector到data文件夹。')
    save_path = './data'
    os.makedirs(save_path, exist_ok=True)
    np.save("./data/wordtoix", wordtoix)
    np.save('./data/ixtoword', ixtoword)
    np.save("./data/bias_init_vector", bias_init_vector)

    return wordtoix, ixtoword, bias_init_vector


"""
=======================================================================================
                                算法模型构建
=======================================================================================
"""


class VideoCaptionGenerator:
    """S2VT-style video caption model: two stacked LSTMs in a TF1 static graph.

    During the encoding stage the first LSTM reads one frame feature per step
    while the second LSTM receives zero padding as its word input; during the
    decoding stage the first LSTM receives zero padding and the second LSTM
    produces one word per step.
    """

    def __init__(self, dim_image, n_words, dim_hidden, batch_size, n_lstm_steps, n_video_lstm_step, n_caption_lstm_step,
                 bias_init_vector=None):
        # Length of one frame's feature vector (4096).
        self.dim_image = dim_image
        # Vocabulary size.
        self.n_words = n_words
        # Number of hidden units per LSTM (1000).
        self.dim_hidden = dim_hidden
        # Number of samples processed by the graph at once.
        self.batch_size = batch_size
        # Total number of LSTM time steps (80).
        self.n_lstm_steps = n_lstm_steps
        # Number of encoding steps, i.e. frame inputs (80).
        self.n_video_lstm_step = n_video_lstm_step
        # Number of decoding steps, i.e. generated caption length (20).
        self.n_caption_lstm_step = n_caption_lstm_step

        # Word-embedding matrix (n_words, dim_hidden), uniform in [-0.1, 0.1].
        self.Wemb = tf.Variable(tf.random_uniform([n_words, dim_hidden], -0.1, 0.1), name='wemb')

        with tf.variable_scope("LSTM1", reuse=tf.AUTO_REUSE):
            self.lstm1 = tf.nn.rnn_cell.BasicLSTMCell(dim_hidden, state_is_tuple=False)
        with tf.variable_scope("LSTM2", reuse=tf.AUTO_REUSE):
            self.lstm2 = tf.nn.rnn_cell.BasicLSTMCell(dim_hidden, state_is_tuple=False)

        self.encode_image_W = tf.Variable(tf.random_uniform([dim_image, dim_hidden], -0.1, 0.1),
                                          name='encode_image_W')  # (4096, 1000) frame-embedding weights
        self.encode_image_b = tf.Variable(tf.zeros([dim_hidden]), name='encode_image_b')  # frame-embedding bias

        self.embed_word_W = tf.Variable(tf.random_uniform([dim_hidden, n_words], -0.1, 0.1),
                                        name='embed_word_W')  # (1000, n_words) output-projection weights
        if bias_init_vector is not None:
            self.embed_word_b = tf.Variable(bias_init_vector.astype(np.float32), name='embed_word_b')  # log-frequency-initialized output bias
        else:
            self.embed_word_b = tf.Variable(tf.zeros([n_words]), name='embed_word_b')

    # isTest: the training graph and the inference graph differ slightly, so
    # this flag selects which variant to build.
    def build_model(self, isTest=False):
        """Build the static graph and return its placeholders and outputs.

        Returns:
            [video, video_mask, caption, caption_mask]: input placeholders.
            [loss, generated_words]: summed masked cross-entropy loss and the
            list of per-step argmax word indices (used at inference time).
        """
        video = tf.placeholder(tf.float32, [self.batch_size, self.n_video_lstm_step, self.dim_image])  # (50, 80, 4096)
        video_mask = tf.placeholder(tf.float32, [self.batch_size, self.n_video_lstm_step])  # (50, 80)

        caption = tf.placeholder(tf.int32, [self.batch_size, self.n_caption_lstm_step + 1])  # (50, 21)
        caption_mask = tf.placeholder(tf.float32, [self.batch_size, self.n_caption_lstm_step + 1])

        video_flat = tf.reshape(video, [-1, self.dim_image])  # (50*80, 4096)
        # Project each frame feature down to dim_hidden.
        image_emb = tf.nn.xw_plus_b(video_flat, self.encode_image_W,
                                    self.encode_image_b)  # (batch_size*n_lstm_steps, dim_hidden)
        # Restore the (batch, time, feature) layout: (50, 80, 1000).
        image_emb = tf.reshape(image_emb, [self.batch_size, self.n_lstm_steps, self.dim_hidden])

        state1 = tf.zeros([self.batch_size, self.lstm1.state_size])  # (50, state_size)
        state2 = tf.zeros([self.batch_size, self.lstm2.state_size])
        padding = tf.zeros([self.batch_size, self.dim_hidden])  # (50, 1000)

        loss = 0.0
        # Argmax word indices produced per decoding step (used when validating).
        generated_words = []

        ##############################  Encoding Stage ##################################
        # One iteration per frame (80 frames).
        for i in range(0, self.n_video_lstm_step):
            with tf.variable_scope("LSTM1", reuse=tf.AUTO_REUSE):
                # Feed frame i of every sample in the batch.
                output1, state1 = self.lstm1(image_emb[:, i, :], state1)

            with tf.variable_scope("LSTM2", reuse=tf.AUTO_REUSE):
                # The second LSTM's input is [padding, output1], shape (50, 2000);
                # its output size is the number of hidden units.
                output2, state2 = self.lstm2(tf.concat([padding, output1], 1), state2)

        ############################# Decoding Stage ######################################
        current_embed = 0
        # One iteration per generated word (20 words).
        for i in range(0, self.n_caption_lstm_step):
            if isTest and i == 0:
                # Inference starts from the <bos> token (index 1).
                current_embed = tf.nn.embedding_lookup(self.Wemb, tf.ones([1], dtype=tf.int64))
            elif not isTest:
                # Teacher forcing: feed the ground-truth word at step i.
                current_embed = tf.nn.embedding_lookup(self.Wemb, caption[:, i])

            with tf.variable_scope("LSTM1", reuse=tf.AUTO_REUSE):
                # All frames have been consumed, so the first LSTM gets padding.
                output1, state1 = self.lstm1(padding, state1)

            with tf.variable_scope("LSTM2", reuse=tf.AUTO_REUSE):
                output2, state2 = self.lstm2(tf.concat([current_embed, output1], 1), state2)  # (50, 1000)

            # The first token of `caption` is <bos>, which is never emitted,
            # so the target for step i is the token at position i + 1.
            labels = tf.expand_dims(caption[:, i + 1], 1)
            indices = tf.expand_dims(tf.range(0, self.batch_size, 1), 1)
            concated = tf.concat([indices, labels], 1)
            onehot_labels = tf.sparse_to_dense(concated, tf.stack([self.batch_size, self.n_words]), 1.0, 0.0)

            logit_words = tf.nn.xw_plus_b(output2, self.embed_word_W, self.embed_word_b)  # (batch, n_words)

            # NOTE(review): the target is caption[:, i + 1] but the loss is
            # masked with caption_mask[:, i]; many S2VT implementations mask
            # with caption_mask[:, i + 1] instead -- confirm which is intended.
            cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logit_words, labels=onehot_labels)
            cross_entropy = cross_entropy * caption_mask[:, i]

            # Batch-averaged masked loss for this step, accumulated over steps.
            current_loss = tf.reduce_sum(cross_entropy) / self.batch_size
            loss = loss + current_loss

            # Record the argmax word of the first sample (used at inference).
            max_prob_index = tf.argmax(logit_words, 1)[0]
            generated_words.append(max_prob_index)

            # Feed the predicted word back as the next input.  During training
            # this assignment is overwritten by the teacher-forced lookup at
            # the top of the next iteration.
            current_embed = tf.nn.embedding_lookup(self.Wemb, max_prob_index)
            current_embed = tf.expand_dims(current_embed, 0)

        return [video, video_mask, caption, caption_mask], [loss, generated_words]


"""
=======================================================================================
                                全局参数设置
=======================================================================================
"""
video_path = '../data/MSVD_train'
video_train_feat_path = './rgb_train_features'
video_test_feat_path = './rgb_test_features'
video_train_data_path = '../data/MSVD_train/video_corpus.csv'
# NOTE(review): the test corpus points at the same CSV as the training
# corpus -- confirm this is intentional.
video_test_data_path = '../data/MSVD_train/video_corpus.csv'
model_path = './models'

"""
=======================================================================================
                                训练参数设置
=======================================================================================
"""
# The VGG fc7 layer yields a 4096-dimensional feature vector per frame.
dim_image = 4096
# Number of hidden units per LSTM.
dim_hidden = 1000

# Encoding steps (frames per video), decoding steps (caption length).
n_video_lstm_step = 80
n_caption_lstm_step = 20
n_frame_step = 80

n_epochs = 1000
batch_size = 50
# Learning rate for Adam.
learning_rate = 0.0001
# Maximum number of model checkpoints to keep.
max_save_model = 10

"""
=======================================================================================
                                训练和测试
=======================================================================================
"""


# 训练函数
def train():
    """Train the caption model on the MSVD features and save checkpoints.

    Loads captions and feature paths, builds the vocabulary over both splits,
    constructs the TF1 graph, then runs ``n_epochs`` of mini-batch training.
    Per-batch losses are appended to loss.txt, the loss curve is plotted every
    100 epochs, and a checkpoint is saved every 50 epochs under ``model_path``.
    """
    # Rows whose .npy feature file exists; 'video_path' repeats because each
    # video has several captions.
    train_data, train_captions = get_video_data(video_train_data_path, video_train_feat_path)
    # Same filtering for the test split (used here only to widen the vocabulary).
    test_data, test_captions = get_video_data(video_test_data_path, video_test_feat_path)

    captions_list = list(train_captions) + list(test_captions)
    # BUG FIX: np.object was removed in NumPy 1.24; the builtin is equivalent.
    captions = np.asarray(captions_list, dtype=object)

    # Build the vocabulary over all captions (threshold 0 keeps every word)
    # and the log-frequency bias initializer.
    wordtoix, ixtoword, bias_init_vector = build_wordvocab(captions, word_count_threshold=0)

    # Instantiate the model and build the training graph.
    model = VideoCaptionGenerator(
        dim_image=dim_image,
        n_words=len(wordtoix),
        dim_hidden=dim_hidden,
        batch_size=batch_size,
        n_lstm_steps=n_frame_step,
        n_video_lstm_step=n_video_lstm_step,
        n_caption_lstm_step=n_caption_lstm_step,
        bias_init_vector=bias_init_vector)

    input_data, output_data = model.build_model()
    tf_video, tf_video_mask, tf_caption, tf_caption_mask = input_data
    tf_loss, _ = output_data

    # Keep only the most recent checkpoints.
    saver = tf.train.Saver(max_to_keep=max_save_model)
    # Optimizer for the whole graph.
    train_op = tf.train.AdamOptimizer(learning_rate).minimize(tf_loss)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    if not os.path.exists(model_path):
        os.mkdir(model_path)

    # Per-epoch mean losses, used for the plot.
    loss_to_draw = []
    init = tf.global_variables_initializer()
    # BUG FIX: the original opened a second Session inside "with tf.Session()",
    # leaking the first and never closing the second.  Create exactly one
    # session with the GPU config; the context managers also guarantee the
    # loss log is closed even if training raises.
    with tf.Session(config=config) as sess, open('loss.txt', 'w') as loss_fd:
        sess.run(init)

        # Main training loop.
        for epoch in range(0, n_epochs):
            loss_to_draw_epoch = []

            # Shuffle the training rows for this epoch.
            index = list(train_data.index)
            np.random.shuffle(index)
            # BUG FIX: DataFrame.ix was removed in pandas 1.0; .loc performs
            # the same label-based reindexing here.
            train_data = train_data.loc[index]

            # Each video has several captions: group rows by path and pick
            # one caption per video at random for this epoch.
            current_train_data = train_data.groupby('video_path').apply(lambda x: x.iloc[np.random.choice(len(x))])
            current_train_data = current_train_data.reset_index(drop=True)

            # Walk the epoch's data in batch_size strides.
            for start in range(0, len(current_train_data), batch_size):
                start_time = time.time()

                # Fetch the batch starting at `start`.
                current_feats, current_video_masks, current_caption_matrix, current_caption_masks = get_batch_data(
                    start, batch_size, current_train_data, wordtoix)
                # The graph is built for a fixed batch size; skip the ragged tail.
                if len(current_caption_matrix) != batch_size:
                    continue

                # One optimization step.
                _, loss_val = sess.run(
                    [train_op, tf_loss],
                    feed_dict={
                        tf_video: current_feats,
                        tf_video_mask: current_video_masks,
                        tf_caption: current_caption_matrix,
                        tf_caption_mask: current_caption_masks
                    })
                loss_to_draw_epoch.append(loss_val)

                print('[Train]: Epoch: %d, idx：%d, loss: %3.4f, Elapsed time: %f' % (epoch, start, loss_val,
                                                                                     (time.time() - start_time)))
                loss_fd.write(
                    'epoch:' + str(epoch) + ',idx:' + str(start / batch_size) + ',loss:' + str(loss_val) + '\n')

            # Every 100 epochs, plot the loss curve so far and save the image.
            if epoch % 100 == 0:
                loss_to_draw.append(np.mean(loss_to_draw_epoch))
                plt_save_dir = "./loss_imgs"
                if not os.path.exists(plt_save_dir):
                    os.mkdir(plt_save_dir)
                plt_save_img_name = str(epoch) + '.png'
                plt.plot(range(len(loss_to_draw)), loss_to_draw, color='g')
                plt.grid(True)
                plt.savefig(os.path.join(plt_save_dir, plt_save_img_name))

            # Every 50 epochs, save a checkpoint.
            if epoch % 50 == 0:
                print("Epoch ", epoch, " is done. Saving the model ...")
                saver.save(sess, os.path.join(model_path, 'model'), global_step=epoch)


# 测试函数
def test(test_model_path='./models/model-950'):
    """Generate captions for the test split with a trained model.

    Args:
        test_model_path: checkpoint prefix to restore.

    Writes one "beam_size_1\t<video>\t<sentence>" line per test video to
    S2VT_results.txt.
    """
    test_data, test_captions = get_video_data(video_test_data_path, video_test_feat_path)

    # One randomly chosen caption row per video.
    current_test_data = test_data.groupby('video_path').apply(lambda x: x.iloc[np.random.choice(len(x))])
    current_test_data = current_test_data.reset_index(drop=True)
    test_videos_path = current_test_data['video_path']

    # BUG FIX: the vocabularies were saved as pickled dict arrays, so
    # NumPy >= 1.16.3 requires allow_pickle=True to load them back.
    ixtoword = pd.Series(np.load('./data/ixtoword.npy', allow_pickle=True).tolist())
    wordtoix = pd.Series(np.load('./data/wordtoix.npy', allow_pickle=True).tolist())
    bias_init_vector = np.load('./data/bias_init_vector.npy')

    # Decode one video at a time.
    test_batch_size = 1
    model = VideoCaptionGenerator(
        dim_image=dim_image,
        n_words=len(ixtoword),
        dim_hidden=dim_hidden,
        batch_size=test_batch_size,
        n_lstm_steps=n_frame_step,
        n_video_lstm_step=n_video_lstm_step,
        n_caption_lstm_step=n_caption_lstm_step,
        bias_init_vector=bias_init_vector)

    input_data, output_data = model.build_model(isTest=True)
    tf_video, tf_video_mask, tf_caption, tf_caption_mask = input_data
    tf_loss, tf_generated_word_index = output_data

    # The with-blocks guarantee the session and output file are closed even
    # if decoding raises.
    with tf.Session() as sess, open('S2VT_results.txt', 'w') as test_output_txt_fd:
        # Restore the trained weights.
        saver = tf.train.Saver()
        saver.restore(sess, test_model_path)

        for idx, video_feat_path in enumerate(test_videos_path):
            print(idx, video_feat_path)

            # Fetch the single sample at position idx.
            current_feats, current_video_masks, current_caption_matrix, current_caption_masks = get_batch_data(
                idx, test_batch_size, current_test_data, wordtoix)

            generated_word_index = sess.run(
                tf_generated_word_index,
                feed_dict={
                    tf_video: current_feats,
                    tf_video_mask: current_video_masks,
                    tf_caption: current_caption_matrix,
                    tf_caption_mask: current_caption_masks
                })

            # Map predicted indices back to words.
            generated_words = ixtoword[generated_word_index]

            # Cut the sentence at the first <eos>.
            # NOTE(review): if no <eos> is generated, argmax returns 0 and only
            # the first word survives -- confirm this fallback is intended.
            punctuation = np.argmax(np.array(generated_words) == '<eos>') + 1
            generated_words = generated_words[:punctuation]

            generated_sentence = ' '.join(generated_words)
            generated_sentence = generated_sentence.replace('<bos> ', '')
            generated_sentence = generated_sentence.replace(' <eos>', '')
            print(generated_sentence, '\n')
            # Derive the original video name from the feature file path.
            video_name = video_feat_path.split('/')[-1]
            video_name = video_name[:video_name.index('.npy')]
            test_output_txt_fd.write('beam_size_1\t' + video_name + '\t' + generated_sentence + '\n')


if __name__ == '__main__':
    # Interactive entry point: 1 = train, 2 = test.
    choose = input('----please choose----\n   option 1: train\n   option 2: test\ninput your choose:')
    # BUG FIX: non-numeric input used to crash with ValueError; treat it as
    # a wrong choice instead.
    try:
        choose = int(choose)
    except ValueError:
        choose = 0
    if choose == 1:
        train()
    elif choose == 2:
        test()
    else:
        print("your choose is wrong!")
