import argparse
import numpy as np
import os
import json
import nltk
from collections import Counter
import pickle


def encode(seq_tokens, token_to_idx, allow_unk=False):
    """Map every token in a sequence to its integer index.

    :param seq_tokens: iterable of string tokens to encode
    :param token_to_idx: dict mapping token -> integer index
    :param allow_unk: if True, out-of-vocabulary tokens are encoded as
        the '<UNK>' entry of ``token_to_idx``; if False, an
        out-of-vocabulary token raises KeyError
    :return: list of integer indices, one per input token
    """
    indices = []
    for tok in seq_tokens:
        if tok in token_to_idx:
            indices.append(token_to_idx[tok])
        elif allow_unk:
            # Fall back to the dedicated unknown-token slot.
            indices.append(token_to_idx['<UNK>'])
        else:
            raise KeyError('Token "%s" not in vocab' % tok)
    return indices


def _build_vocab(instances, answer_top):
    """Build question/answer vocabularies from training instances.

    :param instances: list of annotation dicts, e.g.
        {"answer": "animal", "id": 0, "question": "what is chewing on a nut?", "video_id": 1}
    :param answer_top: keep only the ``answer_top`` most frequent answers
    :return: vocab dict with 'question_token_to_idx' (e.g.
        {'<NULL>': 0, '<UNK>': 1, 'what': 2, ...}), 'answer_token_to_idx'
        (e.g. {'<UNK0>': 0, '<UNK1>': 1, 'animal': 2, ...}) and the
        'question_answer_token_to_idx' stub.
    """
    print('Building vocab')
    # Counter replaces the hand-rolled dict + .get(key, 0) counting loop;
    # insertion order (first occurrence) is identical, so most_common()
    # tie-breaking matches the original behavior.
    answer_cnt = Counter(instance['answer'] for instance in instances)

    # Two reserved unknown slots: index 0 (<UNK0>) is the fallback used in
    # train mode, index 1 (<UNK1>) the fallback used in val/test mode.
    answer_token_to_idx = {'<UNK0>': 0, '<UNK1>': 1}

    # Frequency statistics (computed once; the original rebuilt the Counter
    # and re-ran most_common a second time).
    frequent_answers = answer_cnt.most_common(answer_top)
    total_ans = sum(answer_cnt.values())
    total_freq_ans = sum(cnt for _, cnt in frequent_answers)
    print("Number of unique answers:", len(answer_cnt))
    print("Total number of answers:", total_ans)
    print("Top %i answers account for %f%%" % (len(frequent_answers), total_freq_ans * 100.0 / total_ans))

    # Assign consecutive indices to the most frequent answers.
    for token, _ in frequent_answers:
        answer_token_to_idx[token] = len(answer_token_to_idx)
    print('Get answer_token_to_idx, num: %d' % len(answer_token_to_idx))

    # Question vocabulary: index 0 is the padding token, index 1 unknown.
    question_token_to_idx = {'<NULL>': 0, '<UNK>': 1}
    for instance in instances:
        question = instance['question'].lower()[:-1]  # drop the trailing '?'
        for token in nltk.word_tokenize(question):  # words + punctuation marks
            if token not in question_token_to_idx:
                question_token_to_idx[token] = len(question_token_to_idx)
    print('Get question_token_to_idx')
    print('len(question_token_to_idx): ' + str(len(question_token_to_idx)))

    return {
        'question_token_to_idx': question_token_to_idx,
        'answer_token_to_idx': answer_token_to_idx,
        'question_answer_token_to_idx': {'<NULL>': 0, '<UNK>': 1}
    }


def _encode_instances(instances, vocab, mode):
    """Tokenize and index-encode every question, and map answers to indices.

    :param instances: list of annotation dicts
    :param vocab: vocab dict as produced by ``_build_vocab``
    :param mode: data split name ('train', 'val' or 'test'); decides which
        unknown-answer index is used for out-of-vocab answers
    :raises ValueError: if ``mode`` is not a known split name and an
        out-of-vocab answer is encountered
    :return: tuple (questions_encoded, questions_len, question_ids,
        video_ids_tbw, video_names_tbw, all_answers)
    """
    questions_encoded = []  # one list of token indices per question
    questions_len = []      # true (unpadded) length of each question
    question_ids = []       # enumeration index of the instance
    video_ids_tbw = []      # 'video_id' field of each instance
    video_names_tbw = []    # same value, stored under a second key
    all_answers = []        # answer index per instance
    for idx, instance in enumerate(instances):
        question = instance['question'].lower()[:-1]  # drop the trailing '?'
        question_tokens = nltk.word_tokenize(question)
        question_encoded = encode(question_tokens, vocab['question_token_to_idx'], allow_unk=True)
        questions_encoded.append(question_encoded)
        questions_len.append(len(question_encoded))
        question_ids.append(idx)
        im_name = instance['video_id']
        video_ids_tbw.append(im_name)
        video_names_tbw.append(im_name)

        if instance['answer'] in vocab['answer_token_to_idx']:
            answer = vocab['answer_token_to_idx'][instance['answer']]
        elif mode in ['train']:
            answer = 0  # <UNK0>
        elif mode in ['val', 'test']:
            answer = 1  # <UNK1>
        else:
            raise ValueError('Wrong data split name')

        all_answers.append(answer)
    return (questions_encoded, questions_len, question_ids,
            video_ids_tbw, video_names_tbw, all_answers)


def _build_glove_matrix(question_token_to_idx, glove_pt):
    """Look up a GloVe vector for every question-vocab token.

    :param question_token_to_idx: token -> index mapping for questions
    :param glove_pt: path to a pickled dict mapping word -> numpy vector
    :return: float32 numpy array of shape (vocab_size, dim_word); tokens
        missing from GloVe get a zero vector
    """
    # Invert the mapping: index -> word, e.g. {0: '<NULL>', 1: '<UNK>', 2: 'what'}.
    token_itow = {i: w for w, i in question_token_to_idx.items()}
    print("Load glove from %s" % glove_pt)  # e.g. glove.840B.300d.pkl
    # Bug fix: open the pickle inside a context manager; the original
    # `pickle.load(open(...))` never closed the file handle.
    with open(glove_pt, 'rb') as f:
        glove = pickle.load(f)
    # Infer the embedding dimension from a common word.
    # NOTE(review): assumes 'the' is present in the GloVe dict — holds for
    # standard GloVe dumps, verify for custom embeddings.
    dim_word = glove['the'].shape[0]
    glove_matrix = np.asarray(
        [glove.get(token_itow[i], np.zeros((dim_word,))) for i in range(len(token_itow))],
        dtype=np.float32)
    print('glove_matrix.shape' + str(glove_matrix.shape))
    return glove_matrix


def process_questions(args):
    """Encode QA annotations and dump them to a pickle file.

    Reads ``args.annotation_file`` (a JSON list of dicts such as
    {"answer": "animal", "id": 0, "question": "what is chewing on a nut?", "video_id": 1}).
    In 'train' mode a vocabulary is built from the instances and written to
    ``args.vocab_json``; in other modes the existing vocabulary is loaded.
    Every question is tokenized, index-encoded and zero-padded to the max
    sequence length; answers are mapped to indices.  The result dict

        {'questions', 'questions_len', 'question_id', 'video_ids',
         'video_names', 'answers', 'glove'}

    ('glove' is a word-vector matrix in train mode, None otherwise) is
    pickled to ``args.output_pt``.
    """
    print('Loading data')
    with open(args.annotation_file, 'r') as dataset_file:
        instances = json.load(dataset_file)

    # Compute the vocab path once (default pattern: 'data/{}/{}_vocab.json').
    vocab_path = args.vocab_json.format(args.dataset, args.dataset)
    # Either create the vocab or load it from disk.
    if args.mode in ['train']:
        vocab = _build_vocab(instances, args.answer_top)
        print('Write into %s' % vocab_path)
        with open(vocab_path, 'w') as f:
            json.dump(vocab, f, indent=4)
    else:
        print('Loading vocab')
        with open(vocab_path, 'r') as f:
            vocab = json.load(f)

    # Encode all questions.
    print('Encoding data')
    (questions_encoded, questions_len, question_ids,
     video_ids_tbw, video_names_tbw, all_answers) = _encode_instances(
        instances, vocab, args.mode)

    # Pad every question with the <NULL> index up to the max sequence length.
    max_question_length = max(len(x) for x in questions_encoded)
    for qe in questions_encoded:
        while len(qe) < max_question_length:
            qe.append(vocab['question_token_to_idx']['<NULL>'])

    questions_encoded = np.asarray(questions_encoded, dtype=np.int32)
    questions_len = np.asarray(questions_len, dtype=np.int32)
    print('questions_encoded.shape' + str(questions_encoded.shape))

    # GloVe vectors are only needed (and only loaded) for the train split.
    glove_matrix = None
    if args.mode == 'train':
        glove_matrix = _build_glove_matrix(vocab['question_token_to_idx'],
                                           args.glove_pt)

    print('Writing', args.output_pt.format(args.dataset, args.dataset, args.mode))
    obj = {
        'questions': questions_encoded,        # padded index matrix, one row per question
        'questions_len': questions_len,        # true length of each question
        'question_id': question_ids,           # enumeration index per instance
        'video_ids': np.asarray(video_ids_tbw),    # 'video_id' per instance
        'video_names': np.array(video_names_tbw),  # same values, second key
        'answers': all_answers,                # answer index per instance
        'glove': glove_matrix,                 # word vectors for the question vocab (train only)
    }
    with open(args.output_pt.format(args.dataset, args.dataset, args.mode), 'wb') as f:
        pickle.dump(obj, f)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Bug fix: the default used to be 'svqa', which always tripped the
    # "Only support msvd-qa dataset" check below — align the default with
    # the only supported dataset so the script runs out of the box.
    parser.add_argument('--dataset', default='msvd-qa', choices=['msrvtt-qa', 'msvd-qa', 'svqa'], type=str)
    parser.add_argument('--answer_top', default=4000, type=int)
    parser.add_argument('--glove_pt', type=str, default='data/glove/glove.840B.300d.pkl',
                    help='glove pickle file, should be a map whose key are words and value are word vectors '
                            'represented by numpy arrays. Only needed in train mode')
    parser.add_argument('--output_pt', type=str, default='data/{}/{}_{}_questions.pt')
    parser.add_argument('--vocab_json', type=str, default='data/{}/{}_vocab.json')
    parser.add_argument('--mode', choices=['train', 'val', 'test'])
    parser.add_argument('--seed', type=int, default=666)

    args = parser.parse_args()
    np.random.seed(args.seed)

    # Explicit raises instead of asserts: asserts are stripped under `python -O`.
    if args.dataset != 'msvd-qa':
        raise ValueError('Only support msvd-qa dataset')
    if args.mode not in ['train', 'val', 'test']:
        raise ValueError('Wrong mode')

    args.annotation_file = './data/msvd-qa/{}_qa.json'.format(args.mode)
    # Make sure the output folder exists before process_questions writes into it.
    os.makedirs('data/{}'.format(args.dataset), exist_ok=True)
    process_questions(args)
