# -*- coding: utf-8 -*-

import sys
sys.path.append('/home/cnn/pro/yazif/vqa/')

import h5py
from nltk.tokenize import word_tokenize
import numpy as np
import json
from utils import config

# numpy dtype used for every encoded array written to the HDF5 file
dtype = 'uint32'
# only the top_answer_size most frequent answers form the answer vocabulary
top_answer_size = 1000
# questions are right-aligned / truncated to this many tokens
max_length = 26


def get_data_from_json(path):
    """Load the json document stored at *path* and return it."""
    with open(path, 'r') as fp:
        return json.load(fp)


def get_unique_img(imgs):
    """Return the unique image paths in *imgs*, most frequent first.

    Paths with equal counts are ordered by path string, descending —
    this matches the (count, path) tuple sort used throughout this file.

    Args:
        imgs: list of dicts, each carrying an 'img_path' key.

    Returns:
        list of unique image path strings, sorted by frequency (desc).
    """
    counts = {}
    for img in imgs:
        path = img['img_path']
        counts[path] = counts.get(path, 0) + 1
    # sort (count, path) tuples descending: most frequent path first
    ranked = sorted(((c, p) for p, c in counts.items()), reverse=True)
    # idiom fix: comprehension instead of the original index loop + append
    return [p for _, p in ranked]


def get_top_answer(imgs, vocab_size):
    """Return the *vocab_size* most frequent answers in *imgs*.

    Answers are ranked by count descending; ties break on the answer
    string, descending. If there are fewer than vocab_size distinct
    answers, all of them are returned (the original indexed past the
    end and raised IndexError in that case).

    Args:
        imgs: list of dicts, each carrying an 'ans' key.
        vocab_size: maximum number of answers to keep.

    Returns:
        list of answer strings, at most vocab_size long.
    """
    # count answer frequencies
    counts = {}
    for img in imgs:
        ans = img['ans']
        counts[ans] = counts.get(ans, 0) + 1
    # bug fix: dict.iteritems() is Python 2 only — items() works everywhere
    cw = sorted([(count, w) for w, count in counts.items()], reverse=True)
    print ('top answer and their counts:')
    print ('\n'.join(map(str, cw[:20])))

    # the top vocab_size answers form the answer vocabulary
    return [w for _, w in cw[:vocab_size]]


def filter_question(imgs, atoi):
    """Drop entries whose answer is not in the answer vocabulary.

    Args:
        imgs: list of dicts, each carrying an 'ans' key.
        atoi: answer -> index mapping; only membership is used.

    Returns:
        new list containing only the entries whose answer is in atoi.
    """
    # idiom fix: comprehension instead of enumerate (index was unused) + append
    new_img = [img for img in imgs if img['ans'] in atoi]

    print ('question number reduce from %d to %d ' % (len(imgs), len(new_img)))
    return new_img


def prepro_question(imgs):
    """Tokenize every question and drop punctuation tokens.

    May require the nltk 'punkt' data:
        import nltk
        nltk.download('punkt')

    Adds a 'processed_tokens' list (lower-cased, punctuation-free tokens)
    to each entry and returns the mutated imgs list.
    """
    punctuation = {',', '.', ':', ';', '?', '(', ')', '[', ']', '&', '!', '*', '@', '#', '$', '%', '\''}
    for entry in imgs:
        # tokenize the lower-cased question text
        tokens = word_tokenize(str(entry['question']).lower())
        # keep only non-punctuation tokens
        entry['processed_tokens'] = [t for t in tokens if t not in punctuation]
    return imgs


def build_vocab_question(imgs):
    """Build the question-word vocabulary and map rare words to 'UNK'.

    Steps:
      1. count word frequencies over all 'processed_tokens',
      2. keep words occurring more than min_count times,
      3. append the special 'UNK' token to the vocabulary,
      4. store a 'final_question' per entry with rare words -> 'UNK'.

    Args:
        imgs: list of dicts carrying a 'processed_tokens' token list.

    Returns:
        (imgs, vocab): the mutated imgs list and the vocabulary list
        (frequent words in dict insertion order, then 'UNK').
    """
    min_count = 0

    counts = {}
    for img in imgs:
        for w in img['processed_tokens']:
            counts[w] = counts.get(w, 0) + 1
    # bug fix: dict.iteritems() is Python 2 only — items() works everywhere
    cw = sorted([(count, w) for w, count in counts.items()], reverse=True)
    print ('top words and their counts:')
    print ('\n'.join(map(str, cw[:20])))

    total_words = sum(counts.values())
    print ('total words:', total_words)
    bad_words = [w for w, n in counts.items() if n <= min_count]
    vocab = [w for w, n in counts.items() if n > min_count]
    bad_count = sum(counts[w] for w in bad_words)

    # bug fix: the original printed a raw fraction under a '%%' label;
    # max(..., 1) also avoids ZeroDivisionError on empty input
    print ('number of bad words: %d/%d = %.2f%%' % (len(bad_words), len(counts), len(bad_words) * 100.0 / max(len(counts), 1)))
    print ('number of words in vocab would be %d' % (len(vocab)))
    print ('number of UNK: %d/%d = %.2f%%' % (bad_count, total_words, bad_count * 100.0 / max(total_words, 1)))

    print ('insert the special UNK token')
    vocab.append('UNK')

    for img in imgs:
        # rewrite rare words as 'UNK' in the final question
        img['final_question'] = [w if counts.get(w, 0) > min_count else 'UNK'
                                 for w in img['processed_tokens']]
    return imgs, vocab


def encoding_question(imgs, wtoi):
    """Encode each entry's final_question as a right-aligned index array.

    Uses the module-level `max_length` and `dtype` constants.

    Args:
        imgs: list of dicts carrying 'final_question' (token list) and
            'ques_id'. Every token must be a key of wtoi (rare tokens are
            expected to have been mapped to 'UNK' beforehand).
        wtoi: word -> 1-based vocabulary index.

    Returns:
        label_arrays: (N, max_length) array of word indices, right-aligned
            and left-padded with 0. Over-long questions keep only their
            last max_length tokens.
        label_length: (N,) question lengths clipped to max_length.
        question_id: (N,) the 'ques_id' of each entry.
    """
    N = len(imgs)
    label_arrays = np.zeros((N, max_length), dtype=dtype)
    label_length = np.zeros(N, dtype=dtype)
    question_id = np.zeros(N, dtype=dtype)

    # note: the redundant question_counter (always equal to i) was removed
    for i, img in enumerate(imgs):
        question = img['final_question']
        question_id[i] = img['ques_id']
        label_length[i] = min(max_length, len(question))

        # right-align: column where the first kept token goes
        pos = max_length - len(question)
        for k, w in enumerate(question):
            # bug fix: for over-long questions pos is negative and the old
            # guard (pos + k < max_length) was always true, so early tokens
            # were written through negative indices and only accidentally
            # overwritten later; skip them explicitly instead (same net
            # result, no out-of-intent writes)
            if 0 <= pos + k < max_length:
                label_arrays[i, pos + k] = wtoi[w]

    return label_arrays, label_length, question_id


def apply_vocab_question(imgs, wtoi):
    """Project each entry's processed tokens onto the vocabulary.

    Tokens absent from *wtoi* are replaced by 'UNK'; the result is stored
    under 'final_question'. Returns the mutated imgs list.
    """
    for entry in imgs:
        final = []
        for token in entry['processed_tokens']:
            final.append(token if token in wtoi else 'UNK')
        entry['final_question'] = final
    return imgs


def encoding_img(imgs, imgtoi):
    """Map every entry in *imgs* to its position in the unique-image list.

    Args:
        imgs: list of dicts carrying an 'img_path' key.
        imgtoi: image path -> index in the unique-image list.

    Returns:
        (N,) array where element i is imgtoi[imgs[i]['img_path']].
        NOTE(review): imgtoi.get yields None for unknown paths, which
        would fail on assignment — assumes every path is present; verify
        against the caller.
    """
    img_pos = np.zeros(len(imgs), dtype=dtype)
    for idx, entry in enumerate(imgs):
        img_pos[idx] = imgtoi.get(entry['img_path'])
    return img_pos


def encoding_answer(imgs, atoi):
    """Encode each entry's answer as its index in *atoi*.

    Answers missing from atoi map to 0 (atoi is 1-based, so 0 is free to
    mean "unknown"). Returns a (N,) array of answer indices.
    """
    ans_arrays = np.zeros(len(imgs), dtype=dtype)
    for idx, entry in enumerate(imgs):
        ans_arrays[idx] = atoi.get(entry['ans'], 0)
    return ans_arrays


def encoding_mc_answer(imgs, atoi):
    """Encode each entry's multiple-choice answers as vocabulary indices.

    Returns a (N, 18) array; column j holds the index of the j-th entry in
    'MC_ans' (0 when the answer is not in atoi). The width 18 presumably
    matches the fixed multiple-choice count of the dataset — confirm
    against the raw json.
    """
    mc_ans_array = np.zeros((len(imgs), 18), dtype=dtype)
    for row, entry in enumerate(imgs):
        for col, ans in enumerate(entry['MC_ans']):
            mc_ans_array[row, col] = atoi.get(ans, 0)
    return mc_ans_array


def main():
    """Run the VQA preprocessing pipeline end to end.

    Loads the raw train/val question json files, builds the answer and
    question vocabularies from the training split, encodes questions,
    answers and image positions into numpy arrays, and writes:
      * an HDF5 file with the encoded arrays,
      * a json file with the index <-> word/answer/image mappings,
      * an lstm config json derived from the preprocessing results.
    """
    lstm_config_path = '../configs/lstm.json'
    prepro_config, _ = config.get_config_from_json('../configs/prepro.json')

    img_train = get_data_from_json(prepro_config.vqa_raw_train)
    img_val = get_data_from_json(prepro_config.vqa_raw_val)

    unique_img_train = get_unique_img(img_train)
    unique_img_val = get_unique_img(img_val)
    imgtoi_train = {i: ix for ix, i in enumerate(unique_img_train)}
    imgtoi_val = {i: ix for ix, i in enumerate(unique_img_val)}
    itoimg_train = {ix: i for ix, i in enumerate(unique_img_train)}
    itoimg_val = {ix: i for ix, i in enumerate(unique_img_val)}

    # keep only the top_answer_size most frequent answers
    top_answer = get_top_answer(img_train, top_answer_size)

    # build 1-based answer <-> index mappings (0 is reserved for "unknown")
    atoi = {w: i + 1 for i, w in enumerate(top_answer)}
    itoa = {i + 1: w for i, w in enumerate(top_answer)}

    # drop questions whose answer is outside the answer vocabulary
    img_train = filter_question(img_train, atoi)
    img_val = filter_question(img_val, atoi)
    # tokenize the questions
    img_train = prepro_question(img_train)
    img_val = prepro_question(img_val)

    img_train, vocab = build_vocab_question(img_train)
    itow = {i + 1: w for i, w in enumerate(vocab)}
    wtoi = {w: i + 1 for i, w in enumerate(vocab)}

    ques_train, ques_length_train, ques_id_train = encoding_question(img_train, wtoi)

    img_val = apply_vocab_question(img_val, wtoi)
    ques_val, ques_length_val, ques_id_val = encoding_question(img_val, wtoi)

    img_pos_train = encoding_img(img_train, imgtoi_train)
    img_pos_val = encoding_img(img_val, imgtoi_val)

    ans_train = encoding_answer(img_train, atoi)
    ans_val = encoding_answer(img_val, atoi)

    # create output h5 file for training set; bug fix: `with` guarantees
    # the file is closed even when a create_dataset call raises (the
    # original leaked the handle on error)
    with h5py.File(prepro_config.h5_file_path, "w") as f:
        f.create_dataset("ques_train", dtype=dtype, data=ques_train)
        f.create_dataset("ques_val", dtype=dtype, data=ques_val)

        f.create_dataset("answers", dtype=dtype, data=ans_train)
        f.create_dataset("ans_val", dtype=dtype, data=ans_val)

        f.create_dataset("img_pos_train", dtype=dtype, data=img_pos_train)
        f.create_dataset("img_pos_val", dtype=dtype, data=img_pos_val)
    print ('wrote ', prepro_config.h5_file_path)

    # create output json file
    out = {}
    out['ix_to_word'] = itow  # encode the (1-indexed) vocab
    out['ix_to_ans'] = itoa
    out['word_to_ix'] = wtoi
    out['ix_to_img_train'] = itoimg_train
    out['ix_to_img_val'] = itoimg_val

    # bug fix: json.dump(out, open(...)) never closed the file handle
    with open(prepro_config.json_file_path, 'w') as json_file:
        json.dump(out, json_file)
    print ('wrote ', prepro_config.json_file_path)

    lstm_json = {
        "ans_vocab_size": top_answer_size,
        "max_ques_length": max_length,
        "num_lstm_layers": 2,
        "rnn_size": 512,
        "embedding_size": 512,
        "fc7_feat_length": 4096,
        "lstm_steps": max_length + 1,
        "q_vocab_size": len(itow),
        "json_file_path": prepro_config.json_file_path,
        "h5_file_path": prepro_config.h5_file_path,
        "vgg19_npy_path": prepro_config.vgg19_npy_path,
        "is_have_img_data": "",
        "img_root": prepro_config.img_root
    }
    # bug fix: same handle leak as above
    with open(lstm_config_path, 'w') as lstm_file:
        json.dump(lstm_json, lstm_file)
    print ('wrote ', lstm_config_path)

# allow the module to be imported (e.g. for its helpers) without running the pipeline
if __name__ == '__main__':
    main()