# -*- coding: utf-8 -*-

import sys

sys.path.append('/home/cnn/pro/yazif/vqa/')

from nltk.tokenize import word_tokenize
import numpy as np
import json

# Encoding settings for the question label arrays.
dtype = 'uint32'  # numpy dtype used for the encoded-question arrays
max_length = 26  # max tokens kept per question; encoded questions are right-aligned in this many slots

# Punctuation tokens stripped from tokenized questions (note: includes the apostrophe).
english_punctuations = [',', '.', ':', ';', '?', '(', ')', '[', ']', '&', '!', '*', '@', '#', '$', '%', '\'']


def prepro_question(ques):
    """Normalize a raw question: lowercase, tokenize with NLTK, and drop
    any token that is a punctuation mark.

    Returns the surviving tokens as a list of strings.
    """
    lowered = str(ques).lower()
    kept = []
    for tok in word_tokenize(lowered):
        if tok in english_punctuations:
            continue
        kept.append(tok)
    return kept


def get_encode_ques(ques, wtoi):
    """Encode a question string as a right-aligned row of vocabulary indices.

    Parameters
    ----------
    ques : str
        Raw question text; preprocessed via ``prepro_question``.
    wtoi : dict
        Word-to-index vocabulary. Must contain an ``'UNK'`` entry, since
        out-of-vocabulary tokens are mapped to it (KeyError otherwise).

    Returns
    -------
    numpy.ndarray
        Array of shape ``(1, max_length)`` and dtype ``dtype``. Token
        indices fill the trailing slots; leading slots stay 0 (padding).
        Questions longer than ``max_length`` keep only their last
        ``max_length`` tokens.
    """
    tokens = prepro_question(ques)
    # Map out-of-vocabulary words to the UNK token.
    tokens = [w if w in wtoi else 'UNK' for w in tokens]
    label_arrays = np.zeros((1, max_length), dtype=dtype)
    # Right-align: first token lands at this offset (negative if too long).
    pos = max_length - len(tokens)
    for k, w in enumerate(tokens):
        idx = pos + k
        # Guard the lower bound too: for over-length questions idx goes
        # negative and numpy's negative indexing would wrap around and
        # write into the array's tail. Skipping those tokens makes the
        # "keep the last max_length tokens" truncation explicit instead
        # of relying on later iterations overwriting the wrapped writes.
        if 0 <= idx < max_length:
            label_arrays[0][idx] = int(wtoi[w])
    return label_arrays
