# --*-- coding: UTF-8 -*-

import sys
sys.path.append('/home/cnn/pro/yazif/vqa/')

import tensorflow as tf
from utils.config import process_config, get_config_from_json
from utils.dirs import create_dirs
from models.vis_lstm_model import VisLstmModel
from models.vgg_model import VggModel
from utils.language import get_encode_ques
import numpy as np

import redis
import json
import hashlib
from utils import image
import os



# Redis list this worker blocks on for incoming VQA tasks (JSON payloads).
PENDING_KEY = "PENDING:message"
# Redis list the worker pushes answered tasks onto for the downstream consumer.
DEAL_KEY = 'DEALED:message'


def get_task_json():
    """Block until a task arrives on the pending queue and return it.

    Uses the module-level Redis client ``re``.  ``BRPOP`` blocks until an
    element is available and returns a ``(key, value)`` pair; only the
    value carries the task payload.  The original code iterated the pair
    and skipped any element containing the key string, which would drop a
    legitimate payload that happened to contain "PENDING:message" and then
    fall through returning None — unpacking the pair avoids both problems.

    Returns:
        dict: the task decoded from its JSON payload.
    """
    print('====>wait task')
    _key, payload = re.brpop(PENDING_KEY)
    print('====>get task')
    return json.loads(payload)


import urllib2


def get_img(url):
    """Return a local cache path for the image at *url*, downloading if needed.

    The cache file name is the MD5 hex digest of the URL.  A fresh digest
    object is created per call: the original shared module-level ``md5``
    object accumulated ``update()`` calls across invocations, so the same
    URL produced a different path every call and the cache never hit.

    Args:
        url: HTTP(S) URL of the image to fetch.

    Returns:
        str: relative path of the cached .jpg file.
    """
    digest = hashlib.md5(url.encode(encoding='utf-8')).hexdigest()
    img_path = '../img_tmp/{}.jpg'.format(digest)

    if not os.path.exists(img_path):
        print('downing image...')
        response = urllib2.urlopen(url)
        try:
            img = response.read()
        finally:
            # urllib2 responses are not context managers; close explicitly
            # so the socket is released even if read() raises.
            response.close()
        with open(img_path, 'wb') as f:
            f.write(img)
    return img_path


# NOTE(review): this single md5 object is shared by get_img(), which calls
# update() on it — the digest therefore depends on every URL seen so far,
# so the same URL maps to a different cache file on each call. Consider a
# fresh hashlib.md5() per call instead.
md5 = hashlib.md5()
# Redis connection used as the task queue.
# NOTE(review): the name `re` shadows the stdlib regex module, and the
# host/password are hardcoded in source — move credentials to config.
re = redis.Redis('frp.yazif.top', password='qa123456', db=0)

config = process_config('../configs/lstm.json')

# create the experiments dirs
create_dirs([config.summary_dir, config.checkpoint_dir])

# Vocabulary mappings from the preprocessed dataset JSON.
data_set, _ = get_config_from_json(config.json_file_path)
wtoi = data_set.word_to_ix  # word -> index, for encoding questions
itoa = data_set.ix_to_ans  # index -> answer string, for decoding predictions

# VGG-19 network used as the image feature extractor (fc7 is consumed below).
vgg = VggModel(config.vgg19_npy_path)
vgg.build()

# VQA LSTM model; mode=1 — presumably inference mode, confirm in VisLstmModel.
model = VisLstmModel(config, mode=1)
model.build_model()

saver = tf.train.Saver()
# Hardcoded path to a specific trained checkpoint (step/epoch 11).
save_path = '/home/cnn/pro/yazif/vqa/experiments/vqa/checkpoint/vqa.ckpt-11'


with tf.Session() as sess:
    module_file = save_path
    # module_file = tf.train.latest_checkpoint(save_path)
    init = tf.global_variables_initializer()
    sess.run(init)
    # Restore trained weights over the freshly initialised variables.
    saver.restore(sess, module_file)

    # Worker loop: block on the Redis queue and answer one task per iteration.
    while True:
        ans = None
        pend_json = get_task_json()
        try:
            print('======>task begin<========')
            url = pend_json['imageUri']
            img_path = get_img(url)
            print('img_path', img_path)
            # Single-image batch in the 224x224x3 layout VGG expects.
            img = np.zeros((1, 224, 224, 3), np.float32)
            img[0, ...] = image.load_image(img_path)
            print('ques', pend_json['ques'])
            # Encode the question text into vocabulary indices.
            ques = get_encode_ques(pend_json['ques'], wtoi)
            # Extract fc7 image features, then run the VQA model on them.
            img_feat = sess.run(vgg.fc7, feed_dict={vgg.rgb: img})
            pred, ans_pred_vocab = sess.run([model.predictions, model.answer_probab], feed_dict={
                model.img_feat: img_feat,
                model.ques: ques,
                model.batch_size: 1
            })
            del img
            # Pair each answer index with its negated probability so a plain
            # ascending sort yields descending-probability order.
            ans_pred = [(-ans_pred_vocab[0][idx], idx) for idx in range(len(ans_pred_vocab[0]))]
            ans_pred.sort()

            print('top5:')
            for i in range(5):
                print(itoa[str(ans_pred[i][1])], -ans_pred[i][0])
            ans = itoa[str(pred[0])]
            print('ans', ans)
        except UnicodeEncodeError:
            # Question contained characters the pipeline cannot encode.
            ans = 'Bad, wrong. . . The question you sent is not supported for analysis. Please send in English'
        except BaseException:
            # NOTE(review): BaseException also swallows KeyboardInterrupt and
            # SystemExit, making the worker hard to stop and hiding the real
            # error — consider `except Exception` plus logging the traceback.
            ans = 'bad, Unknown exception'
        # Reply envelope routed back to the original sender.
        dealed = {
            'toUser': pend_json['toUser'],
            'fromUser': pend_json['fromUser'],
            'answer': ans
        }
        # NOTE(review): str(dict) emits a Python repr (single quotes), not
        # JSON — confirm the consumer of DEALED:message expects this format;
        # json.dumps(dealed) would be the interoperable choice.
        dealed_str = str(dealed)
        re.lpush(DEAL_KEY, dealed_str.encode("UTF-8"))
        print('====>task end<====')