# sampling: Sampler and BleuValidator
#from __future__ import print_function
import numpy
import argparse
import pprint
import os
import cPickle as pkl
import subprocess
import logging
import time
import re
import configurations
from search import BeamSearch
from nmt import EncoderDecoder
from stream import get_devtest_stream

logger = logging.getLogger(__name__)


class Sampler(object):
    """Training hook that logs example translations (input / reference /
    model output) for a random subset of a batch."""

    def __init__(self, search_model, **kwards):
        """
        :param search_model: search wrapper whose apply(src_column) returns
            (outputs, costs) for a single source sentence.
        Required keyword args: unk_token, eos_token, vocab_src, vocab_trg,
        hook_samples (max number of sentences to log per batch).
        """
        self.search_model = search_model
        self.unk_token = kwards.pop('unk_token')
        self.eos_token = kwards.pop('eos_token')
        self.vocab_src = kwards.pop('vocab_src')
        self.vocab_trg = kwards.pop('vocab_trg')
        self.hook_samples = kwards.pop('hook_samples')

        self.dict_src, self.idict_src = self._get_dict(self.vocab_src)
        self.dict_trg, self.idict_trg = self._get_dict(self.vocab_trg)


    def apply(self, src_batch, trg_batch):
        """Sample up to hook_samples rows from the batch, decode them with
        the search model, and log input/target/output plus the model cost.

        :param src_batch: 2-D array (batch, time) of source token ids
        :param trg_batch: 2-D array (batch, time) of target token ids
        """
        batch_size = src_batch.shape[0]
        hook_samples = min(batch_size, self.hook_samples)
        # sample without replacement so the same sentence is not logged twice
        sample_idx = numpy.random.choice(batch_size, hook_samples, replace=False)
        input_ = src_batch[sample_idx, :]
        target_ = trg_batch[sample_idx, :]

        for i in range(hook_samples):
            input_length = self._get_true_length(input_[i], self.dict_src)
            target_length = self._get_true_length(target_[i], self.dict_trg)

            # strip padding; the search model expects a column vector
            inp = input_[i, :input_length]
            outputs, costs = self.search_model.apply(inp[:, None])
            sample_length = self._get_true_length(numpy.array(outputs), self.dict_trg)

            logger.info("Input: {}".format(self._idx_to_word(input_[i][:input_length], self.idict_src)))
            logger.info("Target: {}".format(self._idx_to_word(target_[i][:target_length], self.idict_trg)))
            logger.info("Output: {}".format(self._idx_to_word(outputs[:sample_length], self.idict_trg)))
            logger.info("Cost: %.4f\n" % costs)


    def _get_dict(self, vocab_file):
        """Load a pickled word->id dict and build the inverse id->word dict.

        :raises IOError: if vocab_file does not exist (the original code only
            logged the error and then crashed with a NameError on ddict).
        """
        if not os.path.isfile(vocab_file):
            logger.error("file [{}] does not exist".format(vocab_file))
            raise IOError("vocabulary file not found: {}".format(vocab_file))

        # close the file handle deterministically (it was leaked before)
        with open(vocab_file, 'rb') as f:
            ddict = pkl.load(f)

        # items() works on both Python 2 and 3; iteritems() is Py2-only
        iddict = dict((vv, kk) for kk, vv in ddict.items())

        # id 0 is reserved for the end-of-sentence marker
        iddict[0] = self.eos_token

        return ddict, iddict


    def _get_true_length(self, seq, vocab):
        """Return the length of seq up to and including the first EOS id,
        or len(seq) when no EOS occurs."""
        try:
            return seq.tolist().index(vocab[self.eos_token]) + 1
        except ValueError:
            return len(seq)


    def _idx_to_word(self, seq, ivocab):
        """Map a sequence of token ids to a space-joined string; ids missing
        from ivocab become the UNK token."""
        return " ".join([ivocab.get(idx, self.unk_token) for idx in seq])


class BleuValidator(object):
    """Decodes a dev/test stream with a beam-search model, writes the 1-best
    hypotheses to a file, and scores them against a reference using external
    (mteval-style) sgm-conversion and BLEU scripts."""

    def __init__(self, search_model, test_src=None, test_ref=None, **kwards):
        """
        :param search_model: search wrapper whose apply(src) returns
            (outputs, scores), where scores are costs (lower is better).
        :param test_src: plain-text source file; falls back to 'valid_src'
            from the configuration when omitted.
        :param test_ref: reference file; falls back to 'valid_trg'.
        Required keyword args: unk_token, eos_token, vocab_src, vocab_trg,
        normalized_bleu, bleu_script, res_to_sgm (plus valid_src/valid_trg
        when the test files are not given).
        """
        self.search_model = search_model
        self.unk_token = kwards.pop('unk_token')
        self.eos_token = kwards.pop('eos_token')
        self.vocab_src = kwards.pop('vocab_src')
        self.vocab_trg = kwards.pop('vocab_trg')
        self.normalize = kwards.pop('normalized_bleu')
        self.bleu_script = kwards.pop('bleu_script')
        self.res_to_sgm = kwards.pop('res_to_sgm')
        self.test_src = test_src
        self.test_ref = test_ref

        # fall back to the validation set when no explicit test files given
        if test_src is None or test_ref is None:
            self.test_src = kwards.pop('valid_src')
            self.test_ref = kwards.pop('valid_trg')

        self.dict_src, self.idict_src = self._get_dict(self.vocab_src)
        self.dict_trg, self.idict_trg = self._get_dict(self.vocab_trg)


    def apply(self, data_stream, out_file):
        """Translate data_stream into out_file, run the sgm conversion and
        BLEU scripts, and return the BLEU score as a float.

        :raises subprocess.CalledProcessError: if an external script fails
        :raises RuntimeError: if no BLEU score can be parsed from the report
        """
        logger.info("Begin decoding ...")
        val_start_time = time.time()
        i = 0
        # 'with' guarantees the file is closed even if decoding raises
        with open(out_file, 'w') as fout:
            for sent in data_stream.get_epoch_iterator():
                i += 1
                outputs, scores = self.search_model.apply(numpy.array(sent).T)
                if self.normalize:
                    # length-normalize the costs so short hypotheses are not
                    # unfairly preferred
                    lengths = numpy.array([len(s) for s in outputs])
                    scores = scores / lengths
                # scores are costs, so the best hypothesis has minimal score
                sidx = numpy.argmin(scores)
                # drop the trailing EOS token before detokenization
                res = self._idx_to_word(outputs[sidx][:-1], self.idict_trg)

                # never emit an empty line: it would break the BLEU scripts
                if res.strip() == '':
                    res = self.unk_token

                fout.write(res + '\n')
                if i % 100 == 0:
                    logger.info("Translated {} lines of valid/test set ...".format(i))

        logger.info("Decoding took {} minutes".format(float(time.time() - val_start_time) / 60.))

        logger.info("Evaluate ...")

        cmd_res_to_sgm = [self.res_to_sgm, 'tst', out_file, '>', out_file+'.sgm']
        cmd_bleu_cmd = ['perl', self.bleu_script, \
                        '-r', self.test_ref+'.sgm', \
                        '-s', self.test_src+'.sgm', \
                        '-t', out_file+'.sgm', \
                        '>', self.test_src+'.eval']

        # NOTE(review): shell=True is required for the '>' redirections; the
        # paths come from the trusted configuration, not from untrusted input.
        logger.info('convert result to sgm')
        subprocess.check_call(" ".join(cmd_res_to_sgm), shell=True)
        logger.info('compute bleu score')
        subprocess.check_call(" ".join(cmd_bleu_cmd), shell=True)

        # 'rU' mode was removed in Python 3.11; plain 'r' reads the report
        with open(self.test_src+'.eval', 'r') as fin:
            # the mteval report prints the score on its 8th line
            out = re.search('BLEU score = ([-.0-9]+)', fin.readlines()[7])

        # fail loudly instead of an AttributeError on out.group()
        if out is None:
            raise RuntimeError("could not parse BLEU score from {}".format(self.test_src + '.eval'))

        bleu_score = float(out.group(1))
        logger.info("Done")

        return bleu_score


    def _get_dict(self, vocab_file):
        """Load a pickled word->id dict and build the inverse id->word dict.

        :raises IOError: if vocab_file does not exist (the original code only
            logged the error and then crashed with a NameError on ddict).
        """
        if not os.path.isfile(vocab_file):
            logger.error("file [{}] does not exist".format(vocab_file))
            raise IOError("vocabulary file not found: {}".format(vocab_file))

        # close the file handle deterministically (it was leaked before)
        with open(vocab_file, 'rb') as f:
            ddict = pkl.load(f)

        # items() works on both Python 2 and 3; iteritems() is Py2-only
        iddict = dict((vv, kk) for kk, vv in ddict.items())

        # id 0 is reserved for the end-of-sentence marker
        iddict[0] = self.eos_token

        return ddict, iddict


    def _get_true_length(self, seq, vocab):
        """Return the length of seq up to and including the first EOS id,
        or len(seq) when no EOS occurs."""
        try:
            return seq.tolist().index(vocab[self.eos_token]) + 1
        except ValueError:
            return len(seq)


    def _idx_to_word(self, seq, ivocab):
        """Map a sequence of token ids to a space-joined string; ids missing
        from ivocab become the UNK token."""
        return " ".join([ivocab.get(idx, self.unk_token) for idx in seq])


def _main():
    """Command-line entry point: load the configuration, restore the best
    checkpoint, and report BLEU on the given test set."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--proto", default="get_config_search_gru",
                            help="Prototype config to use for config")
    arg_parser.add_argument('source', type=str)
    arg_parser.add_argument('target', type=str)
    arg_parser.add_argument('trans', type=str)
    cli_args = arg_parser.parse_args()

    config = getattr(configurations, cli_args.proto)()
    logger.info("\nModel options:\n{}".format(pprint.pformat(config)))

    # fixed seed so runs are reproducible
    rand_state = numpy.random.RandomState(1234)

    encoder_decoder = EncoderDecoder(rand_state, **config)
    encoder_decoder.build_sampler()

    # restore the best checkpoint before decoding
    encoder_decoder.load(path=config['saveto_best'])

    beam = BeamSearch(enc_dec=encoder_decoder, configuration=config,
                      beam_size=config['beam_size'],
                      maxlen=3 * config['seq_len_src'], stochastic=False)
    validator = BleuValidator(search_model=beam, test_src=cli_args.source,
                              test_ref=cli_args.target, **config)

    # build the test-set stream and translate it
    test_stream = get_devtest_stream(data_type='test', input_file=cli_args.source, **config)
    bleu = validator.apply(test_stream, cli_args.trans)

    logger.info('test bleu %.4f' % bleu)


if __name__ == '__main__':
    _main()