import argparse
import collections
import json
import logging
from logging import getLogger
from typing import OrderedDict
import numpy as np
import re
import sys
import time
import torch
from torch.linalg import inv, svd

import embeddings_torch as embeddings

# from models import build_model, Trainer

def dropout(m, p):
    """Zero out each entry of ``m`` independently with probability ``p``.

    An entry survives when a uniform draw is >= ``p``; surviving entries
    keep their original value (no inverted-dropout rescaling).  With
    ``p <= 0`` the input tensor is returned untouched.
    """
    if p > 0.0:
        keep = torch.rand(*m.shape) >= p
        return m * keep.to(m.device)
    return m

def topk_mean(m, k, inplace=False):
    """Return the per-row mean of the ``k`` largest entries of ``m``.

    Args:
        m: 2-D tensor of similarities (rows x candidates).
        k: number of top entries to average per row; ``k <= 0`` yields zeros.
        inplace: if True, ``m`` may be clobbered (its top-k entries are
            overwritten with the global minimum) to avoid a copy.

    Returns:
        1-D tensor of length ``m.shape[0]`` holding each row's top-k mean.
    """
    n = m.shape[0]
    ans = torch.zeros(n, dtype=m.dtype).to(m.device)
    if k <= 0:
        return ans
    if not inplace:
        m = torch.clone(m)
    ind0 = torch.arange(n).to(m.device)
    minimum = m.min()
    # Repeatedly take each row's current maximum, then mask it out with the
    # global minimum so the next pass finds the next-largest entry.
    # (The original pre-allocated an unused ``ind1`` buffer here; argmax
    # allocates its own result, so that allocation was dead code.)
    for _ in range(k):
        ind1 = m.argmax(dim=1)
        ans += m[ind0, ind1]
        m[ind0, ind1] = minimum

    return ans / k

def parsing():
    """Parse command line arguments and derive the torch device.

    The recommended-settings flags (``--supervised``, ``--semi_supervised``,
    ``--identical``, ``--unsupervised``) act as presets: after a first parse
    they install new defaults and the command line is parsed a second time,
    so explicitly given options still override the preset.

    Returns:
        argparse.Namespace with an extra ``device`` attribute.
    """

    def str2bool(value):
        # argparse's type=bool is a trap: bool('False') is True, so any
        # non-empty string would enable the option.  Parse it properly.
        if isinstance(value, bool):
            return value
        return value.strip().lower() not in ('false', '0', 'no', '')

    # sys.stdin.fileno() raises when stdin is not a real file (captured or
    # redirected to a non-file object); fall back to file descriptor 0.
    try:
        stdin_fd = sys.stdin.fileno()
    except (AttributeError, OSError, ValueError):
        stdin_fd = 0

    parser = argparse.ArgumentParser(description='UBDI map word embeddings in two languages into a shared space')

    parser.add_argument('src_input', help='the input source embeddings')
    parser.add_argument('trg_input', help='the input target embeddings')
    parser.add_argument('src_output', help='the output source embeddings')
    parser.add_argument('trg_output', help='the output target embeddings')
    parser.add_argument('--encoding', default='utf-8', help='the character encoding for input/output (defaults to utf-8)')
    parser.add_argument('--precision', choices=['fp16', 'fp32', 'fp64'], default='fp32', help='the floating-point precision (defaults to fp32)')
    # NOTE: this is a boolean flag (the old help text claimed it took a
    # device id, which store_true cannot accept).
    parser.add_argument('--cuda', help='use CUDA for computation (defaults to CPU)', action='store_true')
    parser.add_argument('--batch_size', default=10000, type=int, help='batch size (defaults to 10000); does not affect results, larger is usually faster but uses more memory')
    parser.add_argument('--seed', type=int, default=0, help='the random seed (defaults to 0)')

    recommended_group = parser.add_argument_group('recommended settings', 'Recommended settings for different scenarios')
    recommended_type = recommended_group.add_mutually_exclusive_group()
    recommended_type.add_argument('--supervised', metavar='DICTIONARY', help='recommended if you have a large training dictionary')
    recommended_type.add_argument('--semi_supervised', metavar='DICTIONARY', help='recommended if you have a small seed dictionary')
    recommended_type.add_argument('--identical', action='store_true', help='recommended if you have no seed dictionary but can rely on identical words')
    recommended_type.add_argument('--unsupervised', action='store_true', help='recommended if you have no seed dictionary and do not want to rely on identical words')

    init_group = parser.add_argument_group('advanced initialization arguments', 'Advanced initialization arguments')
    init_type = init_group.add_mutually_exclusive_group()
    init_type.add_argument('-d', '--init_dictionary', default=stdin_fd, metavar='DICTIONARY', help='the training dictionary file (defaults to stdin)')
    init_type.add_argument('--init_identical', action='store_true', help='use identical words as the seed dictionary')
    init_type.add_argument('--init_numerals', action='store_true', help='use latin numerals (i.e. words matching [0-9]+) as the seed dictionary')
    init_type.add_argument('--init_unsupervised', action='store_true', help='use unsupervised initialization')
    init_group.add_argument('--unsupervised_vocab', type=int, default=0, help='restrict the vocabulary to the top k entries for unsupervised initialization')


    mapping_group = parser.add_argument_group('advanced mapping arguments', 'Advanced embedding mapping arguments')
    mapping_group.add_argument('--normalize', choices=['unit', 'center', 'unitdim', 'centeremb', 'none'], nargs='*', default=[], help='the normalization actions to perform in order')
    mapping_group.add_argument('--whiten', action='store_true', help='whiten the embeddings')
    mapping_group.add_argument('--src_reweight', type=float, default=0, nargs='?', const=1, help='re-weight the source language embeddings')
    mapping_group.add_argument('--trg_reweight', type=float, default=0, nargs='?', const=1, help='re-weight the target language embeddings')
    mapping_group.add_argument('--src_dewhiten', choices=['src', 'trg'], help='de-whiten the source language embeddings')
    mapping_group.add_argument('--trg_dewhiten', choices=['src', 'trg'], help='de-whiten the target language embeddings')
    mapping_group.add_argument('--dim_reduction', type=int, default=0, help='apply dimensionality reduction')
    mapping_type = mapping_group.add_mutually_exclusive_group()
    mapping_type.add_argument('-c', '--orthogonal', action='store_true', help='use orthogonal constrained mapping')
    mapping_type.add_argument('-u', '--unconstrained', action='store_true', help='use unconstrained mapping')

    self_learning_group = parser.add_argument_group('advanced self-learning arguments', 'Advanced arguments for self-learning')
    self_learning_group.add_argument('--self_learning', action='store_true', help='enable self-learning')
    self_learning_group.add_argument('--vocabulary_cutoff', type=int, default=0, help='restrict the vocabulary to the top k entries')
    self_learning_group.add_argument('--direction', choices=['forward', 'backward', 'union'], default='union', help='the direction for dictionary induction (defaults to union)')
    self_learning_group.add_argument('--csls', type=int, nargs='?', default=0, const=10, metavar='NEIGHBORHOOD_SIZE', dest='csls_neighborhood', help='use CSLS for dictionary induction')
    self_learning_group.add_argument('--threshold', default=0.000001, type=float, help='the convergence threshold (defaults to 0.000001)')
    self_learning_group.add_argument('--validation', default=None, metavar='DICTIONARY', help='a dictionary file for validation at each iteration')
    self_learning_group.add_argument('--stochastic_initial', default=0.1, type=float, help='initial keep probability stochastic dictionary induction (defaults to 0.1)')
    self_learning_group.add_argument('--stochastic_multiplier', default=2.0, type=float, help='stochastic dictionary induction multiplier (defaults to 2.0)')
    self_learning_group.add_argument('--stochastic_interval', default=50, type=int, help='stochastic dictionary induction interval (defaults to 50)')
    self_learning_group.add_argument('--log', help='write to a log file in tsv format at each iteration')
    self_learning_group.add_argument('-v', '--verbose', action='store_true', help='write log information to stderr at each iteration')

    adversial_learning_group = parser.add_argument_group('adversial unsupervised bilingual dictionary induction', 'Adversial unsupervised bilingual dictionary induction')
    adversial_learning_group.add_argument("--n_refinement", type=int, default=5, help="Number of refinement iterations (0 to disable the refinement procedure)")
    adversial_learning_group.add_argument("--n_epochs", type=int, default=5, help="Number of epochs")
    adversial_learning_group.add_argument("--epoch_size", type=int, default=1000000, help="Iterations per epoch")
    adversial_learning_group.add_argument("--adversarial", type=str2bool, default=True, help="Use adversarial training")
    adversial_learning_group.add_argument("--dis_steps", type=int, default=5, help="Discriminator steps")
    adversial_learning_group.add_argument("--lr_decay", type=float, default=0.98, help="Learning rate decay (SGD only)")
    adversial_learning_group.add_argument("--min_lr", type=float, default=1e-6, help="Minimum learning rate (SGD only)")
    adversial_learning_group.add_argument("--lr_shrink", type=float, default=0.5, help="Shrink the learning rate if the validation metric decreases (1 to disable)")
    adversial_learning_group.add_argument("--dis_layers", type=int, default=2, help="Discriminator layers")
    adversial_learning_group.add_argument("--dis_hid_dim", type=int, default=2048, help="Discriminator hidden layer dimensions")
    adversial_learning_group.add_argument("--dis_dropout", type=float, default=0., help="Discriminator dropout")
    adversial_learning_group.add_argument("--dis_input_dropout", type=float, default=0.1, help="Discriminator input dropout")
    adversial_learning_group.add_argument("--dis_lambda", type=float, default=1, help="Discriminator loss feedback coefficient")
    adversial_learning_group.add_argument("--dis_most_frequent", type=int, default=75000, help="Select embeddings of the k most frequent words for discrimination (0 to disable)")
    adversial_learning_group.add_argument("--dis_smooth", type=float, default=0.1, help="Discriminator smooth predictions")
    adversial_learning_group.add_argument("--dis_clip_weights", type=float, default=0, help="Clip discriminator weights (0 to disable)")
    adversial_learning_group.add_argument("--map_optimizer", type=str, default="sgd,lr=0.1", help="Mapping optimizer")
    adversial_learning_group.add_argument("--map_beta", type=float, default=0.001, help="Beta for orthogonalization")
    adversial_learning_group.add_argument("--dis_optimizer", type=str, default="sgd,lr=0.1", help="Discriminator optimizer")
    # adversial_learning_group.add_argument("--map_id_init", type=bool, default=True, help="Initialize the mapping as an identity matrix")

    args = parser.parse_args()

    # Presets: install the recommended defaults, then re-parse so explicit
    # command-line options still win over the preset values.
    if args.supervised is not None:
        parser.set_defaults(init_dictionary=args.supervised, normalize=['unit', 'center', 'unit'], whiten=True, src_reweight=0.5, trg_reweight=0.5, src_dewhiten='src', trg_dewhiten='trg', batch_size=1000)
    if args.semi_supervised is not None:
        parser.set_defaults(init_dictionary=args.semi_supervised, normalize=['unit', 'center', 'unit'], whiten=True, src_reweight=0.5, trg_reweight=0.5, src_dewhiten='src', trg_dewhiten='trg', self_learning=True, vocabulary_cutoff=20000, csls_neighborhood=10)
    if args.identical:
        parser.set_defaults(init_identical=True, normalize=['unit', 'center', 'unit'], whiten=True, src_reweight=0.5, trg_reweight=0.5, src_dewhiten='src', trg_dewhiten='trg', self_learning=True, vocabulary_cutoff=20000, csls_neighborhood=10)
    if args.unsupervised:
        parser.set_defaults(init_unsupervised=True, unsupervised_vocab=4000, normalize=['unit', 'center', 'unit'], whiten=True, src_reweight=0.5, trg_reweight=0.5, src_dewhiten='src', trg_dewhiten='trg', self_learning=True, vocabulary_cutoff=20000, csls_neighborhood=10)

    args = parser.parse_args()

    args.device = torch.device('cuda') if args.cuda else torch.device('cpu')
    return args


def build_seed_dictionary(args, src_words, src_word2ind, x, trg_words, trg_word2ind, z):
    """Build the initial (seed) bilingual dictionary.

    Depending on the flags in ``args`` the seed pairs come from:
      * ``init_unsupervised``: similarity-distribution matching of the two
        monolingual spaces (sorted self-similarity rows, optionally
        CSLS-adjusted),
      * ``init_numerals``: words matching ``[0-9]+`` present in both vocabularies,
      * ``init_identical``: identical word strings in both vocabularies,
      * otherwise: a dictionary file (``args.init_dictionary``).

    Returns:
        (src_indices, trg_indices): parallel index collections (lists or
        tensors, depending on the branch) into ``x`` and ``z``.
    """
    src_indices = []
    trg_indices = []

    if args.init_unsupervised:
        sim_size = min(x.shape[0], z.shape[0])
        if args.unsupervised_vocab > 0:
            sim_size = min(sim_size, args.unsupervised_vocab)
        # Self-similarity of each space: (U S) U^T == X X^T via the SVD.
        u, s, vt = svd(x[:sim_size], full_matrices=False)
        xsim = (u*s).mm(u.T)
        u, s, vt = svd(z[:sim_size], full_matrices=False)
        zsim = (u*s).mm(u.T)
        del u, s, vt
        # Sorting each row makes the representations comparable across
        # languages without any alignment.
        xsim, _ = xsim.sort(dim=1)
        zsim, _ = zsim.sort(dim=1)

        embeddings.normalize(xsim, args.normalize)
        embeddings.normalize(zsim, args.normalize)

        sim = xsim.mm(zsim.T)
        if args.csls_neighborhood > 0:
            # CSLS: penalise hubs by subtracting the mean similarity to the
            # k nearest neighbours in each direction.
            knn_sim_fwd = topk_mean(sim, k=args.csls_neighborhood)
            knn_sim_bwd = topk_mean(sim.T, k=args.csls_neighborhood)
            sim -= knn_sim_fwd.unsqueeze(-1)/2 + knn_sim_bwd/2
        if args.direction == 'forward':
            src_indices = torch.arange(sim_size).to(sim.device)
            trg_indices = sim.argmax(axis=1).to(sim.device)
        elif args.direction == 'backward':
            src_indices = sim.argmax(axis=0).to(sim.device)
            trg_indices = torch.arange(sim_size).to(sim.device)
        elif args.direction == 'union':
            src_indices = torch.cat((torch.arange(sim_size).to(sim.device), sim.argmax(dim=0)))
            trg_indices = torch.cat((sim.argmax(dim=1), torch.arange(sim_size).to(sim.device)))
        del xsim, zsim, sim

    elif args.init_numerals:
        numeral_regex = re.compile('^[0-9]+$')
        src_numerals = {word for word in src_words if numeral_regex.match(word) is not None}
        trg_numerals = {word for word in trg_words if numeral_regex.match(word) is not None}
        numerals = src_numerals.intersection(trg_numerals)
        for word in numerals:
            src_indices.append(src_word2ind[word])
            trg_indices.append(trg_word2ind[word])
    elif args.init_identical:
        identical = set(src_words).intersection(set(trg_words))
        for word in identical:
            src_indices.append(src_word2ind[word])
            trg_indices.append(trg_word2ind[word])
    else:
        # Fixed the misspelt error handler ('surogateescape' raised a
        # ValueError) and close the file when done.
        with open(args.init_dictionary, encoding=args.encoding, errors='surrogateescape') as f:
            for line in f:
                src, trg = line.split()
                try:
                    src_ind = src_word2ind[src]
                    trg_ind = trg_word2ind[trg]
                    src_indices.append(src_ind)
                    trg_indices.append(trg_ind)
                except KeyError:
                    print('WARNING: OOV dictionary entry ({0} - {1})'.format(src, trg), file=sys.stderr)
    return src_indices, trg_indices

def dataload(args):
    """Load the source/target embeddings, normalize them, and build the
    seed and validation dictionaries.

    Exits with an error if de-whitening is requested without whitening.

    Returns:
        (src_words, src_word2ind, src_indices, x,
         trg_words, trg_word2ind, trg_indices, z,
         validation, validation_coverage, dtype)
        where ``validation`` maps a source index to the set of acceptable
        target indices (or None when no validation file was given).
    """
    dtype = torch.float
    if (args.src_dewhiten is not None or args.trg_dewhiten is not None) and not args.whiten:
        print('ERROR: De-whitening requires whitening first', file=sys.stderr)
        sys.exit(-1)

    # Choose the right dtype for the desired precision
    if args.precision == 'fp16':
        dtype = torch.float16
    elif args.precision == 'fp32':
        dtype = torch.float32
    elif args.precision == 'fp64':
        dtype = torch.float64

    cutoff = args.vocabulary_cutoff

    # Use context managers so the embedding files are closed after reading
    # (the original leaked both handles).
    with open(args.src_input, encoding=args.encoding, errors='surrogateescape') as srcfile:
        src_words, x = embeddings.read(srcfile, threshold=cutoff, dtype=dtype)
    with open(args.trg_input, encoding=args.encoding, errors='surrogateescape') as trgfile:
        trg_words, z = embeddings.read(trgfile, threshold=cutoff, dtype=dtype)
    x = x.to(args.device)
    z = z.to(args.device)

    # Record the effective (cutoff-limited) sizes and dimensions on args.
    src_size = x.shape[0] if cutoff <= 0 else min(x.shape[0], cutoff)
    trg_size = z.shape[0] if cutoff <= 0 else min(z.shape[0], cutoff)
    args.src_size = src_size
    args.trg_size = trg_size
    args.src_dim = x.shape[1]
    args.trg_dim = z.shape[1]

    torch.random.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    # Build word to index map
    src_word2ind = {word:i for i,word in enumerate(src_words)}
    trg_word2ind = {word:i for i,word in enumerate(trg_words)}

    # STEP 0: Normalization
    embeddings.normalize(x, args.normalize)
    embeddings.normalize(z, args.normalize)
    src_indices, trg_indices = build_seed_dictionary(\
        args, src_words, src_word2ind, x, trg_words, trg_word2ind, z)

    # Load the validation dictionary, if one was given.
    if args.validation is not None:
        validation = collections.defaultdict(set)
        out_of_vocab = set()
        vocab = set()
        with open(args.validation, encoding=args.encoding, errors='surrogateescape') as f:
            for line in f:
                src, trg = line.split()
                try:
                    src_ind = src_word2ind[src]
                    trg_ind = trg_word2ind[trg]
                    validation[src_ind].add(trg_ind)
                    vocab.add(src)
                except KeyError:
                    out_of_vocab.add(src)
        out_of_vocab -= vocab # if there's key in validation, then it should not be in oov
        validation_coverage = len(validation) / (len(validation) + len(out_of_vocab))
    else:
        validation = validation_coverage = None

    return src_words, src_word2ind, src_indices, x,\
           trg_words, trg_word2ind, trg_indices, z,\
           validation, validation_coverage, dtype

def update_embedding_mapping(args, end, src_indices, x, xw, trg_indices, z, zw):
    """Recompute the mapped embeddings ``xw``/``zw`` from the current dictionary.

    Three regimes:
      * orthogonal (also used for every non-final iteration): Procrustes
        solution W = V U^T from SVD(Z_d^T X_d), applied to the source side only;
      * unconstrained: least-squares mapping via the normal-equations
        pseudo-inverse;
      * otherwise (final iteration of the advanced pipeline): whitening,
        orthogonal mapping, re-weighting, de-whitening and optional
        dimensionality reduction.

    ``xw``/``zw`` are caller-provided buffers; the (possibly re-bound)
    mapped tensors are returned as ``(xw, zw)``.
    """
    if args.orthogonal or not end:
        # Orthogonal Procrustes over the dictionary pairs.
        u, s, vt = svd(z[trg_indices].T.mm(x[src_indices]))
        w = vt.T.mm(u.T)
        xw = x.mm(w)
        zw[:] = z
    elif args.unconstrained:
        # Unconstrained least squares: W = (X_d^T X_d)^-1 X_d^T Z_d.
        x_pseudoinv = inv(x[src_indices].T.mm(x[src_indices])).mm(x[src_indices].T)
        w = x_pseudoinv.mm(z[trg_indices])
        xw = x.mm(w)
        zw[:] = z
    else:
        xw[:] = x
        zw[:] = z

        # STEP 1: whitening transformation
        def whitening_transformation(m):
            u, s, vt = svd(m, full_matrices=False)
            # NOTE(review): ZCA whitening would use 1/sqrt(s) here; this code
            # uses 1/s (the original author questioned this too) — kept as-is.
            return vt.T.mm(torch.diag(1/s)).mm(vt)
        if args.whiten:
            wx1 = whitening_transformation(xw[src_indices])
            wz1 = whitening_transformation(zw[trg_indices])
            xw = xw.mm(wx1)
            zw = zw.mm(wz1)

        # STEP 2: Orthogonal mapping
        wx2, s, wz2_t = svd(xw[src_indices].T.mm(zw[trg_indices]))
        wz2 = wz2_t.T
        xw = xw.mm(wx2)
        zw = zw.mm(wz2)

        # STEP 3: Re-weighting by the singular values
        xw *= s**args.src_reweight
        zw *= s**args.trg_reweight

        # STEP 4: De-whitening.  Only performed when requested: the original
        # built the de-whitening matrices unconditionally (NameError on wx1
        # when --whiten was off, KeyError when no dewhiten side was chosen)
        # and de-whitened zw with args.src_dewhiten instead of
        # args.trg_dewhiten.
        if args.src_dewhiten is not None or args.trg_dewhiten is not None:
            de_whiten = {
                'src': wx2.T.mm(inv(wx1)).mm(wx2),
                'trg': wz2.T.mm(inv(wz1)).mm(wz2),
            }
            if args.src_dewhiten is not None:
                xw = xw.mm(de_whiten[args.src_dewhiten])
            if args.trg_dewhiten is not None:
                zw = zw.mm(de_whiten[args.trg_dewhiten])

        # STEP 5: Dimensionality reduction
        if args.dim_reduction > 0:
            xw = xw[:, :args.dim_reduction]
            zw = zw[:, :args.dim_reduction]

    return xw, zw

def build_dictionary(args, xw, zw, dtype, keep_prob = 1.):
    """Induce a bilingual dictionary from the mapped embeddings.

    Runs batched nearest-neighbour search (optionally CSLS-adjusted and
    with stochastic dropout on the similarity rows) in one or both
    directions, as selected by ``args.direction``.

    Args:
        args: parsed options; uses batch_size, direction, csls_neighborhood.
        xw: mapped source embeddings (src_size x dim).
        zw: mapped target embeddings (trg_size x dim).
        dtype: torch dtype for the similarity buffers.
        keep_prob: keep probability for stochastic dictionary induction;
            1.0 makes the induction deterministic.

    Returns:
        (objective, src_indices, trg_indices) where ``objective`` is the
        mean best similarity (the self-learning convergence criterion).
    """
    ## forward: from src to mid/trg
    ## backward: from trg to mid/src
    src_size, trg_size = xw.size(0), zw.size(0)
    device = xw.device
    # Best raw similarity per word; -100 is a sentinel below any real value.
    best_sim_forward = torch.full((src_size,), -100, dtype=dtype).to(device)
    src_indices_forward = torch.arange(src_size).to(device)
    trg_indices_forward = torch.zeros(src_size, dtype=int).to(device)
    best_sim_backward = torch.full((trg_size,), -100, dtype=dtype).to(device)
    src_indices_backward = torch.zeros(trg_size, dtype=int).to(device)
    trg_indices_backward = torch.arange(trg_size).to(device)
    # CSLS neighbourhood means; stay zero when CSLS is disabled, so the
    # subtraction below is then a no-op.
    knn_sim_fwd = torch.zeros(src_size, dtype=dtype).to(device)
    knn_sim_bwd = torch.zeros(trg_size, dtype=dtype).to(device)

    batch_size = args.batch_size
    # Reusable similarity buffers; within each batch only the first j-i
    # rows hold valid data.
    simfwd = torch.zeros(batch_size, trg_size, dtype=dtype).to(device)
    simbwd = torch.zeros(batch_size, src_size, dtype=dtype).to(device)
    if args.direction in ('forward', 'union'):
        if args.csls_neighborhood > 0:
            # Precompute the target-side k-NN means used by the CSLS penalty.
            for i in range(0, trg_size, batch_size):
                j = min(i + batch_size, trg_size)
                simbwd[:j-i] = zw[i:j].mm(xw[:src_size].T)
                knn_sim_bwd[i:j] = topk_mean(simbwd[:j-i], k = args.csls_neighborhood)
        
        for i in range(0, src_size, batch_size):
            j = min(i + batch_size, src_size)
            simfwd[:j-i] = xw[i:j].mm(zw[:trg_size].T)
            # The objective is computed on the raw similarity, before the
            # CSLS shift and the stochastic dropout.
            best_sim_forward[i:j] = simfwd[:j-i].max(dim=1).values
            simfwd[:j-i] -= knn_sim_bwd/2  # Equivalent to the real CSLS scores for NN
            trg_indices_forward[i:j] = dropout(simfwd[:j-i], 1-keep_prob).argmax(dim=1)
        
    if args.direction in ('backward', 'union'):
        if args.csls_neighborhood > 0:
            # Precompute the source-side k-NN means used by the CSLS penalty.
            for i in range(0, src_size, batch_size):
                j = min(i + batch_size, src_size)
                simfwd[:j-i] = xw[i:j].mm(zw[:trg_size].T)
                knn_sim_fwd[i:j] = topk_mean(simfwd[:j-i], k = args.csls_neighborhood)
        
        for i in range(0, trg_size, batch_size):
            j = min(i + batch_size, trg_size)
            simbwd[:j-i] = zw[i:j].mm(xw[:src_size].T)
            best_sim_backward[i:j] = simbwd[:j-i].max(dim=1).values
            simbwd[:j-i] -= knn_sim_fwd/2  # Equivalent to the real CSLS scores for NN
            src_indices_backward[i:j] = dropout(simbwd[:j-i], 1 - keep_prob).argmax(dim=1)

    # args.direction is restricted to these three choices by argparse, so
    # exactly one branch always runs.
    if args.direction == 'forward':
        src_indices = src_indices_forward
        trg_indices = trg_indices_forward
        objective = torch.mean(best_sim_forward).item()
    elif args.direction == 'backward':
        src_indices = src_indices_backward
        trg_indices = trg_indices_backward
        objective = torch.mean(best_sim_backward).item()
    elif args.direction == 'union':
        src_indices = torch.cat((src_indices_forward, src_indices_backward))
        trg_indices = torch.cat((trg_indices_forward, trg_indices_backward))
        objective = (torch.mean(best_sim_forward) + torch.mean(best_sim_backward)).item() / 2
    
    return objective, src_indices, trg_indices

# def muse_mapping(args, x,z):
#     mapping, discriminator = build_model(args, True)
#     gan = Trainer(mapping, discriminator, args)
#     logger = getLogger('muse_mapping')
#     VALIDATION_METRIC = 'mean_cosine-csls_knn_10-S2T-10000'
#     if args.adversarial:
#         logger.info('Adversial training')
#         for n_epoch in range(args.n_epochs):
#             logger.info(f'Starting adversial training epoch {n_epoch}')
#             tic = time.time()
#             n_words_proc = 0
#             stats = {'DIS_COSTS':[]}

#             for n_iter in range(0, args.epoch_size, args.batch_size):

#                 # discriminator training
#                 for _ in range(args.dis_steps):
#                     gan.dis_step(x, z, stats)
            
#                 # mapping training 
#                 n_words_proc += gan.mapping_step(x, stats)

#                 # log stats
#                 if n_iter % 500 == 0:
#                     stats_str = [('DIS_COSTS', 'Discriminator loss')]
#                     stats_log = [f'{v}: {np.mean(stats[k]):.4f}'
#                                 for k, v in stats_str if len(stats[k]) > 0]
#                     stats_log.append(f'{int(n_words_proc / (time.time() - tic))} samples/s')
#                     logger.info(f'{n_iter:06} - ' + ' - '.join(stats_log))

#                     # reset counter
#                     tic = time.time()
#                     n_words_proc = 0
#                     for k,_ in stats_str:
#                         del stats[k][:]
            
#             # update the learining rate
#             to_log = OrderedDict({'n_epoch':n_epoch})
#             logger.info("__log__:%s" % json.dumps(to_log))
#             logger.info('End of epoch %i.\n\n' % n_epoch)
#             # gan.save_best()

#             gan.update_lr(to_log, VALIDATION_METRIC)
#             if gan.map_optimizer.param_groups[0]['lr'] < args.min_lr:
#                 logger.info('Learning rate < 1e-6. BREAK.')
#                 break
        
#     return gan.mapping(x)


def _load_validation(args, src_word2ind, trg_word2ind):
    """Read the validation dictionary file named by ``args.validation``.

    Returns (validation, validation_coverage) where ``validation`` maps a
    source index to the set of acceptable target indices, or (None, None)
    when no validation file was given.
    """
    if args.validation is None:
        return None, None
    validation = collections.defaultdict(set)
    out_of_vocab = set()
    vocab = set()
    with open(args.validation, encoding=args.encoding, errors='surrogateescape') as f:
        for line in f:
            src, trg = line.split()
            try:
                src_ind = src_word2ind[src]
                trg_ind = trg_word2ind[trg]
                validation[src_ind].add(trg_ind)
                vocab.add(src)
            except KeyError:
                out_of_vocab.add(src)
    out_of_vocab -= vocab  # a source word that resolved at least once is not OOV
    coverage = len(validation) / (len(validation) + len(out_of_vocab))
    return validation, coverage


def main(task):
    """Run the full UBDI pipeline and write the mapped embeddings.

    Args:
        task: optional object carrying ``gan_initialized_space``, a
            pre-initialised embedding space tuple; when absent the data is
            loaded from the files named on the command line via dataload().
    """
    logging.basicConfig(level=logging.INFO)
    args = parsing()
    if task is None or not hasattr(task, 'gan_initialized_space'):
        src_words, src_word2ind, src_indices, x,\
            trg_words, trg_word2ind, trg_indices, z,\
            validation, validation_coverage, dtype = dataload(args)
    else:
        src_words, src_word2ind, src_indices, x,\
            trg_words, trg_word2ind, trg_indices, z = task.gan_initialized_space
        dtype = torch.float
        # The validation loader was previously duplicated inline here.
        validation, validation_coverage = _load_validation(args, src_word2ind, trg_word2ind)

    # Create log file (closed after the training loop).
    log = None
    if args.log:
        log = open(args.log, mode='w', encoding=args.encoding, errors='surrogateescape')

    # x = muse_mapping(args, x, z)
    # Allocate the mapped-embedding buffers.  (The original also
    # preallocated a ``simval`` buffer that was unconditionally re-bound in
    # the loop; that dead allocation is removed.)
    xw = torch.zeros_like(x)
    zw = torch.zeros_like(z)

    # Loop of training
    best_objective = objective = -100.
    it = 1
    last_improvement = 0
    keep_prob = args.stochastic_initial
    t = time.time()
    end = not args.self_learning
    print('='*20, 'start UBDI', '='*20)
    while True:
        # Increase the keep probability if we have not improved in
        # args.stochastic_interval iterations.
        if it - last_improvement > args.stochastic_interval:
            if keep_prob >= 1.0:
                end = True
            keep_prob = min(1.0, args.stochastic_multiplier*keep_prob)
            last_improvement = it

        xw, zw = update_embedding_mapping(args, end,\
            src_indices, x, xw,\
            trg_indices, z, zw)

        # Self-learning: stop right after the final (exact) mapping update.
        if end:
            break
        objective, src_indices, trg_indices =\
            build_dictionary(args, xw, zw, dtype, keep_prob=keep_prob)
        if objective - best_objective >= args.threshold:
            last_improvement = it
            best_objective = objective

        # Accuracy and similarity evaluation on the validation dictionary
        if args.validation:
            src = list(validation.keys())
            simval = xw[src].mm(zw.T)
            nn = simval.argmax(dim=1).cpu().numpy()
            accuracy = np.mean([1 if nn[i] in validation[src[i]] else 0 for i in range(len(src))])
            similarity = np.mean([max([simval[i][j].tolist() for j in validation[src[i]]]) for i in range(len(src))])

        # Logging
        duration = time.time() - t
        if args.verbose:
            print(file=sys.stderr)
            print('ITERATION {0} ({1:.2f}s)'.format(it, duration), file=sys.stderr)
            print('\t- Objective:        {0:9.4f}%'.format(100 * objective), file=sys.stderr)
            print('\t- Drop probability: {0:9.4f}%'.format(100 - 100*keep_prob), file=sys.stderr)
            if args.validation is not None:
                print('\t- Val. similarity:  {0:9.4f}%'.format(100 * similarity), file=sys.stderr)
                print('\t- Val. accuracy:    {0:9.4f}%'.format(100 * accuracy), file=sys.stderr)
                print('\t- Val. coverage:    {0:9.4f}%'.format(100 * validation_coverage), file=sys.stderr)
            sys.stderr.flush()
        if args.log is not None:
            val = '{0:.6f}\t{1:.6f}\t{2:.6f}'.format(
                100 * similarity, 100 * accuracy, 100 * validation_coverage) if args.validation is not None else ''
            print('{0}\t{1:.6f}\t{2}\t{3:.6f}'.format(it, 100 * objective, val, duration), file=log)
            log.flush()

        t = time.time()
        it += 1

    if log is not None:
        log.close()

    # Write mapped embeddings
    with open(args.src_output, mode='w', encoding=args.encoding, errors='surrogateescape') as srcfile:
        embeddings.write(src_words, xw, srcfile)
    with open(args.trg_output, mode='w', encoding=args.encoding, errors='surrogateescape') as trgfile:
        embeddings.write(trg_words, zw, trgfile)

if __name__ == '__main__':
    # Script entry point: run the pipeline without a pre-initialised space.
    main(None)