{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "8ab67b7d",
   "metadata": {},
   "source": [
    "This notebook translates German sentences to English by generating multiple hypotheses with various methods and reranking them with various scores."
   ]
  },
  {
   "cell_type": "markdown",
   "id": "aa5262f6",
   "metadata": {},
   "source": [
    "# Dependencies"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "dd5bfe1a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# path to the translation model and its vocabulary, in order to compute ALTI correctly. \n",
    "MODEL_DIR = '../model'\n",
    "DATA_DIR = '../model/wmt18_de-en'\n",
    "LASER_DIR = '../laser'\n",
    "USE_GPU = True"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "cf323269",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':16:8'\n",
    "os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'\n",
    "# this is for comet to behave"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "60466e1a",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "0f80f40e",
   "metadata": {},
   "outputs": [],
   "source": [
    "torch.use_deterministic_algorithms(False) # otherwise, comet complains\n",
    "#!pip install unbabel-comet==1.1.2 --use-feature=2020-resolver\n",
    "import comet"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "f99885a8",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Please install tensorboardX: pip install tensorboardX\n"
     ]
    }
   ],
   "source": [
    "from fairseq.models.transformer import TransformerModel"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "d5d4d6a2",
   "metadata": {},
   "outputs": [],
   "source": [
    "from stopes.eval.alti.wrappers.transformer_wrapper import FairseqTransformerHub\n",
    "from stopes.eval.alti.alti_metrics.alti_metrics_utils import compute_alti_metrics, compute_alti_nllb, get_loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "ea78bfb6",
   "metadata": {},
   "outputs": [],
   "source": [
    "from stopes.modules.preprocess.laser_sentence_encoder import SentenceEncoder, spm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "0961ef43",
   "metadata": {},
   "outputs": [],
   "source": [
    "from sentence_transformers import SentenceTransformer\n",
    "from transformers import AutoModelForSequenceClassification, AutoTokenizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "7fbc7943",
   "metadata": {},
   "outputs": [],
   "source": [
    "import gc\n",
    "\n",
    "def cleanup():\n",
    "    gc.collect()\n",
    "    if torch.cuda.is_available():\n",
    "        torch.cuda.empty_cache()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "9d8dd374",
   "metadata": {},
   "outputs": [],
   "source": [
    "from tqdm.auto import tqdm, trange"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "34499932",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "import json"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "59e1a59b",
   "metadata": {},
   "outputs": [],
   "source": [
    "from sacrebleu import CHRF"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a26cee22",
   "metadata": {},
   "source": [
    "# Data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "3f99a9ff",
   "metadata": {},
   "outputs": [],
   "source": [
    "gt = pd.read_csv('../annotated_data/guerreiro2022_corpus_w_annotations.csv')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "afe9e5cb",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0    2048\n",
       "1    1074\n",
       "2     164\n",
       "3     129\n",
       "Name: error_class, dtype: int64"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Derive binary error indicators from the human annotation columns.\n",
    "gt['any_mistake'] = 1 - gt.correctness\n",
    "# 'Detached' hallucinations: strongly or fully unsupported by the source.\n",
    "gt['any_detached'] = gt[['strong-unsupport', 'full-unsupport']].max(1)\n",
    "gt['repeat_or_detached'] = gt[['repetitions', 'strong-unsupport', 'full-unsupport']].max(1)\n",
    "# Mistakes not covered by the named categories above.\n",
    "gt['other_errors'] = gt['any_mistake']-gt['named-entities']-gt['omission']-gt['repeat_or_detached']\n",
    "# Rough severity score (0-3); presumably each stricter flag implies the\n",
    "# previous one (full-unsupport => detached => mistake) - verify against the\n",
    "# annotation schema.\n",
    "gt['error_class'] = gt['any_detached'] + gt['full-unsupport'] + gt['any_mistake']\n",
    "gt['error_class'].value_counts()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "18253fe6",
   "metadata": {},
   "source": [
    "Sample 400 source texts"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "880e9a4f",
   "metadata": {},
   "outputs": [],
   "source": [
    "smpl = gt.groupby('error_class').sample(100, random_state=1)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d8db2f27",
   "metadata": {},
   "source": [
    "# Creating the translations"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "03cf74da",
   "metadata": {},
   "outputs": [],
   "source": [
    "k = 10  # number of hypotheses; also beam size when appropriate"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "32fa821a",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "loading archive file ../model\n",
      "load_model_ensemble_and_task is_moe=False\n",
      "Rank 0: Done reading from disk\n",
      "[de] dictionary: 32032 types\n",
      "[en] dictionary: 32032 types\n",
      "Done loading state dict\n",
      "{'_name': None, 'common': {'_name': None, 'no_progress_bar': False, 'log_interval': 10, 'log_format': None, 'log_file': None, 'tensorboard_logdir': None, 'wandb_project': 'mt-hallucinations', 'azureml_logging': False, 'seed': 42, 'cpu': False, 'tpu': False, 'bf16': False, 'memory_efficient_bf16': False, 'fp16': False, 'memory_efficient_fp16': False, 'fp16_no_flatten_grads': False, 'fp16_init_scale': 128, 'fp16_scale_window': None, 'fp16_scale_tolerance': 0.0, 'on_cpu_convert_precision': False, 'min_loss_scale': 0.0001, 'threshold_loss_scale': None, 'amp': False, 'amp_batch_retries': 2, 'amp_init_scale': 128, 'amp_scale_window': None, 'user_dir': None, 'empty_cache_freq': 0, 'all_gather_list_size': 16384, 'model_parallel_size': 1, 'quantization_config_path': None, 'profile': False, 'reset_logging': False, 'suppress_crashes': False, 'use_plasma_view': False, 'plasma_path': '/tmp/plasma'}, 'common_eval': {'_name': None, 'path': None, 'post_process': None, 'quiet': False, 'model_overrides': '{}', 'results_path': None}, 'distributed_training': {'_name': None, 'distributed_world_size': 1, 'distributed_num_procs': 1, 'distributed_rank': 0, 'distributed_backend': 'nccl', 'distributed_init_method': None, 'distributed_port': -1, 'device_id': 0, 'distributed_no_spawn': False, 'ddp_backend': 'pytorch_ddp', 'ddp_comm_hook': 'none', 'bucket_cap_mb': 25, 'fix_batches_to_gpus': False, 'find_unused_parameters': False, 'gradient_as_bucket_view': False, 'fast_stat_sync': False, 'heartbeat_timeout': -1, 'broadcast_buffers': False, 'slowmo_momentum': None, 'slowmo_base_algorithm': 'localsgd', 'localsgd_frequency': 3, 'nprocs_per_node': 1, 'pipeline_model_parallel': False, 'pipeline_balance': None, 'pipeline_devices': None, 'pipeline_chunks': 0, 'pipeline_encoder_balance': None, 'pipeline_encoder_devices': None, 'pipeline_decoder_balance': None, 'pipeline_decoder_devices': None, 'pipeline_checkpoint': 'never', 'zero_sharding': 'none', 'fp16': False, 'memory_efficient_fp16': 
False, 'tpu': False, 'no_reshard_after_forward': False, 'fp32_reduce_scatter': False, 'cpu_offload': False, 'use_sharded_state': False, 'not_fsdp_flatten_parameters': False}, 'dataset': {'_name': None, 'num_workers': 1, 'skip_invalid_size_inputs_valid_test': False, 'max_tokens': 8192, 'batch_size': None, 'required_batch_size_multiple': 8, 'required_seq_len_multiple': 1, 'dataset_impl': None, 'data_buffer_size': 10, 'train_subset': 'train', 'valid_subset': 'valid', 'combine_valid_subsets': None, 'ignore_unused_valid_subsets': False, 'validate_interval': 1, 'validate_interval_updates': 0, 'validate_after_updates': 0, 'fixed_validation_seed': None, 'disable_validation': False, 'max_tokens_valid': 8192, 'batch_size_valid': None, 'max_valid_steps': None, 'curriculum': 0, 'gen_subset': 'test', 'num_shards': 1, 'shard_id': 0, 'grouped_shuffling': False, 'update_epoch_batch_itr': False, 'update_ordered_indices_seed': False}, 'optimization': {'_name': None, 'max_epoch': 0, 'max_update': 250000, 'stop_time_hours': 0.0, 'clip_norm': 0.0, 'sentence_avg': False, 'update_freq': [4], 'lr': [0.0005], 'stop_min_lr': -1.0, 'use_bmuf': False, 'skip_remainder_batch': False}, 'checkpoint': {'_name': None, 'save_dir': '/home/nunomg/mt-hallucinations/HALO/fairseq/checkpoints/wmt18_de-en', 'restore_file': 'checkpoint_last.pt', 'finetune_from_model': None, 'reset_dataloader': False, 'reset_lr_scheduler': False, 'reset_meters': False, 'reset_optimizer': False, 'optimizer_overrides': '{}', 'save_interval': 1, 'save_interval_updates': 50000, 'keep_interval_updates': -1, 'keep_interval_updates_pattern': -1, 'keep_last_epochs': 10, 'keep_best_checkpoints': -1, 'no_save': False, 'no_epoch_checkpoints': False, 'no_last_checkpoints': False, 'no_save_optimizer_state': False, 'best_checkpoint_metric': 'bleu', 'maximize_best_checkpoint_metric': True, 'patience': -1, 'checkpoint_suffix': '', 'checkpoint_shard_count': 1, 'load_checkpoint_on_all_dp_ranks': False, 'write_checkpoints_asynchronously': 
False, 'model_parallel_size': 1}, 'bmuf': {'_name': None, 'block_lr': 1.0, 'block_momentum': 0.875, 'global_sync_iter': 50, 'warmup_iterations': 500, 'use_nbm': False, 'average_sync': False, 'distributed_world_size': 1}, 'generation': {'_name': None, 'beam': 5, 'nbest': 1, 'max_len_a': 0.0, 'max_len_b': 200, 'min_len': 1, 'match_source_len': False, 'unnormalized': False, 'no_early_stop': False, 'no_beamable_mm': False, 'lenpen': 1.0, 'unkpen': 0.0, 'replace_unk': None, 'sacrebleu': False, 'score_reference': False, 'prefix_size': 0, 'no_repeat_ngram_size': 0, 'sampling': False, 'sampling_topk': -1, 'sampling_topp': -1.0, 'constraints': None, 'temperature': 1.0, 'diverse_beam_groups': -1, 'diverse_beam_strength': 0.5, 'diversity_rate': -1.0, 'print_alignment': None, 'print_step': False, 'lm_path': None, 'lm_weight': 0.0, 'iter_decode_eos_penalty': 0.0, 'iter_decode_max_iter': 10, 'iter_decode_force_max_iter': False, 'iter_decode_with_beam': 1, 'iter_decode_with_external_reranker': False, 'retain_iter_history': False, 'retain_dropout': False, 'retain_dropout_modules': None, 'decoding_format': None, 'no_seed_provided': False}, 'eval_lm': {'_name': None, 'output_word_probs': False, 'output_word_stats': False, 'context_window': 0, 'softmax_batch': 9223372036854775807}, 'interactive': {'_name': None, 'buffer_size': 0, 'input': '-'}, 'model': Namespace(no_progress_bar=False, log_interval=10, log_format=None, log_file=None, tensorboard_logdir=None, wandb_project='mt-hallucinations', azureml_logging=False, seed=42, cpu=False, tpu=False, bf16=False, memory_efficient_bf16=False, fp16=False, memory_efficient_fp16=False, fp16_no_flatten_grads=False, fp16_init_scale=128, fp16_scale_window=None, fp16_scale_tolerance=0.0, on_cpu_convert_precision=False, min_loss_scale=0.0001, threshold_loss_scale=None, amp=False, amp_batch_retries=2, amp_init_scale=128, amp_scale_window=None, user_dir=None, empty_cache_freq=0, all_gather_list_size=16384, model_parallel_size=1, 
quantization_config_path=None, profile=False, reset_logging=False, suppress_crashes=False, use_plasma_view=False, plasma_path='/tmp/plasma', criterion='label_smoothed_cross_entropy', tokenizer=None, bpe='sentencepiece', optimizer='adam', lr_scheduler='inverse_sqrt', simul_type=None, scoring='bleu', task='translation', num_workers=1, skip_invalid_size_inputs_valid_test=False, max_tokens=8192, batch_size=None, required_batch_size_multiple=8, required_seq_len_multiple=1, dataset_impl=None, data_buffer_size=10, train_subset='train', valid_subset='valid', combine_valid_subsets=None, ignore_unused_valid_subsets=False, validate_interval=1, validate_interval_updates=0, validate_after_updates=0, fixed_validation_seed=None, disable_validation=False, max_tokens_valid=8192, batch_size_valid=None, max_valid_steps=None, curriculum=0, gen_subset='test', num_shards=1, shard_id=0, grouped_shuffling=False, update_epoch_batch_itr=False, update_ordered_indices_seed=False, distributed_world_size=1, distributed_num_procs=1, distributed_rank=0, distributed_backend='nccl', distributed_init_method=None, distributed_port=-1, device_id=0, distributed_no_spawn=False, ddp_backend='pytorch_ddp', ddp_comm_hook='none', bucket_cap_mb=25, fix_batches_to_gpus=False, find_unused_parameters=False, gradient_as_bucket_view=False, fast_stat_sync=False, heartbeat_timeout=-1, broadcast_buffers=False, slowmo_momentum=None, slowmo_base_algorithm='localsgd', localsgd_frequency=3, nprocs_per_node=1, pipeline_model_parallel=False, pipeline_balance=None, pipeline_devices=None, pipeline_chunks=0, pipeline_encoder_balance=None, pipeline_encoder_devices=None, pipeline_decoder_balance=None, pipeline_decoder_devices=None, pipeline_checkpoint='never', zero_sharding='none', no_reshard_after_forward=False, fp32_reduce_scatter=False, cpu_offload=False, use_sharded_state=False, not_fsdp_flatten_parameters=False, arch='transformer_wmt_en_de', max_epoch=0, max_update=250000, stop_time_hours=0, clip_norm=0.0, 
sentence_avg=False, update_freq=[4], lr=[0.0005], stop_min_lr=-1.0, use_bmuf=False, skip_remainder_batch=False, save_dir='/home/nunomg/mt-hallucinations/HALO/fairseq/checkpoints/wmt18_de-en', restore_file='checkpoint_last.pt', finetune_from_model=None, reset_dataloader=False, reset_lr_scheduler=False, reset_meters=False, reset_optimizer=False, optimizer_overrides='{}', save_interval=1, save_interval_updates=50000, keep_interval_updates=-1, keep_interval_updates_pattern=-1, keep_last_epochs=10, keep_best_checkpoints=-1, no_save=False, no_epoch_checkpoints=False, no_last_checkpoints=False, no_save_optimizer_state=False, best_checkpoint_metric='bleu', maximize_best_checkpoint_metric=True, patience=-1, checkpoint_suffix='', checkpoint_shard_count=1, load_checkpoint_on_all_dp_ranks=False, write_checkpoints_asynchronously=False, store_ema=False, ema_decay=0.9999, ema_start_update=0, ema_seed_model=None, ema_update_freq=1, ema_fp32=False, data='/private/home/daviddale/dev/nllb/demo/alti/de-en-hallucinations/model/wmt18_de-en', source_lang='de', target_lang='en', load_alignments=False, left_pad_source=True, left_pad_target=False, upsample_primary=-1, truncate_source=False, num_batch_buckets=0, eval_bleu=True, eval_bleu_args='{\"beam\": 5, \"max_len_a\": 1.2, \"max_len_b\": 10}', eval_bleu_detok='space', eval_bleu_detok_args='{}', eval_tokenized_bleu=False, eval_bleu_remove_bpe='sentencepiece', eval_bleu_print_samples=True, label_smoothing=0.1, report_accuracy=False, ignore_prefix_size=0, adam_betas='(0.9, 0.98)', adam_eps=1e-08, weight_decay=0.0001, use_old_adam=False, fp16_adam_stats=False, warmup_updates=4000, warmup_init_lr=-1, pad=1, eos=2, unk=3, share_decoder_input_output_embed=True, dropout=0.3, no_seed_provided=False, encoder_embed_path=None, encoder_embed_dim=512, encoder_ffn_embed_dim=2048, encoder_layers=6, encoder_attention_heads=8, encoder_normalize_before=False, encoder_learned_pos=False, decoder_embed_path=None, decoder_embed_dim=512, 
decoder_ffn_embed_dim=2048, decoder_layers=6, decoder_attention_heads=8, decoder_normalize_before=False, decoder_learned_pos=False, attention_dropout=0.0, activation_dropout=0.0, activation_fn='relu', adaptive_softmax_cutoff=None, adaptive_softmax_dropout=0, share_all_embeddings=False, no_token_positional_embeddings=False, adaptive_input=False, no_cross_attention=False, cross_self_attention=False, decoder_output_dim=512, decoder_input_dim=512, no_scale_embedding=False, layernorm_embedding=False, tie_adaptive_weights=False, checkpoint_activations=False, offload_activations=False, encoder_layers_to_keep=None, decoder_layers_to_keep=None, encoder_layerdrop=0, decoder_layerdrop=0, quant_noise_pq=0, quant_noise_pq_block_size=8, quant_noise_scalar=0, _name='transformer_wmt_en_de', max_source_positions=1024, max_target_positions=1024, min_params_to_wrap=100000000, sentencepiece_model='../model/sentencepiece.joint.bpe.model'), 'task': {'_name': 'translation', 'data': '/private/home/daviddale/dev/nllb/demo/alti/de-en-hallucinations/model/wmt18_de-en', 'source_lang': 'de', 'target_lang': 'en', 'load_alignments': False, 'left_pad_source': True, 'left_pad_target': False, 'max_source_positions': 1024, 'max_target_positions': 1024, 'upsample_primary': -1, 'truncate_source': False, 'num_batch_buckets': 0, 'train_subset': 'train', 'dataset_impl': None, 'required_seq_len_multiple': 1, 'eval_bleu': True, 'eval_bleu_args': '{\"beam\": 5, \"max_len_a\": 1.2, \"max_len_b\": 10}', 'eval_bleu_detok': 'space', 'eval_bleu_detok_args': '{}', 'eval_tokenized_bleu': False, 'eval_bleu_remove_bpe': 'sentencepiece', 'eval_bleu_print_samples': True}, 'criterion': {'_name': 'label_smoothed_cross_entropy', 'label_smoothing': 0.1, 'report_accuracy': False, 'ignore_prefix_size': 0, 'sentence_avg': False}, 'optimizer': {'_name': 'adam', 'adam_betas': '(0.9, 0.98)', 'adam_eps': 1e-08, 'weight_decay': 0.0001, 'use_old_adam': False, 'fp16_adam_stats': False, 'tpu': False, 'lr': [0.0005]}, 'lr_scheduler': 
{'_name': 'inverse_sqrt', 'warmup_updates': 4000, 'warmup_init_lr': -1.0, 'lr': [0.0005]}, 'scoring': {'_name': 'bleu', 'pad': 1, 'eos': 2, 'unk': 3}, 'bpe': {'_name': 'sentencepiece', 'sentencepiece_model': '../model/sentencepiece.joint.bpe.model', 'sentencepiece_enable_sampling': False, 'sentencepiece_alpha': None}, 'tokenizer': None, 'ema': {'_name': None, 'store_ema': False, 'ema_decay': 0.9999, 'ema_start_update': 0, 'ema_seed_model': None, 'ema_update_freq': 1, 'ema_fp32': False}, 'simul_type': None}\n"
     ]
    }
   ],
   "source": [
    "de2en = TransformerModel.from_pretrained(\n",
    "    MODEL_DIR,\n",
    "    checkpoint_file='checkpoint_best.pt',\n",
    "    data_name_or_path=DATA_DIR,\n",
    "    bpe='sentencepiece', \n",
    "    sentencepiece_model=MODEL_DIR + '/sentencepiece.joint.bpe.model'\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "a3e21e46",
   "metadata": {},
   "outputs": [],
   "source": [
    "de2en.cuda();"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "d9998579",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Diverse translations of the data sample, key: list of lists of translation hypotheses.\n",
    "smpl_diverse = {}"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2c8844cc",
   "metadata": {},
   "source": [
    "### Baseline translation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "720f11f4",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "cdada40eea644ce984b15f7d52f85ab4",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/400 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "new_tran = [de2en.translate(t, beam=5) for t in tqdm(smpl.src)]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "220f2cd4",
   "metadata": {},
   "outputs": [],
   "source": [
    "key = 'default'\n",
    "smpl_diverse[key] = [[mt] for mt in new_tran]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4fd965a0",
   "metadata": {},
   "source": [
    "### Random sampling"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "d3080940",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "1d6f56be9ab14803af6810e6214ec92b",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/400 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "key = 'sampling'\n",
    "smpl_diverse[key] = []\n",
    "# For each source sentence, draw k independent unbiased samples (beam=1).\n",
    "for text in tqdm(smpl.src):\n",
    "    options = []\n",
    "    enc = [de2en.encode(text)]\n",
    "    for _ in range(k):\n",
    "        batched_hypos = de2en.generate(enc, sampling=True, beam=1)\n",
    "        out_texts = [de2en.decode(h['tokens']) for h in batched_hypos[0]]\n",
    "        options.append(out_texts[0])\n",
    "    smpl_diverse[key].append(options)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "22625d49",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "41a865c63c1a44ffa11c525652ae8ef8",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/400 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "key = 'sampling_p08'\n",
    "smpl_diverse[key] = []\n",
    "# Same as above, but with nucleus (top-p=0.8) sampling to cut the low-probability tail.\n",
    "for text in tqdm(smpl.src):\n",
    "    options = []\n",
    "    enc = [de2en.encode(text)]\n",
    "    for _ in range(k):\n",
    "        batched_hypos = de2en.generate(enc, sampling=True, sampling_topp=0.8, beam=1)\n",
    "        out_texts = [de2en.decode(h['tokens']) for h in batched_hypos[0]]\n",
    "        options.append(out_texts[0])\n",
    "    smpl_diverse[key].append(options)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "99b86d9e",
   "metadata": {},
   "source": [
    "### Beam search"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "id": "0c474156",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "1bc1d8bd1b5a4f06a18ff0dea2cbe3ad",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/400 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "key = 'beam_search'\n",
    "smpl_diverse[key] = []\n",
    "# A single beam-search call already yields k distinct hypotheses.\n",
    "# (Removed an unused 'options = []' left over from the sampling cells.)\n",
    "for text in tqdm(smpl.src):\n",
    "    enc = [de2en.encode(text)]\n",
    "    batched_hypos = de2en.generate(enc, beam=k)\n",
    "    out_texts = [de2en.decode(h['tokens']) for h in batched_hypos[0]]\n",
    "    smpl_diverse[key].append(out_texts)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "id": "ffab7486",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "660ae98e08d441068ebffd7cec8a11f3",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/400 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/private/home/daviddale/dev/fairseq/fairseq/search.py:809: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor').\n",
      "  final_beams = final_indices // k\n"
     ]
    }
   ],
   "source": [
    "key = 'beam_diversity_1'\n",
    "smpl_diverse[key] = []\n",
    "# Beam search with a diversity penalty (rate 1.0) between hypotheses.\n",
    "# (Removed an unused 'options = []' left over from the sampling cells.)\n",
    "for text in tqdm(smpl.src):\n",
    "    enc = [de2en.encode(text)]\n",
    "    batched_hypos = de2en.generate(enc, beam=k, diversity_rate=1.0)\n",
    "    out_texts = [de2en.decode(h['tokens']) for h in batched_hypos[0]]\n",
    "    smpl_diverse[key].append(out_texts)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "id": "b9dd6af4",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "54047afcc97340c3824e2e2338bfc2ed",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/400 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "key = 'beam_diversity_3'\n",
    "smpl_diverse[key] = []\n",
    "# Beam search with a stronger diversity penalty (rate 3.0).\n",
    "# (Removed an unused 'options = []' left over from the sampling cells.)\n",
    "for text in tqdm(smpl.src):\n",
    "    enc = [de2en.encode(text)]\n",
    "    batched_hypos = de2en.generate(enc, beam=k, diversity_rate=3.0)\n",
    "    out_texts = [de2en.decode(h['tokens']) for h in batched_hypos[0]]\n",
    "    smpl_diverse[key].append(out_texts)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "id": "5eb1da47",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "823cfa2b80a94228a964f1a53dfb35cf",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/400 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "key = 'beam_diversity_10'\n",
    "smpl_diverse[key] = []\n",
    "# Beam search with the strongest diversity penalty tried here (rate 10.0).\n",
    "# (Removed an unused 'options = []' left over from the sampling cells.)\n",
    "for text in tqdm(smpl.src):\n",
    "    enc = [de2en.encode(text)]\n",
    "    batched_hypos = de2en.generate(enc, beam=k, diversity_rate=10.0)\n",
    "    out_texts = [de2en.decode(h['tokens']) for h in batched_hypos[0]]\n",
    "    smpl_diverse[key].append(out_texts)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "id": "8626d0b9",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "b8fdcdea6e8248e68dcbe178d41cfb53",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/400 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "key = 'beam_dbs_1'\n",
    "smpl_diverse[key] = []\n",
    "# Diverse beam search: k groups (one beam each), strength 1.\n",
    "# (Removed an unused 'options = []' left over from the sampling cells.)\n",
    "for text in tqdm(smpl.src):\n",
    "    enc = [de2en.encode(text)]\n",
    "    batched_hypos = de2en.generate(enc, beam=k, diverse_beam_groups=k, diverse_beam_strength=1)\n",
    "    out_texts = [de2en.decode(h['tokens']) for h in batched_hypos[0]]\n",
    "    smpl_diverse[key].append(out_texts)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "id": "4419449a",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "e68ec827468a45bb8bfb3b4bef389a66",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/400 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "key = 'beam_dbs_3'\n",
    "smpl_diverse[key] = []\n",
    "# Diverse beam search with dissimilarity strength 3.\n",
    "# (Removed an unused 'options = []' left over from the sampling cells.)\n",
    "for text in tqdm(smpl.src):\n",
    "    enc = [de2en.encode(text)]\n",
    "    batched_hypos = de2en.generate(enc, beam=k, diverse_beam_groups=k, diverse_beam_strength=3)\n",
    "    out_texts = [de2en.decode(h['tokens']) for h in batched_hypos[0]]\n",
    "    smpl_diverse[key].append(out_texts)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "id": "a53d7e97",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "35d9c053ccb74354a341d1e18b004088",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/400 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "key = 'beam_dbs_10'\n",
    "smpl_diverse[key] = []\n",
    "# Diverse beam search with dissimilarity strength 10.\n",
    "# (Removed an unused 'options = []' left over from the sampling cells.)\n",
    "for text in tqdm(smpl.src):\n",
    "    enc = [de2en.encode(text)]\n",
    "    batched_hypos = de2en.generate(enc, beam=k, diverse_beam_groups=k, diverse_beam_strength=10)\n",
    "    out_texts = [de2en.decode(h['tokens']) for h in batched_hypos[0]]\n",
    "    smpl_diverse[key].append(out_texts)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "552fc569",
   "metadata": {},
   "source": [
    "### Dropout methods"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "id": "c19e0986",
   "metadata": {},
   "outputs": [],
   "source": [
    "for mn, m in de2en.named_modules():  # an easy way to randomize the model!\n",
    "    if 'dropout' in mn:\n",
    "        m.apply_during_inference = True"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "id": "f894bd4b",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "887378e2728449b9b1954475f85b8c04",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/400 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "key = 'beam_dropout'\n",
    "smpl_diverse[key] = []\n",
    "# k beam-search runs under MC dropout; only the top hypothesis of each\n",
    "# randomized run is kept, so diversity comes from dropout, not the beam.\n",
    "for text in tqdm(smpl.src):\n",
    "    options = []\n",
    "    enc = [de2en.encode(text)]\n",
    "    for _ in range(k):\n",
    "        batched_hypos = de2en.generate(enc, beam=k, retain_dropout=True)\n",
    "        out_texts = [de2en.decode(h['tokens']) for h in batched_hypos[0]]\n",
    "        options.append(out_texts[0])\n",
    "    smpl_diverse[key].append(options)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "id": "02c0f2ec",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "126d39efa3f34d4eb6737c7cd7ad4940",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/400 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "key = 'greedy_dropout'\n",
    "smpl_diverse[key] = []\n",
    "# k greedy decodings (beam=1) under MC dropout - the cheapest dropout variant.\n",
    "for text in tqdm(smpl.src):\n",
    "    options = []\n",
    "    enc = [de2en.encode(text)]\n",
    "    for _ in range(k):\n",
    "        batched_hypos = de2en.generate(enc, beam=1, retain_dropout=True)\n",
    "        out_texts = [de2en.decode(h['tokens']) for h in batched_hypos[0]]\n",
    "        options.append(out_texts[0])\n",
    "    smpl_diverse[key].append(options)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "id": "db03865d",
   "metadata": {},
   "outputs": [],
   "source": [
    "for mn, m in de2en.named_modules():\n",
    "    if 'dropout' in mn:\n",
    "        m.apply_during_inference = False"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4d29198f",
   "metadata": {},
   "source": [
    "# Scoring the hypotheses"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "cfc7be4c",
   "metadata": {},
   "source": [
    "Here is the mean quality of outputs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "id": "44b815f3",
   "metadata": {},
   "outputs": [],
   "source": [
    "hypotheses_scores = {}"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c05b372a",
   "metadata": {},
   "source": [
    "### By LABSE scores"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "id": "1d0ecc09",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Load pretrained SentenceTransformer: sentence-transformers/LaBSE\n",
      "Use pytorch device: cuda\n"
     ]
    }
   ],
   "source": [
    "labse = SentenceTransformer('sentence-transformers/LaBSE')\n",
    "labse.cuda();"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "id": "77a1f0eb",
   "metadata": {},
   "outputs": [],
   "source": [
    "def score_pair(src, trg):\n",
    "    embs = labse.encode([src, trg], show_progress_bar=False)\n",
    "    return embs[0].dot(embs[1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "id": "86ae3384",
   "metadata": {},
   "outputs": [],
   "source": [
    "def argmax(values, criterion):\n",
    "    best = -np.infty\n",
    "    candidate = None\n",
    "    for v in values:\n",
    "        score = criterion(v)\n",
    "        if score > best:\n",
    "            best = score\n",
    "            candidate = v\n",
    "    return candidate"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "id": "47e017d0",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "fe4530ec3ba24ab7b6d6cf4b69c5476d",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/12 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "hypotheses_scores['LABSE'] = {\n",
    "    k: [[score_pair(x, smpl.iloc[i].src) for x in hyps] for i, hyps in enumerate(vs)]\n",
    "    for k, vs in tqdm(smpl_diverse.items(), total=len(smpl_diverse))\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "id": "7f058c1c",
   "metadata": {},
   "outputs": [],
   "source": [
    "labse.to('cpu')\n",
    "cleanup();"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5f6f8f1b",
   "metadata": {},
   "source": [
    "### By COMET-QE"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "id": "b289e757",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "wmt20-comet-qe-da-v2 is already in cache.\n",
      "Created a temporary directory at /tmp/tmpfh1qnfdl\n",
      "Writing /tmp/tmpfh1qnfdl/_remote_module_non_scriptable.py\n",
      "Some weights of the model checkpoint at xlm-roberta-large were not used when initializing XLMRobertaModel: ['lm_head.layer_norm.weight', 'lm_head.bias', 'lm_head.decoder.weight', 'roberta.pooler.dense.weight', 'lm_head.dense.bias', 'lm_head.layer_norm.bias', 'lm_head.dense.weight', 'roberta.pooler.dense.bias']\n",
      "- This IS expected if you are initializing XLMRobertaModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing XLMRobertaModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n",
      "Encoder model frozen.\n"
     ]
    }
   ],
   "source": [
    "model_path = comet.download_model(\"wmt20-comet-qe-da-v2\")\n",
    "model = comet.load_from_checkpoint(model_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "id": "c637c01c",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "GPU available: True, used: False\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "/private/home/daviddale/.conda/envs/stopes/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py:1814: PossibleUserWarning: GPU available but not used. Set `accelerator` and `devices` using `Trainer(accelerator='gpu', devices=2)`.\n",
      "  rank_zero_warn(\n",
      "Predicting DataLoader 0: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<00:00,  8.89it/s]\n",
      "GPU available: True, used: False\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "/private/home/daviddale/.conda/envs/stopes/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py:1814: PossibleUserWarning: GPU available but not used. Set `accelerator` and `devices` using `Trainer(accelerator='gpu', devices=2)`.\n",
      "  rank_zero_warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.7208645939826965\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<00:00,  9.07it/s]\n",
      "GPU available: True, used: False\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "/private/home/daviddale/.conda/envs/stopes/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py:1814: PossibleUserWarning: GPU available but not used. Set `accelerator` and `devices` using `Trainer(accelerator='gpu', devices=2)`.\n",
      "  rank_zero_warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.8370150923728943\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<00:00,  9.00it/s]\n",
      "GPU available: True, used: False\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "/private/home/daviddale/.conda/envs/stopes/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py:1814: PossibleUserWarning: GPU available but not used. Set `accelerator` and `devices` using `Trainer(accelerator='gpu', devices=2)`.\n",
      "  rank_zero_warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "-0.4274933338165283\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<00:00,  8.93it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "-0.10516911745071411\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "def score_pair(src, trg):\n",
    "    seg_scores, sys_score = model.predict([{'src': src, 'mt': trg}], batch_size=8, gpus=0)\n",
    "    # with 0 gpus, this is actually faster\n",
    "    return seg_scores[0]\n",
    "\n",
    "print(score_pair('hallo Welt', 'hello world'))\n",
    "print(score_pair('hello world', 'hallo Welt'))\n",
    "print(score_pair('hallo Welt', 'halo over my head'))\n",
    "print(score_pair('halo over my head', 'hallo Welt'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "id": "f03336af",
   "metadata": {},
   "outputs": [],
   "source": [
    "def score_pairs(src, trg, batch_size=8, gpus=0):\n",
    "    seg_scores, sys_score = model.predict(\n",
    "        [{'src': s, 'mt': t} for s, t in zip(src, trg)], \n",
    "        batch_size=batch_size, \n",
    "        gpus=gpus,\n",
    "    )\n",
    "    # with 0 gpus, this is actually faster\n",
    "    return seg_scores\n",
    "\n",
    "\n",
    "def get_scores_batched(hyp_sets, sources, **kwargs):\n",
    "    srcs = []\n",
    "    tgts = []\n",
    "    ids = []\n",
    "    for i, (hyp_set, src) in enumerate(zip(hyp_sets, sources)):\n",
    "        id_old = len(srcs)\n",
    "        for mt in hyp_set:\n",
    "            srcs.append(src)\n",
    "            tgts.append(mt)\n",
    "        ids.append((id_old, len(srcs)))\n",
    "    scores = score_pairs(srcs, tgts, **kwargs)\n",
    "    results = []\n",
    "    for i, (start, end) in enumerate(ids):\n",
    "        results.append(scores[start:end])\n",
    "    return results"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "id": "484c13f4",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "6c8bf25bcdaa4d659acab8d7d07c20ea",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/12 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "GPU available: True, used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "Predicting: 0it [00:00, ?it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0:   2%|███▋                                                                                                                                                                                    | 1/50 [00:00<00:02, 17.84it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 50/50 [00:03<00:00, 13.18it/s]\n",
      "GPU available: True, used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "Predicting: 0it [00:00, ?it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0:   0%|▎                                                                                                                                                                                      | 1/500 [00:00<00:40, 12.37it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 500/500 [00:38<00:00, 12.92it/s]\n",
      "GPU available: True, used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "Predicting: 0it [00:00, ?it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0:   0%|▎                                                                                                                                                                                      | 1/500 [00:00<00:29, 16.71it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 500/500 [00:33<00:00, 15.08it/s]\n",
      "GPU available: True, used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "Predicting: 0it [00:00, ?it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0:   0%|▎                                                                                                                                                                                      | 1/500 [00:00<00:27, 18.13it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 500/500 [00:31<00:00, 15.92it/s]\n",
      "GPU available: True, used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "Predicting: 0it [00:00, ?it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0:   0%|▎                                                                                                                                                                                      | 1/500 [00:00<00:28, 17.22it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 500/500 [00:30<00:00, 16.29it/s]\n",
      "GPU available: True, used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "Predicting: 0it [00:00, ?it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0:   0%|▎                                                                                                                                                                                      | 1/500 [00:00<00:35, 14.04it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 500/500 [00:30<00:00, 16.34it/s]\n",
      "GPU available: True, used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "Predicting: 0it [00:00, ?it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0:   0%|▎                                                                                                                                                                                      | 1/500 [00:00<00:28, 17.54it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n",
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 500/500 [00:31<00:00, 16.04it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 500/500 [00:31<00:00, 15.94it/s]\n",
      "GPU available: True, used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "Predicting: 0it [00:00, ?it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0:   0%|▎                                                                                                                                                                                      | 1/500 [00:00<00:28, 17.57it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 500/500 [00:29<00:00, 16.67it/s]\n",
      "GPU available: True, used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "Predicting: 0it [00:00, ?it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0:   0%|▎                                                                                                                                                                                      | 1/500 [00:00<00:27, 17.84it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 500/500 [00:29<00:00, 16.84it/s]\n",
      "GPU available: True, used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "Predicting: 0it [00:00, ?it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0:   0%|▎                                                                                                                                                                                      | 1/500 [00:00<00:30, 16.11it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 500/500 [00:30<00:00, 16.35it/s]\n",
      "GPU available: True, used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "Predicting: 0it [00:00, ?it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0:   0%|▎                                                                                                                                                                                      | 1/500 [00:00<00:27, 18.02it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 500/500 [00:30<00:00, 16.44it/s]\n",
      "GPU available: True, used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "Predicting: 0it [00:00, ?it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0:   0%|▎                                                                                                                                                                                      | 1/500 [00:00<00:27, 17.91it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 500/500 [00:31<00:00, 15.93it/s]\n"
     ]
    }
   ],
   "source": [
    "hypotheses_scores['COMET-QE'] = {\n",
    "    k: get_scores_batched(vs, smpl.src.tolist(), gpus=1)\n",
    "    for k, vs in tqdm(smpl_diverse.items(), total=len(smpl_diverse))\n",
    "}"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "27bc64df",
   "metadata": {},
   "source": [
    "### By LASER2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "id": "66bb8289",
   "metadata": {},
   "outputs": [],
   "source": [
    "spm_tokenizer = spm.SentencePieceProcessor()\n",
    "spm_tokenizer.Load(LASER_DIR + '/laser2.spm')\n",
    "\n",
    "# NOTE(review): spm_vocab is pointed at the model checkpoint (laser2.pt)\n",
    "# rather than a vocabulary file (e.g. laser2.cvocab from the LASER2\n",
    "# release). The cell ran without error here, but confirm this is the\n",
    "# intended argument.\n",
    "laser_encoder = SentenceEncoder(\n",
    "    LASER_DIR + '/laser2.pt',\n",
    "    max_sentences=None,\n",
    "    max_tokens=None,\n",
    "    spm_vocab=LASER_DIR + '/laser2.pt',\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "id": "5a8d99ee",
   "metadata": {},
   "outputs": [],
   "source": [
    "def encode_sents(sents):\n",
    "    # SPM-tokenize, embed with LASER2, then L2-normalize each embedding,\n",
    "    # so that dot products of the returned rows are cosine similarities.\n",
    "    tokenized_sents = [\n",
    "        \" \".join(spm_tokenizer.EncodeAsPieces(sent))\n",
    "        for sent in sents\n",
    "    ]\n",
    "    emb = laser_encoder.encode_sentences(tokenized_sents)\n",
    "    return emb / ((emb**2).sum(1, keepdims=True) ** 0.5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 59,
   "id": "2930761d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.8148769\n",
      "0.67164296\n"
     ]
    }
   ],
   "source": [
    "def score_pair(src, trg):\n",
    "    embs = encode_sents([src, trg])\n",
    "    return embs[0].dot(embs[1])\n",
    "\n",
    "print(score_pair('hallo Welt', 'hello world'))\n",
    "print(score_pair('hallo Welt', 'halo over my hed'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "id": "b881041f",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "4edb73be767443d58742fc3026931491",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/12 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "hypotheses_scores['LASER2'] = {\n",
    "    k: [[score_pair(x, smpl.iloc[i].src) for x in hyps] for i, hyps in enumerate(vs)]\n",
    "    for k, vs in tqdm(smpl_diverse.items(), total=len(smpl_diverse))\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "id": "6b3c9652",
   "metadata": {},
   "outputs": [],
   "source": [
    "laser_encoder.encoder.to('cpu');\n",
    "cleanup();"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "cf354e79",
   "metadata": {},
   "source": [
    "### By ALTI+ "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "id": "a2718f8f",
   "metadata": {},
   "outputs": [],
   "source": [
    "hub = FairseqTransformerHub(cfg=de2en.cfg, models=de2en.models, task=de2en.task)\n",
    "hub.cuda();"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "id": "42864053",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.73475236\n",
      "0.4881617\n"
     ]
    }
   ],
   "source": [
    "def score_pair(src, trg):\n",
    "    with torch.inference_mode():\n",
    "        alti = compute_alti_nllb(hub, src, trg)\n",
    "    scores = compute_alti_metrics(*alti)\n",
    "    return scores['avg_sc']\n",
    "\n",
    "print(score_pair('hallo Welt', 'hello world'))\n",
    "print(score_pair('hallo Welt', 'halo over my head'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "id": "fe313bef",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "7cee3a4125604961802ca16b1cc83dec",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/12 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Score every hypothesis against its source sentence.\n",
    "# NOTE(review): unlike the LASER/XNLI scores, this score_pair is not\n",
    "# symmetric in its arguments — the sanity check above passes the German\n",
    "# source first — so the source sentence must be the first argument here\n",
    "# too (the original call had the arguments swapped).\n",
    "hypotheses_scores['ALTI_avg_sc'] = {\n",
    "    k: [[score_pair(smpl.iloc[i].src, x) for x in hyps] for i, hyps in enumerate(vs)]\n",
    "    for k, vs in tqdm(smpl_diverse.items(), total=len(smpl_diverse))\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "id": "a36cccec",
   "metadata": {},
   "outputs": [],
   "source": [
    "hub.to('cpu');\n",
    "cleanup()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5b866ee4",
   "metadata": {},
   "source": [
    "### By XNLI"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "id": "a8569208",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of the model checkpoint at joeddav/xlm-roberta-large-xnli were not used when initializing XLMRobertaForSequenceClassification: ['roberta.pooler.dense.weight', 'roberta.pooler.dense.bias']\n",
      "- This IS expected if you are initializing XLMRobertaForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing XLMRobertaForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
     ]
    }
   ],
   "source": [
    "mname = 'joeddav/xlm-roberta-large-xnli'\n",
    "\n",
    "model = AutoModelForSequenceClassification.from_pretrained(mname).cuda()\n",
    "tokenizer = AutoTokenizer.from_pretrained(mname)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "id": "22e78f83",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_clf_scores(texts1, texts2, batch_size=32, label='entailment', verbose=True):\n",
    "    # Batched probability of `label` for each (texts1[i], texts2[i]) pair\n",
    "    # under the XNLI classifier loaded above; returns a 1-D numpy array.\n",
    "    scores = []\n",
    "    t = trange if verbose else range\n",
    "    for i in t(0, len(texts1), batch_size):\n",
    "        xx, yy = texts1[i:i+batch_size], texts2[i:i+batch_size]\n",
    "        with torch.inference_mode():\n",
    "            inputs = tokenizer(xx, yy, truncation=True, padding=True, return_tensors='pt').to(model.device)\n",
    "            proba = torch.softmax(model(**inputs).logits, -1)[:, model.config.label2id[label]].cpu().numpy()\n",
    "        scores.append(proba)\n",
    "    scores = np.concatenate(scores)\n",
    "    return scores\n",
    "\n",
    "def get_nli_scores(texts1, texts2, verbose=True):\n",
    "    # Symmetrize: product of the entailment probabilities in both directions.\n",
    "    return get_clf_scores(texts1, texts2, verbose=verbose) * get_clf_scores(texts2, texts1, verbose=verbose)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 68,
   "id": "f21c9455",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.99302286\n",
      "0.049959093\n"
     ]
    }
   ],
   "source": [
    "def score_pair(src, trg):\n",
    "    return get_nli_scores([src], [trg], verbose=False)[0]\n",
    "\n",
    "print(score_pair('hallo Welt', 'hello world'))\n",
    "print(score_pair('hallo Welt', 'halo over my head'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 69,
   "id": "97f75cce",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "c634b4ef5bb64cb99aef1181f7b64e4c",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/12 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "hypotheses_scores['XNLI'] = {\n",
    "    k: [[score_pair(x, smpl.iloc[i].src) for x in hyps] for i, hyps in enumerate(vs)]\n",
    "    for k, vs in tqdm(smpl_diverse.items(), total=len(smpl_diverse))\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "id": "f795037a",
   "metadata": {},
   "outputs": [],
   "source": [
    "model.to('cpu')\n",
    "cleanup();"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "816460ca",
   "metadata": {},
   "source": [
    "### By ref-ChrF++ (oracle)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 71,
   "id": "5693f2bd",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "0cb3e0eeb48d4316a0c71cc24cd29db6",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/12 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "chrfpp = CHRF(word_order=2)\n",
    "\n",
    "hypotheses_scores['ref_chrf'] = {\n",
    "    k: [[chrfpp.sentence_score(x, [smpl.iloc[i].ref]).score for x in hyps] for i, hyps in enumerate(vs)]\n",
    "    for k, vs in tqdm(smpl_diverse.items(), total=len(smpl_diverse))\n",
    "}\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6d533c8f",
   "metadata": {},
   "source": [
    "### Now compute the selections based on the hypotheses"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 74,
   "id": "2fc0f795",
   "metadata": {},
   "outputs": [],
   "source": [
    "# For every scoring method, pick per sentence the hypothesis with the\n",
    "# highest score, separately for each generation method.\n",
    "selections = {\n",
    "    scorer: {\n",
    "        gen: [\n",
    "            cands[np.argmax(hypotheses_scores[scorer][gen][i])]\n",
    "            for i, cands in enumerate(cand_lists)\n",
    "        ]\n",
    "        for gen, cand_lists in smpl_diverse.items()\n",
    "    }\n",
    "    for scorer in hypotheses_scores\n",
    "}"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3af77d72",
   "metadata": {},
   "source": [
    "### The reference"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 75,
   "id": "4cf58654",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "becd5a2ebbee41a090c6e22c47c22ad8",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/12 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# The reference translation is the same regardless of the generation\n",
    "# method, so just repeat it under every key (the dict values were unused,\n",
    "# hence iterating the keys directly).\n",
    "selections['ref'] = {\n",
    "    k: smpl.ref.tolist()\n",
    "    for k in tqdm(smpl_diverse, total=len(smpl_diverse))\n",
    "}"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1092424a",
   "metadata": {},
   "source": [
    "### By default "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 76,
   "id": "2fcd233f",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "f3b4e2b0117d4abfb34b82d77482b974",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/12 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# The no-reranking baseline: take the first hypothesis of each method\n",
    "# (the enumerate index was unused, so iterate the hypothesis lists directly).\n",
    "selections['first'] = {\n",
    "    k: [hyps[0] for hyps in vs]\n",
    "    for k, vs in tqdm(smpl_diverse.items(), total=len(smpl_diverse))\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 77,
   "id": "45dd0899",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "400"
      ]
     },
     "execution_count": 77,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(selections['first']['sampling'])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "933d629c",
   "metadata": {},
   "source": [
    "The baseline (default translation) corresponds to taking the first hypothesis from beam search."
   ]
  },
  {
   "cell_type": "markdown",
   "id": "089cdb6e",
   "metadata": {},
   "source": [
    "# Evaluate the selections"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "18fd342e",
   "metadata": {},
   "source": [
    "### src-NLI"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 78,
   "id": "72d3aeb1",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of the model checkpoint at joeddav/xlm-roberta-large-xnli were not used when initializing XLMRobertaForSequenceClassification: ['roberta.pooler.dense.weight', 'roberta.pooler.dense.bias']\n",
      "- This IS expected if you are initializing XLMRobertaForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing XLMRobertaForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
     ]
    }
   ],
   "source": [
    "mname = 'joeddav/xlm-roberta-large-xnli'\n",
    "\n",
    "model = AutoModelForSequenceClassification.from_pretrained(mname).cuda()\n",
    "tokenizer = AutoTokenizer.from_pretrained(mname)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 79,
   "id": "c191595c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): this re-defines get_clf_scores / get_nli_scores identically\n",
    "# to the XNLI reranking section above (the classifier was moved to CPU in\n",
    "# between); consider moving these helpers to a single shared cell.\n",
    "def get_clf_scores(texts1, texts2, batch_size=32, label='entailment', verbose=True):\n",
    "    # Batched probability of `label` for each (texts1[i], texts2[i]) pair;\n",
    "    # returns a 1-D numpy array.\n",
    "    scores = []\n",
    "    t = trange if verbose else range\n",
    "    for i in t(0, len(texts1), batch_size):\n",
    "        xx, yy = texts1[i:i+batch_size], texts2[i:i+batch_size]\n",
    "        with torch.inference_mode():\n",
    "            inputs = tokenizer(xx, yy, truncation=True, padding=True, return_tensors='pt').to(model.device)\n",
    "            proba = torch.softmax(model(**inputs).logits, -1)[:, model.config.label2id[label]].cpu().numpy()\n",
    "        scores.append(proba)\n",
    "    scores = np.concatenate(scores)\n",
    "    return scores\n",
    "\n",
    "def get_nli_scores(texts1, texts2, verbose=True):\n",
    "    # Symmetrize: product of the entailment probabilities in both directions.\n",
    "    return get_clf_scores(texts1, texts2, verbose=verbose) * get_clf_scores(texts2, texts1, verbose=verbose)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 80,
   "id": "19a64b51",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "a9f93f468ec84f89be70d1704eef37d7",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/8 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "sel_src_nli_raw = {\n",
    "    selector: {\n",
    "        sampler: get_nli_scores(sampled, smpl.src.tolist(), verbose=False).tolist()#.mean()\n",
    "        for sampler, sampled in by_sampler.items()\n",
    "    }\n",
    "    for selector, by_sampler in tqdm(selections.items())\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 81,
   "id": "2a42c543",
   "metadata": {},
   "outputs": [],
   "source": [
    "sel_src_nli = {k1: {k2: np.mean(v2) for k2, v2 in v1.items()} for k1, v1 in sel_src_nli_raw.items()}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 82,
   "id": "f7977605",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "69b8e0baf4834b4689666babc4c72745",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/13 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "6c3f1e7ddd00404c9082aa7848f9b3be",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/13 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "0.512619"
      ]
     },
     "execution_count": 82,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "bl_nli = get_nli_scores(smpl.mt.tolist(), smpl.src.tolist())\n",
    "bl_nli.mean()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7e57d0f3",
   "metadata": {},
   "source": [
    "### src-ref-COMET"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 84,
   "id": "ba98b88b",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "wmt20-comet-da is already in cache.\n",
      "Some weights of the model checkpoint at xlm-roberta-large were not used when initializing XLMRobertaModel: ['lm_head.layer_norm.weight', 'lm_head.bias', 'lm_head.decoder.weight', 'roberta.pooler.dense.weight', 'lm_head.dense.bias', 'lm_head.layer_norm.bias', 'lm_head.dense.weight', 'roberta.pooler.dense.bias']\n",
      "- This IS expected if you are initializing XLMRobertaModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing XLMRobertaModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n",
      "Encoder model frozen.\n"
     ]
    }
   ],
   "source": [
    "model_path = comet.download_model(\"wmt20-comet-da\")\n",
    "model = comet.load_from_checkpoint(model_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 85,
   "id": "f571e691",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "dict_keys(['default', 'sampling', 'sampling_p08', 'beam_search', 'beam_diversity_1', 'beam_diversity_3', 'beam_diversity_10', 'beam_dbs_1', 'beam_dbs_3', 'beam_dbs_10', 'beam_dropout', 'greedy_dropout'])"
      ]
     },
     "execution_count": 85,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "smpl_diverse.keys()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 86,
   "id": "5295403a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(82800, 3)\n",
      "(30117, 3)\n"
     ]
    }
   ],
   "source": [
    "data_for_comet = pd.DataFrame([\n",
    "    {'mt': hyp, 'src': smpl.src.iloc[i], 'ref': smpl.ref.iloc[i]}\n",
    "    for gen, by_gen in smpl_diverse.items()\n",
    "    for i, hyps in enumerate(by_gen)\n",
    "    for hyp in hyps\n",
    "] + [\n",
    "    {'mt': mt, 'src': smpl.src.iloc[i], 'ref': smpl.ref.iloc[i]}\n",
    "    for selector, by_sampler in selections.items()\n",
    "    for sampler, sampled in by_sampler.items()\n",
    "    for i, mt in enumerate(sampled)\n",
    "])\n",
    "print(data_for_comet.shape)\n",
    "data_for_comet = data_for_comet.drop_duplicates().reset_index(drop=True)\n",
    "print(data_for_comet.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 87,
   "id": "49031c4d",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "GPU available: True, used: True\n",
      "TPU available: False, using: 0 TPU cores\n",
      "IPU available: False, using: 0 IPUs\n",
      "HPU available: False, using: 0 HPUs\n",
      "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n",
      "Predicting: 0it [00:00, ?it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0:   0%|                                                                                                                                                                                               | 0/942 [00:00<?, ?it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n",
      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 942/942 [05:11<00:00,  3.03it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "To disable this warning, you can either:\n",
      "\t- Avoid using `tokenizers` before the fork if possible\n",
      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Predicting DataLoader 0: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 942/942 [05:11<00:00,  3.02it/s]\n"
     ]
    }
   ],
   "source": [
    "seg_scores_comet_ref, sys_score_comet_ref = model.predict(\n",
    "    data_for_comet.to_dict('records'), batch_size=32, gpus=1\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 88,
   "id": "d37301e3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Index the segment-level COMET scores by (src, mt, ref) for O(1) lookup.\n",
    "texts2comet = {\n",
    "    (row.src, row.mt, row.ref): seg_scores_comet_ref[i]\n",
    "    for i, row in data_for_comet.iterrows()\n",
    "}"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2b7de843",
   "metadata": {},
   "source": [
    "Add scores for all the hypotheses"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 89,
   "id": "1dc52834",
   "metadata": {},
   "outputs": [],
   "source": [
    "hypotheses_scores['COMET'] = {\n",
    "    gen_method: [\n",
    "        [texts2comet[(smpl.src.iloc[i], hyp, smpl.ref.iloc[i])] for hyp in hyps]\n",
    "        for i, hyps in enumerate(by_gen)\n",
    "    ]\n",
    "    for gen_method, by_gen in smpl_diverse.items()\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 90,
   "id": "f5c67c2e",
   "metadata": {},
   "outputs": [],
   "source": [
    "for selector, by_sampler in selections.items():\n",
    "    for sampler, sampled in by_sampler.items():\n",
    "        for i, mt in enumerate(sampled):\n",
    "            _ = texts2comet[(smpl.src.iloc[i], mt, smpl.ref.iloc[i])]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 91,
   "id": "f04937ed",
   "metadata": {},
   "outputs": [],
   "source": [
    "sel_comet_raw = {\n",
    "    selector: {\n",
    "        sampler: [\n",
    "            texts2comet[(smpl.src.iloc[i], mt, smpl.ref.iloc[i])] \n",
    "            for i, mt in enumerate(sampled)\n",
    "        ]\n",
    "        for sampler, sampled in by_sampler.items()\n",
    "    }\n",
    "    for selector, by_sampler in selections.items()\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 92,
   "id": "d04ab5e9",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Aggregate per-sentence COMET scores into a mean per (selector, sampler).\n",
    "# (This cell previously appeared twice verbatim; the duplicate was removed.)\n",
    "sel_comet = {k1: {k2: np.mean(v2) for k2, v2 in v1.items()} for k1, v1 in sel_comet_raw.items()}"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6c20fb92",
   "metadata": {},
   "source": [
    "# Save the results"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 96,
   "id": "00c04650",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist everything needed for downstream analysis: the evaluated sample,\n",
    "# all candidate translations, their scores (cast to plain Python floats so\n",
    "# they are JSON-serializable), the selections, and the evaluation scores.\n",
    "os.makedirs('../computed_data', exist_ok=True)\n",
    "with open('../computed_data/diverse-decoding-results.json', 'w') as f:\n",
    "    json.dump({\n",
    "        'data': smpl.to_dict(orient='records'),\n",
    "        'candidates': smpl_diverse,\n",
    "        'candidate_scores': {k1: {k2: \n",
    "                                  [[float(h) for h in hl] for hl in v2] \n",
    "                                  for k2, v2 in v1.items()\n",
    "                                 } for k1, v1 in hypotheses_scores.items()},\n",
    "        'selections': selections,\n",
    "        'nli_scores': sel_src_nli_raw, \n",
    "        'comet_scores': sel_comet_raw\n",
    "    }, f, ensure_ascii=False, indent=2)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.9.12 64-bit ('stopes')",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.12"
  },
  "vscode": {
   "interpreter": {
    "hash": "1ca3a0506e51af745c7bcd9c038acd1c8e798bbf80786ca4fe805f822747bfa6"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
