# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

from collections import Counter
from multiprocessing import Pool
import os

import torch

from fairseq.tokenizer import tokenize_line
from fairseq.binarizer import safe_readline
from fairseq.data import data_utils
from . import encoder


class BPEDictionary(object):
    """A mapping from BPE symbols to consecutive integers.

    Unlike the default fairseq ``Dictionary``, the vocabulary is loaded from
    a pre-built BPE encoder (``encoder.get_encoder``), and all of the special
    symbols (pad/eos/unk/bos) default to the single ``<|endoftext|>`` token.
    """

    def __init__(self, bpe_dict_path, pad='<|endoftext|>', eos='<|endoftext|>', unk='<|endoftext|>', bos='<|endoftext|>'):
        """Load the BPE encoder and resolve indices of the special symbols.

        Args:
            bpe_dict_path: path handed to ``encoder.get_encoder``
            pad, eos, unk, bos: special symbols; each must already exist in
                the BPE vocabulary (``KeyError`` is raised otherwise)
        """
        self.unk_word, self.pad_word, self.eos_word = unk, pad, eos
        self.enc = encoder.get_encoder(bpe_dict_path)
        # All four may legitimately resolve to the same index (the defaults
        # all map to <|endoftext|>).
        self.bos_index = self.enc.encoder[bos]
        self.pad_index = self.enc.encoder[pad]
        self.eos_index = self.enc.encoder[eos]
        self.unk_index = self.enc.encoder[unk]

    def __eq__(self, other):
        # Two dictionaries are interchangeable iff they share the same
        # symbol -> index mapping.  Guard with isinstance so comparing
        # against an unrelated type returns False instead of raising
        # AttributeError (the old code assumed ``other.enc`` existed).
        return isinstance(other, BPEDictionary) and self.enc.encoder == other.enc.encoder

    def __getitem__(self, idx):
        """Return the symbol for ``idx``, or the unk symbol if unknown."""
        # Fix: look the index up directly rather than testing
        # ``idx < len(decoder)`` — the decoder is a dict, so the old length
        # check raised KeyError for negative indices and silently assumed
        # the keys are the contiguous range 0..len-1.
        return self.enc.decoder.get(idx, self.unk_word)

    def __len__(self):
        """Returns the number of symbols in the dictionary"""
        return len(self.enc.encoder)

    def index(self, sym):
        """Returns the index of the specified symbol (unk index if absent)"""
        if sym in self.enc.encoder:
            return self.enc.encoder[sym]
        return self.unk_index

    def string(self, tensor, bpe_symbol=None, escape_unk=False):
        """Helper for converting a tensor of token indices to a string.

        Can optionally remove BPE symbols or escape <unk> words.
        """
        if torch.is_tensor(tensor) and tensor.dim() == 2:
            # Fix: forward bpe_symbol/escape_unk to the per-sentence calls;
            # the old code silently dropped both flags on recursion.
            return '\n'.join(
                self.string(t, bpe_symbol=bpe_symbol, escape_unk=escape_unk)
                for t in tensor
            )

        sent = self.enc.decode(tensor.tolist())
        # NOTE(review): bpe_symbol/escape_unk are otherwise unused here and
        # the eos word is stripped instead — presumably intentional for a
        # byte-level BPE vocabulary, but confirm before relying on the flags.
        return data_utils.process_bpe_symbol(sent, self.eos_word)

    def bos(self):
        """Helper to get index of beginning-of-sentence symbol"""
        return self.bos_index

    def pad(self):
        """Helper to get index of pad symbol"""
        return self.pad_index

    def eos(self):
        """Helper to get index of end-of-sentence symbol"""
        return self.eos_index

    def unk(self):
        """Helper to get index of unk symbol"""
        return self.unk_index

    def encode_line(self, line, line_tokenizer=tokenize_line, add_if_not_exist=True,
                    consumer=None, append_eos=True, reverse_order=False):
        """Encode ``line`` with the BPE encoder into an ``IntTensor`` of ids.

        ``line_tokenizer`` and ``add_if_not_exist`` are accepted only for
        interface compatibility with fairseq's ``Dictionary.encode_line`` and
        are ignored: tokenization is done entirely by the BPE encoder, and
        the vocabulary is fixed.

        Args:
            line: the raw text to encode (leading/trailing whitespace is stripped)
            consumer: optional callback invoked as ``consumer(word, idx)``
                for every encoded token
            append_eos: append ``self.eos_index`` after the encoded tokens
            reverse_order: reverse the token ids before (optional) eos append

        Returns:
            torch.IntTensor of token ids, length ``n`` (+1 when ``append_eos``)
        """
        idxs = self.enc.encode(line.strip())
        if reverse_order:
            idxs = list(reversed(idxs))
        nwords = len(idxs)
        ids = torch.IntTensor(nwords + 1 if append_eos else nwords)

        for i, idx in enumerate(idxs):
            word = self[idx]
            if consumer is not None:
                consumer(word, idx)
            ids[i] = idx
        if append_eos:
            ids[nwords] = self.eos_index
        return ids

