
from typing import Tuple, List, Mapping
import torch
from tqdm import trange

def load_vocab(path):
    """Read a vocabulary file (one token per line) into a token->index map.

    Indices 0 and 1 are reserved for '[PAD]' and '[UNK]'; each previously
    unseen token receives the next free index, duplicates are ignored.

    Args:
        path: vocab path.

    Returns:
        dict mapping token -> integer index.
    """
    word_to_ix = {'[PAD]': 0, '[UNK]': 1}
    with open(path, 'r', encoding='utf-8') as vocab_file:
        for line in vocab_file:
            token = line.rstrip("\n")
            # setdefault keeps the first index assigned to a duplicate token.
            word_to_ix.setdefault(token, len(word_to_ix))
    return word_to_ix


def create_dataset(seqs: List[List[str]],
                   tags: List[List[str]],
                   word_to_ix: Mapping[str, int],
                   max_seq_len: int,
                   pad_ix: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Convert tokenized sequences and aligned tag sequences into padded tensors.

    Tokens/tags missing from ``word_to_ix`` map to '[UNK]'; sequences longer
    than ``max_seq_len`` are truncated, shorter ones are padded with ``pad_ix``.

    Args:
        seqs: tokenized input sentences.
        tags: per-token tag strings, aligned with ``seqs``.
        word_to_ix: vocabulary mapping (used for both tokens and tags here).
        max_seq_len: fixed output length of every row.
        pad_ix: index written into padding positions.

    Returns:
        seqs_tensor: long tensor, shape [num_seqs, max_seq_len].
        seqs_mask: long tensor, shape [num_seqs, max_seq_len]; 1 for real
            tokens, 0 for padding.
        tags_tensor: long tensor, shape [num_seqs, max_seq_len].
    """
    assert len(seqs) == len(tags)
    num_seqs = len(seqs)
    unk_ix = word_to_ix['[UNK]']
    # Build directly as long tensors; the original float intermediate
    # (torch.ones * pad_ix, then .long()) is unnecessary.
    seqs_tensor = torch.full((num_seqs, max_seq_len), pad_ix, dtype=torch.long)
    seqs_mask = torch.zeros(num_seqs, max_seq_len, dtype=torch.long)
    tags_tensor = torch.full((num_seqs, max_seq_len), pad_ix, dtype=torch.long)
    # Plain range (not tqdm.trange): progress display is the caller's concern
    # and this drops a hard third-party dependency from the hot path.
    for i in range(num_seqs):
        # Truncate to max_seq_len so over-length rows no longer IndexError.
        for j, word in enumerate(seqs[i][:max_seq_len]):
            seqs_tensor[i, j] = word_to_ix.get(word, unk_ix)
            seqs_mask[i, j] = 1
        for j, tag in enumerate(tags[i][:max_seq_len]):
            tags_tensor[i, j] = word_to_ix.get(tag, unk_ix)
    return seqs_tensor, seqs_mask, tags_tensor


def create_attention_mask(raw_mask: torch.Tensor) -> torch.Tensor:
    """Expand a [batch, seq] padding mask into an additive attention mask.

    Positions where ``raw_mask`` is 1 become 0.0; masked (0) positions
    become -10000.0, with two singleton axes inserted so the result
    broadcasts over attention heads and query positions.
    """
    expanded = raw_mask.unsqueeze(1).unsqueeze(2)
    # (m - 1) * 10000 == (1 - m) * -10000: 0 for kept slots, -1e4 for padding.
    additive = (expanded - 1.0) * 10000.0
    return additive.float()


def create_transformer_attention_mask(raw_mask: torch.Tensor) -> torch.Tensor:
    """Invert a 0/1 padding mask into a boolean key-padding mask.

    Returns True where ``raw_mask`` is 0 (padding to be ignored) and
    False where it is 1 (real tokens).
    """
    inverted = 1 - raw_mask
    return inverted.bool()


if __name__ == '__main__':
    import sys

    # Usage: python <this_file> <vocab_path>. The original hard-coded an
    # empty path, which open() rejects.
    vocab_path = sys.argv[1] if len(sys.argv) > 1 else "vocab.txt"
    word_to_ix = load_vocab(vocab_path)

    # Minimal smoke demo; real callers supply sequences and tags from a
    # dataset loader. The original call passed only 4 of the 5 required
    # arguments (and referenced `tags` before defining it), so it raised
    # at runtime.
    seqs = [["hello", "world"]]
    tags = [["O", "O"]]
    result = create_dataset(seqs, tags, word_to_ix, 32, word_to_ix['[PAD]'])
    print(result)