from torch import device
from torch.cuda import is_available
from re import sub
from typing import Tuple, Dict, List

# Select the GPU when CUDA is available, otherwise fall back to the CPU.
# NOTE(review): this rebinds the imported name `device`, shadowing the
# `torch.device` constructor for the rest of the module — confirm nothing
# downstream needs the constructor itself.
device = device('cuda' if is_available() else 'cpu')
SOS_token = 0  # start of sentence token
EOS_token = 1  # end of sentence token
# Maximum sentence length in words — presumably consumed by the model /
# training code elsewhere in the project; confirm against callers.
MAX_LENGTH = 10
# Return type of get_data():
# (en_word2idx, en_idx2word, en_word_n, fr_word2idx, fr_idx2word, fr_word_n, pairs)
DataType = Tuple[Dict[str, int], Dict[int, str], int, Dict[str, int], Dict[int, str], int, List[List[str]]]

def str_normalize(s: str) -> str:
    """Normalize a sentence for vocabulary building.

    Lower-cases and strips *s*, puts a space before each sentence-final
    punctuation mark (. ! ?), and collapses every run of other
    non-letter characters into a single space.

    The result is stripped again: the non-letter substitution can leave
    a leading/trailing space (e.g. ``"café"`` -> ``"caf "``), which would
    otherwise produce empty-string tokens when callers split on ' '.
    """
    s = s.lower().strip()
    # separate . ! ? from the preceding word so they become their own tokens
    s = sub(r'([.!?])', r' \1', s)
    # anything that is not a letter or . ! ? becomes a single space
    s = sub(r'[^a-zA-Z.!?]+', r' ', s)
    return s.strip()

def _add_sentence(sentence: str, word2idx: Dict[str, int], word_n: int) -> int:
    """Register every unseen word of *sentence* in *word2idx* (in place).

    Words are obtained by splitting on single spaces; new words receive
    consecutive indices starting at *word_n*. Returns the updated count.
    """
    for word in sentence.split(' '):
        if word not in word2idx:
            word2idx[word] = word_n
            word_n += 1
    return word_n

def get_data(data_path: str, encoding: str='utf-8') -> DataType:
    """Convert strings from a TXT file into
    pairs of sentences in English and French.

    Each line of the file must contain an English sentence and its
    French translation separated by a tab character.

    Returns tuple of `en_word2idx`, `en_idx2word`, `en_word_n`,
    `fr_word2idx`, `fr_idx2word`, `fr_word_n`, `pairs`"""
    # read data file and cut it into normalized [english, french] pairs
    with open(data_path, 'r', encoding=encoding) as f:
        lines = f.read().strip().split('\n')
    pairs = [[str_normalize(s) for s in line.split('\t')] for line in lines]

    # vocabularies start with the special tokens at indices 0 and 1
    en_word2idx = {'SOS': 0, 'EOS': 1}
    fr_word2idx = {'SOS': 0, 'EOS': 1}
    # word counts include the two special tokens
    en_word_n = 2
    fr_word_n = 2

    # build the English and French vocabularies in one pass over the pairs
    for pair in pairs:
        en_word_n = _add_sentence(pair[0], en_word2idx, en_word_n)
        fr_word_n = _add_sentence(pair[1], fr_word2idx, fr_word_n)

    # reverse dictionaries: index -> word
    en_idx2word = {v: k for k, v in en_word2idx.items()}
    fr_idx2word = {v: k for k, v in fr_word2idx.items()}

    # return data
    return (
        en_word2idx,
        en_idx2word,
        en_word_n,
        fr_word2idx,
        fr_idx2word,
        fr_word_n,
        pairs
    )

if __name__ == '__main__':
    from rich import print

    corpus_path = '../data/en2fr.txt'
    # only the word counts and the sentence pairs are needed for the summary
    data = get_data(corpus_path)
    en_word_n, fr_word_n, pairs = data[2], data[5], data[6]
    print(f'Number of pairs: {len(pairs)}')
    print(f'Number of English words: {en_word_n}')
    print(f'Number of French words: {fr_word_n}')
