
from typing import List, Dict, Tuple

# Sentence-final punctuation (ASCII and full-width CJK forms) stripped
# from the end of a sentence before tokenization.
EXCLUDE_LAST_CHARS_LIST = ['.', '?', '!', '。', '？', '！']

def preprocess_all(file_path: str) -> Tuple[List[List[str]], List[List[str]]]:
    """Load a tab-separated parallel corpus and tokenize both sides.

    Each line of the file is expected to hold an English sentence and a
    Chinese sentence separated by a tab; lines without at least two
    tab-separated fields are skipped.

    Args:
        file_path: Path to the UTF-8 corpus file.

    Returns:
        A pair ``(source_sequences, target_sequences)`` where the source
        entries are English word lists and the target entries are
        Chinese character lists, aligned by index.
    """
    with open(file_path, 'r', encoding='utf-8') as f:
        raw_text = f.read()

    source_sequences_list: List[List[str]] = []
    target_sequences_list: List[List[str]] = []

    for raw_line in raw_text.split('\n'):
        fields = raw_line.split('\t')
        # Skip malformed lines that do not carry both sentences.
        if len(fields) < 2:
            continue

        source_sequences_list.append(tokenize_eng(fields[0]))
        target_sequences_list.append(tokenize_cmn(fields[1]))

    return source_sequences_list, target_sequences_list

def tokenize_eng(sentence_str: str) -> List[str]:
    """Tokenize an English sentence into lowercase word tokens.

    Strips surrounding whitespace, lowercases, removes a single trailing
    sentence-final punctuation mark, then splits on whitespace.

    Args:
        sentence_str: Raw English sentence.

    Returns:
        List of lowercase words; an empty list for a blank sentence.
    """
    sentence_str = sentence_str.strip().lower()

    # Drop one trailing sentence-final punctuation mark, if present.
    if sentence_str and sentence_str[-1] in EXCLUDE_LAST_CHARS_LIST:
        sentence_str = sentence_str[:-1]

    # split() with no argument collapses runs of whitespace and never
    # produces empty tokens; split(' ') would yield '' tokens for
    # consecutive spaces and [''] for an empty sentence.
    return sentence_str.split()

def tokenize_cmn(sentence_str: str) -> List[str]:
    """Tokenize a Chinese sentence into a list of individual characters.

    Strips surrounding whitespace, lowercases (affects any embedded
    Latin letters), and removes a single trailing sentence-final
    punctuation mark before splitting into characters.

    Args:
        sentence_str: Raw Chinese sentence.

    Returns:
        List of single-character tokens; empty for a blank sentence.
    """
    cleaned = sentence_str.strip().lower()

    # Remove one trailing sentence-final punctuation mark, if present.
    if cleaned and cleaned[-1] in EXCLUDE_LAST_CHARS_LIST:
        cleaned = cleaned[:-1]

    # Character-level tokenization: each character becomes one token.
    return list(cleaned)

