
from typing import List, Dict, Tuple

def preprocess(file_path: str, max_line_num=None) -> List[List[str]]:
    """Read sentences from a text file and split them into word lists.

    Each non-empty line is lowercased, has a single trailing '.', '?' or '!'
    removed (if present), and is split on single spaces.

    Args:
        file_path: Path to a UTF-8 text file, one sentence per line.
        max_line_num: If given, stop after this many non-empty lines.

    Returns:
        A list of word lists, one per non-empty input line.
    """
    sequences_list: List[List[str]] = []
    line_count = 0

    with open(file_path, 'r', encoding='utf-8') as file:
        for raw_line in file:
            if (max_line_num is not None) and (line_count >= max_line_num):
                break

            # Strip the trailing newline explicitly. The original code assumed
            # every line ends with '\n' and sliced by fixed offsets, which
            # corrupted the final line of files without a trailing newline.
            line_str = raw_line.rstrip('\n')

            # Skip blank lines; they do not count toward max_line_num.
            if not line_str:
                continue

            line_count += 1

            # Drop a single terminal sentence-ending punctuation mark.
            if line_str.endswith(('.', '?', '!')):
                line_str = line_str[:-1]

            sequences_list.append(line_str.lower().split(' '))

    return sequences_list

def tokenize(sentence_str: str) -> List[str]:
    """Normalize a sentence and split it into words.

    The input is whitespace-trimmed and lowercased; a single trailing
    '.', '?' or '!' is removed if present. The result is split on
    single spaces (an empty input yields ['']).
    """
    cleaned = sentence_str.strip().lower()

    # Remove one terminal sentence-ending punctuation mark, if any.
    if cleaned.endswith(('.', '?', '!')):
        cleaned = cleaned[:-1]

    return cleaned.split(' ')
