import collections
import re

def read_time_machine(path="Time Machine.txt"):
    """Read a text file and return cleaned, lowercased lines.

    Args:
        path: file to read (defaults to the original hard-coded filename,
            so existing callers are unaffected).

    Returns:
        A list of strings, one per input line: every run of non-alphabetic
        characters is collapsed to a single space, then each line is
        stripped and lowercased.
    """
    with open(path, 'r', encoding='utf-8') as f:
        lines = f.readlines()
    return [re.sub('[^A-Za-z]+', ' ', line).strip().lower() for line in lines]

#lines = read_time_machine()
# print(len(lines))
# print(lines[30])

def tokensize(lines, token='word'):
    """Split each line into tokens.

    Args:
        lines: list of strings.
        token: 'word' to split on whitespace, 'char' to split into
            individual characters.

    Returns:
        A list of token lists, one per input line.

    Raises:
        ValueError: if `token` is neither 'word' nor 'char'.  (The original
        silently returned None for an unknown mode, which surfaced later as
        a confusing TypeError at the call site.)
    """
    if token == 'word':
        return [line.split() for line in lines]
    if token == 'char':
        return [list(line) for line in lines]
    raise ValueError(f"unknown token type: {token!r}")

# tokens = tokensize(lines)
# for i in range(11):
#     print(tokens[i])

def count_corpus(tokens):
    """Count token frequencies.

    Accepts either a flat list of tokens or a list of token lines
    (list of lists); an empty input is handled by the nested branch.

    Returns:
        A collections.Counter mapping token -> occurrence count.
    """
    if not tokens or isinstance(tokens[0], list):
        # Flatten the 2-D token list into a single stream first.
        flat = [tok for line in tokens for tok in line]
    else:
        flat = tokens
    return collections.Counter(flat)

class Vocab:
    """Maps tokens to integer indices and back.

    Index 0 is always the unknown token '<unk>'; any reserved tokens come
    next, followed by corpus tokens in descending frequency order.
    """

    def __init__(self, tokens=None, min_freq=0, reserved_tokens=None):
        """Build the vocabulary.

        Args:
            tokens: flat token list, or list of token lines (list of lists).
            min_freq: drop tokens occurring fewer than this many times.
            reserved_tokens: extra special tokens placed right after '<unk>'.
        """
        if tokens is None:
            tokens = []
        if reserved_tokens is None:
            reserved_tokens = []
        # Flatten a list of lines into one token stream before counting
        # (same flattening rule as the module-level count_corpus helper).
        if len(tokens) == 0 or isinstance(tokens[0], list):
            tokens = [token for line in tokens for token in line]
        counter = collections.Counter(tokens)
        # Most frequent first; Python's sort is stable, so ties keep their
        # first-seen order from the Counter.
        self.token_freqs = sorted(counter.items(), key=lambda x: x[1],
                                  reverse=True)
        self.unk, uniq_tokens = 0, ['<unk>'] + reserved_tokens
        # Track membership in a set: the original `token not in uniq_tokens`
        # list scan made construction O(n^2) in vocabulary size.
        seen = set(uniq_tokens)
        for token, freq in self.token_freqs:
            if freq >= min_freq and token not in seen:
                uniq_tokens.append(token)
                seen.add(token)
        # Forward (index -> token) and reverse (token -> index) lookups.
        self.idx_to_token, self.token_to_idx = [], dict()
        for token in uniq_tokens:
            self.idx_to_token.append(token)
            self.token_to_idx[token] = len(self.idx_to_token) - 1

    def __len__(self):
        """Number of distinct tokens (including '<unk>' and reserved)."""
        return len(self.idx_to_token)

    def __getitem__(self, tokens):
        """Return the index of a token, or a list of indices for a
        list/tuple of tokens.  Unknown tokens map to self.unk (0)."""
        if not isinstance(tokens, (list, tuple)):
            return self.token_to_idx.get(tokens, self.unk)
        return [self.__getitem__(token) for token in tokens]

    def to_tokens(self, indices):
        """Inverse of __getitem__: map an index (or list of indices)
        back to the corresponding token(s)."""
        if not isinstance(indices, (list, tuple)):
            return self.idx_to_token[indices]
        return [self.idx_to_token[idx] for idx in indices]


# lines = read_time_machine()
# tokens = tokensize(lines)
# vocab = Vocab(tokens)
# # print(list(vocab.token_to_idx.items())[:20])
# # [('<unk>', 0), ('the', 1), ('i', 2), ('and', 3), ('of', 4), ('a', 5),
# # ('to', 6), ('was', 7), ('in', 8), ('that', 9), ('my', 10), ('it', 11),
# # ('had', 12), ('me', 13), ('as', 14), ('at', 15), ('for', 16), ('with', 17),
# # ('time', 18), ('but', 19)]
#
# for i in [0, 10]:
#     print('words:', tokens[i])
#     print('indices:', vocab.__getitem__(tokens[i]))

def load_corpus_time_machine(max_tokens=-1):
    """Build a character-level corpus from the time-machine text.

    Args:
        max_tokens: if positive, truncate the corpus to this many indices.

    Returns:
        corpus: flat list of token indices covering the whole text.
        vocab: the character-level Vocab built from the text.
    """
    lines = read_time_machine()
    tokens = tokensize(lines, 'char')   # tokenize at character level
    vocab = Vocab(tokens)
    # Flatten the per-line tokens into one index stream.  (The original
    # comprehension reused the name `lines` for each token line, shadowing
    # the outer `lines` variable.)
    corpus = [vocab[token] for line in tokens for token in line]
    if max_tokens > 0:
        corpus = corpus[:max_tokens]
    return corpus, vocab

# NOTE(review): this runs at import time and reads "Time Machine.txt" from
# the current working directory — a module-level side effect.  Consider
# guarding with `if __name__ == "__main__":` if this file is ever imported.
corpus, vocab = load_corpus_time_machine()
# print(len(corpus), len(vocab))