from collections import defaultdict, Counter

# Toy training corpus: six short Chinese sentences for the character-level n-gram model.
corpus = ["我喜欢吃苹果", "我喜欢吃香蕉", "我喜欢吃葡萄", "他不喜欢吃香蕉", "他喜欢吃苹果", "她喜欢吃草莓"]


# 文本转换为单个字符的列表
def tokenize(text):
    """Split *text* into a list of single characters (character-level tokens)."""
    return list(text)


# 计算N-Gram的词频
def count_ngram(corpus, n):
    """Count n-gram frequencies over *corpus*.

    Returns a mapping: (n-1)-token prefix tuple -> Counter of next-token counts.
    """
    counts = defaultdict(Counter)
    for text in corpus:
        chars = list(text)  # character-level tokenization, inlined
        # Slide a window of width n across the character sequence.
        for start in range(len(chars) - n + 1):
            window = tuple(chars[start:start + n])
            counts[window[:-1]][window[-1]] += 1
    return counts


# Build and display the bigram frequency table for the toy corpus.
bigram_counts = count_ngram(corpus, 2)
print("Bigram 词频: ")
for prefix, counts in bigram_counts.items():
    print("{}:{}".format("".join(prefix), dict(counts)))


# 计算N-Gram出现的概率
def ngram_probatilities(ngram_counts):
    """Convert raw n-gram counts into conditional probabilities.

    For each prefix, divides every next-token count by the prefix's total,
    so the values for a prefix sum to 1. Returns a defaultdict(Counter)
    mirroring the input's structure.

    NOTE(review): the name has a typo ("probatilities"); kept as-is since
    it is this function's public name.
    """
    probs = defaultdict(Counter)
    for prefix, token_counts in ngram_counts.items():
        total = sum(token_counts.values())
        probs[prefix] = Counter(
            {token: count / total for token, count in token_counts.items()}
        )
    return probs


# Convert the bigram counts into conditional probabilities and display them.
bigram_probs = ngram_probatilities(bigram_counts)

print("\nbigram 出现的概率:")
for prefix, probs in bigram_probs.items():
    print("{}:{}".format("".join(prefix), dict(probs)))


# 定义生成下一个词的函数
def generate_next_token(prefix, ngram_probs):
    if not prefix in ngram_probs:
        return None
    next_token_probs = ngram_probs[prefix]
    next_token = max(next_token_probs, key=next_token_probs.get)
    return next_token


# 生成连续文本的函数
# ngram_probs 给定前缀的下一个词出现的概率字典
def generate_text(prefix, ngram_probs, n, length=6):
    tokens = list(prefix)
    print(tuple(tokens[-(n - 1):]))
    for _ in range(length - len(prefix)):
        next_token = generate_next_token(tuple(tokens[-(n - 1):]), ngram_probs)
        if not next_token:
            break
        tokens.append(next_token)
    return "".join(tokens)


# Demo: greedily generate up to 6 characters starting from the seed "我".
generated_text = generate_text("我", bigram_probs, 2)
print("\n 生成的文本:", generated_text)
