from collections import defaultdict, Counter


class SimpleTokenizer:
    """A toy word-level bigram model.

    Learns, from whitespace-split training texts, the conditional
    probability of each word following another, and predicts the most
    likely next word greedily.
    """

    def __init__(self):
        # Vocabulary: token -> token_id mapping (ids assigned alphabetically).
        self.vocab = {}
        # Transition table: {word: {next_word: probability}}.
        self.next_word_probs = {}

    def train(self, texts):
        """Fit the bigram model on an iterable of whitespace-delimited texts.

        Steps:
        1. Collect every unique word into the vocabulary (one word = one token).
        2. Count, for each word, how often each other word follows it.
        3. Normalize counts to probabilities (count / total following-count).
        """
        all_words = set()
        for text in texts:
            all_words.update(text.split())
        # BUG FIX: the original printed the whole set here while the message
        # claims to report a count of unique tokens — use len() instead.
        print(f"Collected {len(all_words)} unique tokens.")
        # Assign each word an id, ordered alphabetically for determinism.
        self.vocab = {word: idx for idx, word in enumerate(sorted(all_words))}
        # Count adjacent word pairs: word_pair_counts[current][next] = count.
        word_pair_counts = defaultdict(Counter)
        for text in texts:
            words = text.split()
            # Walk adjacent pairs within each text (pairs never span texts).
            for i in range(len(words) - 1):
                word_pair_counts[words[i]][words[i + 1]] += 1
        # Normalize each word's follower counts into a probability distribution.
        for word, next_counts in word_pair_counts.items():
            total_count = sum(next_counts.values())
            self.next_word_probs[word] = {
                next_word: count / total_count
                for next_word, count in next_counts.items()
            }
        print(f"Training completed:{self.next_word_probs}")

    def predict(self, current_word, temperature=1.0):
        """Return the most probable word following ``current_word``.

        Returns ``None`` when ``current_word`` was never seen as a
        pair-leading word during training.

        NOTE: ``temperature`` is accepted for interface compatibility but is
        currently unused — prediction is a deterministic argmax. (A
        temperature-weighted random sampler was sketched in an earlier
        revision and could be reinstated via ``random.choices``.)
        """
        if current_word not in self.next_word_probs:
            return None
        # Follower distribution for the current word.
        probabilities = self.next_word_probs[current_word]
        # Greedy choice: argmax over probabilities. max() is O(k) versus the
        # original full sort; a leftover debug print of the sorted candidate
        # list was also removed.
        return max(probabilities, key=probabilities.get)


# Demo: build a tokenizer, fit it on a tiny corpus, and predict one word.

# Step 1: instantiate the bigram tokenizer.
tokenizer = SimpleTokenizer()

# Step 2: training corpus — one whitespace-delimited sentence per entry.
texts = [
    "hello world python good",
    "hello world",
    "hello world",
    "hello python",
    "hello python",
    "hello hello",
]
tokenizer.train(texts)

# Step 3: greedily predict the word most likely to follow the seed word.
start = "hello"
predicted_word = tokenizer.predict(start, temperature=0.5)
print(f"{start} 的下一个词是:{predicted_word}")
