import unittest

import inference
from train_tokenizer.train_tokenizer import main

class MyTestCase(unittest.TestCase):
    """Manual / exploratory tests for tokenizer training and inference utilities.

    Most cases are disabled with ``@unittest.skip`` and are meant to be run
    one at a time by removing the decorator.
    """

    @unittest.skip("demo")
    def test_eg_min(self):
        # Minimal placeholder case; prints only.
        print('demo')

    @unittest.skip("训练分词器")
    def test_train_tokenizer(self):
        # Runs the tokenizer-training entry point end to end.
        main()

    @unittest.skip("inference.py_句子相似对比")
    def test_inference1(self):
        """Compare two phrases two ways: via the embedding matrix and via
        the final transformer-layer output."""
        # Comparison (1): similarity from the word-embedding matrix.
        word1, word2 = "低维展开的智子", "宇宙的黑暗森林状态"
        emb_vectors = [
            inference.get_word_vector(w, inference.tokenizer, inference.word_embeddings)
            for w in (word1, word2)
        ]
        similarity = inference.cosine_similarity(emb_vectors[0], emb_vectors[1])
        print(f"Embedding Layer Similarity: '{word1}' and '{word2}' is: {similarity:.4f}")

        # Comparison (2): similarity from the last transformer layer's output.
        sentence1, sentence2 = "低维展开的智子", "宇宙的黑暗森林状态"
        # sentence2 = "时间简史主要讲了什么？"
        sent_vectors = [
            inference.get_sentence_vector(s, inference.tokenizer, inference.model)
            for s in (sentence1, sentence2)
        ]
        similarity = inference.cosine_similarity(sent_vectors[0], sent_vectors[1])
        print(f"\"{sentence1}\" vs \"{sentence2}\" \n Transformer Layer Similarity: {similarity:.4f}")

    # @unittest.skip("inference模仿textbook.py预测")
    def test_inference2(self):
        """Reference sketch for text generation from a trained model.

        The commented steps below document the intended flow (load weights,
        encode a prompt, generate, decode); only the separator print runs.
        NOTE(review): original sketch had `torch,tensor` — fixed to
        `torch.tensor` here; confirm against textbook.py before enabling.
        """
        # model = Model(max_token_value=vocab_size).to(device)
        # model.load_state_dict(torch.load('model/model-scifi.pt'))
        # model.eval()
        # start = '奥特曼出生在一个小村庄'
        # start_ids = encode(start)
        # x = torch.tensor(start_ids, dtype=torch.long, device=device)[None, ...]
        # y = model.generate(x, max_new_tokens=500)
        # print('--------')
        # print(decode(y[0].tolist()))
        print('--------')

if __name__ == '__main__':
    # Discover and run every test case in this module when executed directly.
    unittest.main()
