import torch
import transformers
from configparser import ConfigParser
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers import LlamaForCausalLM, LlamaTokenizer


# TODO 因为有加载模型checkpoint, 调用特别慢, 莫非模型没有训练?
# TODO model checkpoint loading makes every call very slow — is the model even fine-tuned?
class TranslateLlama:
    """Singleton wrapper around a locally stored LLaMA model.

    The model path is read from an INI config file (section ``default``,
    key ``llama3_path``).  Because loading the checkpoint is expensive,
    the class guarantees that the tokenizer and model are loaded exactly
    once, no matter how many times ``TranslateLlama(...)`` is called.
    """

    # Shared singleton instance (created lazily in __new__).
    _instance = None

    # Filesystem path of the model checkpoint, read from the config file.
    _PATH_MODEL = None
    # Lazily initialized HF tokenizer / model; None until first load.
    _tokenizer = None
    _model = None

    def __new__(cls, path_config):
        # Classic singleton: build the one shared instance on the first
        # call and hand it back on every later call.  NOTE: we do NOT
        # call __init__ here — Python invokes __init__ automatically on
        # the object returned by __new__, so calling it manually (as a
        # previous revision did) would run it twice.  __init__ below is
        # idempotent to stay safe on repeated construction.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self, path_config):
        """Load config, tokenizer and model on first construction only.

        :param path_config: path to an INI file containing
            ``[default] llama3_path = <model dir>``.
        """
        # Idempotence guard: __init__ runs on every TranslateLlama(...)
        # call (singleton), so bail out once the model is already loaded
        # to avoid re-reading the config and re-loading the checkpoint.
        if self._tokenizer is not None and self._model is not None:
            return
        # Parse the INI config file to locate the model checkpoint.
        config = ConfigParser()
        config.read(path_config, encoding='utf-8')
        self._PATH_MODEL = config.get('default', 'llama3_path')
        print("llama3_path from:", self._PATH_MODEL)
        # Load the LLaMA tokenizer and causal-LM weights (slow!).
        self._tokenizer = AutoTokenizer.from_pretrained(self._PATH_MODEL)
        self._model = AutoModelForCausalLM.from_pretrained(self._PATH_MODEL)
        print("init model llama3")

    def testLlama3Zh2En(self):
        # TODO this does not actually translate — a causal LM just
        # continues the prompt; needs a proper prompt/rewrite, and model
        # loading must be sped up first.
        # NOTE(review): despite its name, this local holds CHINESE text,
        # and the print labels below are swapped — kept byte-for-byte to
        # preserve output, but both should be fixed together.
        english_text = "青桔柠檬百香果茶"
        inputs = self._tokenizer(english_text, return_tensors="pt")
        # Beam-search generation as a stand-in for "translation".
        translated_text = self._model.generate(**inputs, max_length=128, num_beams=4, early_stopping=True)
        # Decode the first sequence, dropping special tokens.
        translated_output = self._tokenizer.decode(translated_text[0], skip_special_tokens=True)
        print(f"英文原文: {english_text}")
        print(f"中文翻译: {translated_output}")

    def test(self):
        """Smoke test: tokenize a short prompt, generate, print the result."""
        input_text = "Hello, world!"
        # Tokenize the prompt into input ids.
        input_ids = self._tokenizer.encode(input_text, return_tensors='pt')
        # Greedy generation with default parameters.
        outputs = self._model.generate(input_ids)
        # Decode and print the generated continuation.
        print(self._tokenizer.decode(outputs[0], skip_special_tokens=True))

    def testPipe(self):
        """Run a sampled text-generation query through a transformers pipeline."""
        # Build a pipeline around the already-loaded model/tokenizer;
        # the task is inferred from the model when not given explicitly.
        pipeline = transformers.pipeline(
            # "text-generation",
            model=self._model,
            tokenizer=self._tokenizer,
            # torch_dtype=torch.float16,
            # device_map="auto",
        )
        # Sampled generation (top-k) returning a single sequence.
        sequences = pipeline(
            'I have tomatoes, basil and cheese at home. What can I cook for dinner?\n',
            do_sample=True,
            top_k=10,
            num_return_sequences=1,
            eos_token_id=self._tokenizer.eos_token_id,
            max_length=400,
        )
        # Print each generated text.
        for seq in sequences:
            print(f"{seq['generated_text']}")


if __name__ == '__main__':
    # Build (or fetch) the singleton model wrapper from the config file.
    translator = TranslateLlama('../files/config.inf')
    # translator.testLlama3Zh2En()
    translator.testPipe()