#
from typing import Dict
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
from common.app_registry import AppRegistry as AR

class CpmEngine(object):
    """Single-turn chat inference wrapper around the MiniCPM4-8B causal LM.

    The tokenizer and model are cached on the shared ``AppRegistry`` (AR),
    so the (large) checkpoint is pulled from the hub at most once per
    process; subsequent calls reuse the cached instances.
    """

    def __init__(self):
        # Dotted import path identifying this engine; presumably consumed
        # by the surrounding app framework/registry — TODO confirm.
        self.name = 'apps.cpm.cpm_engine.CpmEngine'

    @staticmethod
    def infer(query: str) -> str:
        """Generate one chat response for ``query``.

        Args:
            query: The user's message (a single-turn conversation).

        Returns:
            The decoded assistant response, special tokens stripped.
        """
        # Fixed seed so repeated calls with the same query are reproducible
        # even though sampling (top_p / temperature) is enabled below.
        torch.manual_seed(0)
        path = 'openbmb/MiniCPM4-8B'
        device = "cuda"

        # Lazily populate the process-wide registry on first use.
        if AR.tokenizer is None:
            AR.tokenizer = AutoTokenizer.from_pretrained(path)
        if AR.model is None:
            AR.model = AutoModelForCausalLM.from_pretrained(
                path,
                torch_dtype=torch.bfloat16,
                device_map=device,
                trust_remote_code=True,
            )

        messages = [
            {"role": "user", "content": query},
        ]
        # Render the conversation with the model's chat template, appending
        # the generation prompt so the model continues as the assistant.
        prompt_text = AR.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True,
        )
        model_inputs = AR.tokenizer([prompt_text], return_tensors="pt").to(device)

        model_outputs = AR.model.generate(
            **model_inputs,
            max_new_tokens=1024,
            top_p=0.7,
            temperature=0.7,
        )

        # generate() returns prompt + completion; strip the prompt tokens.
        # BUGFIX: index the input_ids tensor explicitly. Integer indexing on
        # the BatchEncoding itself (model_inputs[i]) only works with fast
        # tokenizers (it returns an Encoding) and raises KeyError otherwise.
        input_ids = model_inputs["input_ids"]
        output_token_ids = [
            model_outputs[i][len(input_ids[i]):] for i in range(len(input_ids))
        ]

        # Batch size is 1 here, so take the first (only) decoded response.
        responses = AR.tokenizer.batch_decode(output_token_ids, skip_special_tokens=True)[0]
        return responses