from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
from typing import Optional

class HuggingFaceManager:
    
    def __init__(self, 
                 model_path: str, 
                 tokenizer_path: Optional[str]=None,
                 model_load_args: Optional[dict]=None,
                 tokenizer_args: Optional[dict]=None,
                 generate_args: Optional[dict]=None,
                 decode_args: Optional[dict]=None
                 ):
        self.model_path = model_path 
        self.tokenizer_path = tokenizer_path if tokenizer_path else model_path
        self.model_load_args = model_load_args
        self.tokenizer_args = tokenizer_args
        self.generate_args = generate_args
        self.decode_args = decode_args
        
        self.device: str = None
        
    def __call__(self, *args, **kwargs):
        return self.forward(*args, **kwargs)
    
    def init(self):
        if not self.model_load_args:
            self._init_model_load_args()
        
        if not self.tokenizer_args:
            self._init_tokenizer_args()
        
        if not self.generate_args:
            self._init_generate_args()
            
        if not self.decode_args:
            self._init_decode_args()
        
        self.model = AutoModelForCausalLM.from_pretrained(self.model_path,
                                                        **self.model_load_args)
        self.tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_path,
                                                       **self.tokenizer_args)
        
        self.device = self.model.device
        
    def _init_model_load_args(self):
        self.model_load_args = dict(
            device_map='auto',
            torch_dtype=torch.bfloat16
        )
        
    def _init_tokenizer_args(self):
        self.tokenizer_args = dict(
            return_tensors="pt", 
            padding=True, 
            truncation=False
        )
        
    def _init_generate_args(self):
        self.generate_args = dict(
            max_new_tokens=200, 
            do_sample=True
        )
        
    def _init_decode_args(self):
        self.decode_args = dict(
            skip_special_tokens=False,
            )
        
    def set_model_load_args(self, new_args: dict):
        self.model_load_args = new_args 
        
    def set_tokenizer_args(self, new_args: dict):
        self.tokenizer_args = new_args 
        
    def set_generate_args(self, new_args: dict):
        self.generate_args = new_args 
        
    def set_decode_args(self, new_args: dict):
        self.decode_args = new_args 
        
    def forward(self, 
                prompts: list[str] | str):
        self.init()
        
        if isinstance(prompts, str):
            prompts = [prompts]
            
        model_inputs = self.tokenizer(prompts, 
                                      **self.tokenizer_args).to(self.device)
        with torch.no_grad():
            output_ids = self.model.generate(**model_inputs, 
                                             **self.generate_args)
        
        responses = [self.tokenizer.decode(output_ids[len(input_ids):], **self.decode_args) 
                     for input_ids, output_ids in zip(model_inputs['input_ids'],output_ids)]
        return responses
    
    generate = forward
        
if __name__ == '__main__':
    # Manual smoke test against a local Qwen2.5 checkpoint.
    model_path = "/mnt/public/open_source_model/Qwen2.5/Qwen2.5-7B-Instruct"

    system = ''
    user = '帮我找一些上班摸鱼的vscode插件'
    # Hand-rolled ChatML prompt: system turn, user turn, open assistant turn.
    data = (
        '<|im_start|>system\n' + system + '<|im_end|>\n'
        '<|im_start|>user\n' + user + '<|im_end|>\n'
        '<|im_start|>assistant'
    )

    manager = HuggingFaceManager(model_path)
    print(manager(data))


