import os
import torch
import torch_npu
import acl
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor, AutoConfig
from transformers_stream_generator import init_stream_support
# Patches transformers' generate() so it accepts the `do_stream=True` flag
# used by Server_Model.generate_stream below (presumably turning generate()
# into a token-by-token generator — confirm against the library docs).
init_stream_support()

from peft import PeftModel
import argparse

from protect import hook_open, hook_import


def truncate_context(context, max_len, max_new_len, reserved=50):
    """Trim *context* to fit the model window, keeping the most recent text.

    The character budget is ``max_len - reserved - max_new_len``, where
    ``reserved`` is headroom for the prompt template (default 50, matching
    the historical hard-coded margin).  When the context exceeds the budget
    only its tail is kept, on the assumption that the newest text matters
    most.  A non-positive budget yields the empty string.

    :param context: the running dialogue/context string
    :param max_len: total model window length (characters)
    :param max_new_len: characters reserved for the generated answer
    :param reserved: extra headroom for the prompt template
    :return: the (possibly truncated) context string
    """
    # Clamp at 0 so a pathological configuration cannot produce a
    # nonsensical negative budget.
    budget = max(0, int(max_len - reserved - max_new_len))
    if len(context) <= budget:
        return context
    return context[len(context) - budget:]
    
def truncate_ocr(ocr, max_len, max_new_len, reserved=50):
    """Trim *ocr* text to fit the model window, keeping its beginning.

    The character budget is ``max_len - reserved - max_new_len``, where
    ``reserved`` is headroom for the prompt template (default 50, matching
    the historical hard-coded margin).  Unlike :func:`truncate_context`,
    the *head* of the OCR text is kept (reading order).

    Bug fix: the budget is clamped at 0 — previously a negative budget
    produced a negative slice (``ocr[:-40]``) that could return text
    *longer* than the allowance instead of truncating it.

    :param ocr: OCR-extracted text
    :param max_len: total model window length (characters)
    :param max_new_len: characters reserved for the generated answer
    :param reserved: extra headroom for the prompt template
    :return: the (possibly truncated) OCR string
    """
    budget = max(0, int(max_len - reserved - max_new_len))
    if len(ocr) <= budget:
        return ocr
    return ocr[:budget]

class Server_Model():
    """Serves a multimodal (image + text) causal LM on an Ascend NPU.

    ``__init__`` loads the checkpoint, config and processor (optionally
    stacking a LoRA adapter); ``generate`` runs a blocking generation and
    ``generate_stream`` yields decoded text incrementally.
    """

    def __init__(self, args):
        """Load model, processor and optional LoRA weights onto the NPU.

        :param args: argparse namespace providing ``ckpt_path`` and
            ``is_lora``.  The ``CKPT_MODEL_PATH`` / ``IS_LORA`` environment
            variables, when set, take priority over ``args``.
        """
        ckpt = os.getenv('CKPT_MODEL_PATH', None)
        if ckpt is None:
            ckpt = args.ckpt_path

        is_lora = os.getenv('IS_LORA', None)
        if is_lora is None:
            is_lora = args.is_lora
        # Environment variables (and raw CLI values) arrive as strings, so
        # the old strict `is_lora == True` comparison was always False for
        # them and LoRA silently never loaded.  Accept common truthy
        # spellings; a genuine bool passes through unchanged.
        if isinstance(is_lora, str):
            is_lora = is_lora.strip().lower() in ('1', 'true', 'yes')

        self.device = torch.device("npu")

        os.environ['MODEL_PATH'] = ckpt
        hook_open.install()
        hook_import.install()
        try:
            self.config = AutoConfig.from_pretrained(ckpt, trust_remote_code=True)
            self.model = AutoModelForCausalLM.from_pretrained(
                ckpt, trust_remote_code=True, torch_dtype=torch.float16).eval()
            self.process = AutoProcessor.from_pretrained(ckpt, trust_remote_code=True)
        finally:
            # Always undo the protection hooks, even if loading raises.
            hook_open.uninstall()
            hook_import.uninstall()

        if is_lora is True:
            self.model = PeftModel.from_pretrained(self.model, ckpt + '/lora')

        self.model = self.model.to(self.device)

        print('model is ready')

    def generate(self, image, question, context, deterministic, kwargs):
        """Run one non-streaming generation.

        :param image: path or file-like object openable by PIL, or ``None``
            for a text-only request.
        :param question: single question string (wrapped into a batch of one).
        :param context: single context string (wrapped into a batch of one).
        :param deterministic: seed the torch RNG for reproducible sampling.
        :param kwargs: dict forwarded verbatim to ``model.generate``.
        :return: decoded model outputs, or an error string for a bad image.
        """
        if image is not None:
            try:
                image = Image.open(image).convert('RGB')
            except Exception:  # unreadable / non-image file
                return "输入图片文件有误"

        question = [question]
        context = [context]
        prompt = "###问题：\n{}\n\n###答案："

        inputs = self.process.custom_process(image, question, context, prompt)
        inputs = {k: v.to(self.device) for k, v in inputs.items()}

        if deterministic:
            torch.manual_seed(42)
        output = self.model.generate(**inputs, **kwargs)
        output = self.process.batch_decode(output)

        return output

    def generate_stream(self, image, question, context, deterministic, kwargs):
        """Yield newly decoded text fragments as generation progresses.

        Same parameters as :meth:`generate`.  Because this is a generator,
        the bad-image error must be *yielded*: the previous
        ``return "…"`` inside the generator was packed into StopIteration
        and never reached the caller's for-loop.
        """
        acl.rt.set_device(0)
        if image is not None:
            try:
                image = Image.open(image).convert('RGB')
            except Exception:
                yield "输入图片文件有误"
                return

        question = [question]
        context = [context]
        prompt = "###问题：\n{}\n\n###答案："

        inputs = self.process.custom_process(image, question, context, prompt)
        inputs = {k: v.to(self.device) for k, v in inputs.items()}
        # Copy before adding the flag so the caller's kwargs dict is not
        # mutated between calls.
        kwargs = dict(kwargs, do_stream=True)

        if deterministic:
            torch.manual_seed(42)
        generator = self.model.generate(**inputs, **kwargs)

        all_token_list = []
        pre_words = ''
        for now_token in generator:
            all_token_list.extend(now_token.tolist())
            all_words = self.process.decode(all_token_list,
                                            skip_special_tokens=True)
            # Emit only the freshly decoded suffix.  The old
            # `all_words.replace(pre_words, '')` removed *every* occurrence
            # of the prefix, corrupting output whenever text repeats.
            yield all_words[len(pre_words):]
            pre_words = all_words

if __name__ == '__main__':
    # Smoke test: load the model and stream a caption for a local demo image.
    parser = argparse.ArgumentParser(description="Training")

    parser.add_argument("--ckpt-path", required=False,
                        default='/home/mllm-enc/taichu_mllm_enc', help="模型文件地址")
    # argparse delivers CLI values as strings, so `--is_lora True` would
    # otherwise never compare equal to bool True downstream.  Convert
    # recognised truthy spellings to a real bool; the default stays True.
    parser.add_argument("--is_lora", required=False, default=True,
                        type=lambda v: str(v).strip().lower() in ('1', 'true', 'yes'),
                        help="是否有Lora")
    args = parser.parse_args()

    # Sampling configuration forwarded verbatim to model.generate().
    kwarg = dict(
        do_sample=True,
        num_beams=1,
        top_p=0.9,
        temperature=1,
        num_return_sequences=1,
        repetition_penalty=1.5,
        length_penalty=1,
        max_new_tokens=100,
    )

    # NOTE(review): despite the name, this is a file path, not a byte stream.
    picture_byte_stream = './server/merlion.png'

    service_object = Server_Model(args)

    generator_out = service_object.generate_stream(picture_byte_stream, '描述图片', '', True, kwarg)

    for word in generator_out:
        print(word)