import os , traceback
import time

# Point Hugging Face downloads at the hf-mirror endpoint (for restricted networks).
# Must be set BEFORE transformers is imported below.
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
from utils.loggingConfig import get_logger


# Module-level logger (root/'' logger name from the project logging config).
logger = get_logger('')
try:
    import torch
    import torch_npu

    # Device preference order: Ascend NPU, then CUDA GPU, then CPU.
    # `devicemap` feeds transformers' device_map=; `device` is used to move inputs.
    if torch.npu.is_available():
        devicemap = {"": "npu:0"}
        device = 'npu:0'
    elif torch.cuda.is_available():
        devicemap = {"": "cuda:0"}
        device = 'cuda:0'
    else:
        devicemap = 'cpu'
        device = 'cpu'
except Exception as e:
    # torch / torch_npu unavailable or device probing failed: fall back to CPU,
    # but record why instead of silently swallowing the error.
    logger.warning(f'device detection failed, falling back to cpu: {e}')
    logger.debug(traceback.format_exc())
    devicemap = 'cpu'
    device = 'cpu'

from transformers import AutoProcessor, AutoModelForImageTextToText

class stepFunModel():
    """Singleton wrapper around the StepFun image-text-to-text model.

    Loads the model and processor exactly once (model path taken from the
    STEPFUN_MODEL_PATH environment variable) and reuses them on every
    subsequent instantiation, so per-request `stepFunModel()` calls are cheap.
    """
    _instance = None      # the single shared instance
    _initialized = False  # True once model/processor have been loaded

    def __new__(cls):
        """Singleton: always return the one shared instance."""
        if cls._instance is None:
            cls._instance = super(stepFunModel, cls).__new__(cls)
        return cls._instance

    def __init__(self):
        """Load the model and processor on first construction; no-op afterwards.

        Raises:
            ValueError: if the STEPFUN_MODEL_PATH environment variable is unset.
        """
        # Guard first, before any logging, so repeated instantiations
        # (one per service call) do not spam the log.
        if stepFunModel._initialized:
            return

        logger.info(f'####### stepFunModel.class.device_map:{devicemap}')
        logger.info(f'####### stepFunModel.class.device:{device}')

        model_path = os.getenv('STEPFUN_MODEL_PATH')
        if not model_path:
            # Fail fast with a clear message instead of a cryptic error
            # from from_pretrained(None).
            raise ValueError('STEPFUN_MODEL_PATH environment variable is not set')

        model = AutoModelForImageTextToText.from_pretrained(model_path, device_map=devicemap)
        processor = AutoProcessor.from_pretrained(model_path)

        self._model = model
        self._processor = processor

        stepFunModel._initialized = True

        logger.info(f'####### stepFunModel load end #######')

# Load the model once at import time.
stepFunModel()

async def stepFunService(image_path):
    """Run OCR on the image at *image_path* and return the decoded text."""
    logger.info(f"######### stepFunService.stepFunService.image_path -> {image_path}")
    start = time.time()

    singleton = stepFunModel()
    processor = singleton._processor

    # Preprocess the image and move tensors to the detected device.
    inputs = processor(image_path, return_tensors="pt").to(device)

    generated = singleton._model.generate(
        **inputs,
        do_sample=False,
        tokenizer=processor.tokenizer,
        stop_strings="<|im_end|>",
        max_new_tokens=4096,
    )

    # Decode only the newly generated tokens, skipping the prompt portion.
    prompt_len = inputs["input_ids"].shape[1]
    orcmsg = processor.decode(generated[0, prompt_len:], skip_special_tokens=True)

    elapsed = time.time() - start
    logger.info(f'############## stepFunService.stepFunService.ocr_parse_spend_time -> {str(elapsed)}')
    return orcmsg
