# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from model.base import BaseModel
from utils import MODELS
import io
import torch
from PIL import Image
from .utils.gme_inference import GmeQwen2VL


@MODELS.register_module("gme")
class GMEModel(BaseModel):
    """Embedding model wrapper around GmeQwen2VL supporting text, image,
    or fused text+image (multimodal) inputs."""

    MODE = "gme"

    def __init__(self, model_name, modal_type, **kwargs):
        """Initialize the GME model.

        Args:
            model_name: Name passed through to ``BaseModel``.
            modal_type: One of ``"image"``, ``"text"``, ``"multimodal"`` —
                declares which input modalities ``infer`` will receive.
            **kwargs: Must contain ``model_path`` (checkpoint path/dir for
                ``GmeQwen2VL``); remaining kwargs go to ``BaseModel``.

        Raises:
            ValueError: If ``model_path`` is missing or ``modal_type`` is
                not one of the supported values.
        """
        super().__init__(model_name, **kwargs)
        self.model_path = kwargs.get('model_path')
        if not self.model_path:
            # Error message must name the actual expected kwarg ('model_path'),
            # not 'path_or_dir', so users know what to supply.
            raise ValueError(f"GMEModel {model_name} must have model_path")
        # In order to be compatible with single text, single image, text + image
        # and other input modal types, we need a parameter specifying the modal
        # type. Use an explicit raise instead of `assert`, which is stripped
        # under `python -O` and would let invalid values through silently.
        if modal_type not in ("image", "text", "multimodal"):
            raise ValueError(
                f"GMEModel {model_name} got invalid modal_type {modal_type!r}; "
                "expected one of 'image', 'text', 'multimodal'"
            )
        self.modal_type = modal_type
        # NOTE(review): self.device is not used in this class's visible code;
        # kept because BaseModel or external callers may read it — confirm.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = GmeQwen2VL(self.model_path)

    def infer(self, row_data):
        """Compute an embedding for one row of input data.

        Args:
            row_data: Raw row passed through ``self._preprocess_row_data``;
                expected to yield optional ``"text"`` (str or list of str) and
                optional ``"frames"`` (image bytes or list of image bytes).

        Returns:
            dict: ``{'embedding': list[float]}`` — the fused embedding with
            the leading batch dimension squeezed out.
        """
        row_data = self._preprocess_row_data(row_data)
        text = row_data.get("text", None)
        frames = row_data.get("frames", None)
        # Normalize scalars to single-element lists so the underlying model
        # always receives batched inputs.
        if text is not None and not isinstance(text, list):
            text = [text]
        if frames is not None:
            if not isinstance(frames, list):
                frames = [frames]
            # Frames arrive as raw encoded bytes; decode to PIL images.
            frames = [Image.open(io.BytesIO(frame)) for frame in frames]
        embedding = self.model.get_fused_embeddings(texts=text, images=frames, show_progress_bar=False)
        embedding = embedding.squeeze(0).tolist()
        return {
            'embedding': embedding
        }
