from __future__ import annotations

import base64
import logging
import math
import os
import requests
from io import BytesIO
from typing import Dict, List, Optional

import torch
from PIL import Image
from torch import nn

from transformers import AutoModelForVision2Seq, AutoProcessor

class GmeQwen2VLWithHash(nn.Module):
    """GME Qwen2-VL image-embedding model with a trainable hashing head.

    Wraps a pretrained ``AutoModelForVision2Seq`` ("GME" Qwen2-VL variant)
    to produce L2-normalized image embeddings, and stacks a small MLP
    (``hash_layer``) ending in ``Tanh`` that maps each embedding to a
    ``hash_dim``-dimensional code in (-1, 1).
    """

    def __init__(
            self,
            model_name: str = "Alibaba-NLP/gme-Qwen2-VL-2B-Instruct",
            model_path: Optional[str] = None,
            min_image_tokens: int = 256,
            max_image_tokens: int = 352,
            max_length: int = 768,
            device: str = "cuda" if torch.cuda.is_available() else "cpu",
            hash_dim: int = 64,
            in_features: int = 1536,
            **kwargs,
    ) -> None:
        """Load the base model and processor and build the hash head.

        Args:
            model_name: HF hub id of the base vision2seq model.
            model_path: local checkpoint directory; overrides ``model_name``
                when given.
            min_image_tokens / max_image_tokens: bounds on visual tokens per
                image; converted to pixel budgets (1 token == 28x28 pixels).
            max_length: tokenizer truncation length for the prompt.
            device: target device. NOTE: the default is evaluated once at
                class-definition time, not per call.
            hash_dim: output dimensionality of the hash code.
            in_features: hidden width of the hash MLP.
            **kwargs: forwarded to both ``from_pretrained`` calls.
        """
        super().__init__()
        self.hash_dim = hash_dim
        self.normalize = True  # L2-normalize embeddings in get_image_embeddings
        self.in_features = in_features
        self.device = device
        # A local checkpoint path takes precedence over the hub model name.
        model_name = model_path or model_name


        self.gme_base = AutoModelForVision2Seq.from_pretrained(
            model_name,
            torch_dtype=torch.float32,
            output_hidden_states=True,
            return_dict=True,** kwargs
        ).to(self.device)

        # 1 visual token corresponds to a 28x28 pixel patch.
        min_pixels = min_image_tokens * 28 * 28
        max_pixels = max_image_tokens * 28 * 28

        self.processor = AutoProcessor.from_pretrained(
            model_name,
            min_pixels=min_pixels,
            max_pixels=max_pixels, **kwargs
        )

        # Right padding so the last non-padded position is the sequence end,
        # which get_image_embeddings pools via attention_mask.sum() - 1.
        self.processor.tokenizer.padding_side = 'right'
        self.default_instruction = 'You are a helpful assistant.'
        self.max_length = max_length

        # NOTE(review): lm_head.out_features is the vocabulary size, but
        # get_image_embeddings returns last-hidden-state vectors of size
        # config.hidden_size — confirm the first Linear's in_features
        # actually matches what forward() feeds into hash_layer.
        self.lm_head_features = self.gme_base.lm_head.out_features

        self.hash_layer = nn.Sequential(
            nn.Linear(self.lm_head_features, self.in_features),
            nn.Linear(self.in_features, self.in_features),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(self.in_features, self.in_features),
            nn.ReLU(),
            nn.Linear(self.in_features, self.hash_dim),
            nn.Tanh()
        ).to(self.device)

    def get_image_embeddings(self, images: List[str | Image.Image]):
        """Embed a batch of images with the base model.

        Args:
            images: PIL images, or strings accepted by ``fetch_image``
                (URL, ``file://`` URI, ``data:image`` URI, or local path).

        Returns:
            Tensor of shape (batch, hidden_size): the last hidden state at
            each sequence's final non-padded position, L2-normalized when
            ``self.normalize`` is True.
        """
        input_images = []
        for img in images:
            if isinstance(img, str):
                img = fetch_image(img)
            input_images.append(img)

        # One fixed chat-template prompt per image; the <|image_pad|> token
        # marks where visual embeddings get spliced in below.
        input_texts = [
            f'<|im_start|>system\n{self.default_instruction}<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|><|im_end|>\n<|im_start|>assistant\n<|endoftext|>'
            for _ in input_images
        ]

        inputs = self.processor(
            text=input_texts,
            images=input_images,
            padding=True,
            truncation=True,
            max_length=self.max_length,
            return_tensors='pt'
        )

        inputs = {k: v.to(self.device) for k, v in inputs.items()}

        # Gradients only flow in training mode (e.g. when fine-tuning the
        # hash head end-to-end).
        with torch.set_grad_enabled(self.training):
            input_ids = inputs['input_ids']
            attention_mask = inputs['attention_mask']
            pixel_values = inputs.get('pixel_values')

            grid_thw = inputs.get('image_grid_thw', None)

            inputs_embeds = self.gme_base.model.get_input_embeddings()(input_ids)
            if pixel_values is not None:
                pixel_values = pixel_values.type(self.gme_base.visual.get_dtype())

                # Encode pixels with the vision tower and overwrite the
                # image-token placeholder embeddings in place.
                image_embeds = self.gme_base.visual(
                    pixel_values,
                    grid_thw=grid_thw
                ).to(inputs_embeds.device)
                image_mask = input_ids == self.gme_base.config.image_token_id
                inputs_embeds[image_mask] = image_embeds

            outputs = self.gme_base.model(
                input_ids=None,
                attention_mask=attention_mask,
                inputs_embeds=inputs_embeds
            )
            hidden_states = outputs.hidden_states[-1]


            # Pool: take the hidden state at the last real (non-padded)
            # token of each sequence (padding_side='right' guarantees this
            # index is correct).
            sequence_lengths = attention_mask.sum(dim=1) - 1
            batch_size = hidden_states.shape[0]
            gme_emb = hidden_states[torch.arange(batch_size, device=hidden_states.device), sequence_lengths]

            if self.normalize:
                gme_emb = torch.nn.functional.normalize(gme_emb, p=2, dim=1)

        return gme_emb

    def forward(self, image):
        """Return ``(embedding, hash_code)`` for a batch of images."""
        emb = self.get_image_embeddings(image)
        hash_code = self.hash_layer(emb)
        return emb, hash_code

# Resize constants for smart_resize: dimensions are snapped to multiples of
# IMAGE_FACTOR (one visual token covers a 28x28 patch), total pixel count is
# kept within [MIN_PIXELS, MAX_PIXELS], and aspect ratio is capped at MAX_RATIO.
IMAGE_FACTOR = 28
MIN_PIXELS = 4 * 28 * 28
MAX_PIXELS = 16384 * 28 * 28
MAX_RATIO = 200


def round_by_factor(number: int, factor: int):
    """Return the multiple of *factor* nearest to *number*.

    Uses Python's built-in ``round``, i.e. banker's rounding on exact halves.
    """
    return factor * round(number / factor)


def ceil_by_factor(number: int, factor: int):
    """Return the smallest multiple of *factor* that is >= *number*."""
    multiples = math.ceil(number / factor)
    return multiples * factor


def floor_by_factor(number: int, factor: int):
    """Return the largest multiple of *factor* that is <= *number*."""
    multiples = math.floor(number / factor)
    return multiples * factor


def smart_resize(
        height: int,
        width: int,
        factor: int = IMAGE_FACTOR,
        min_pixels: int = MIN_PIXELS,
        max_pixels: int = MAX_PIXELS
):
    """Compute a (height, width) pair aligned to *factor* with a bounded area.

    Both returned dimensions are multiples of *factor* (at least one factor
    each), their product lies within [min_pixels, max_pixels] where possible,
    and aspect ratios beyond MAX_RATIO are clamped with a warning.

    Returns:
        Tuple ``(new_height, new_width)``.
    """
    # Snap each side to the nearest multiple of factor, never below one factor.
    new_h = max(factor, round_by_factor(height, factor))
    new_w = max(factor, round_by_factor(width, factor))

    area = new_h * new_w
    if area > max_pixels:
        # Shrink both sides by the same scale so the area fits the budget.
        scale = math.sqrt((height * width) / max_pixels)
        new_h = floor_by_factor(height / scale, factor)
        new_w = floor_by_factor(width / scale, factor)
    elif area < min_pixels:
        # Grow both sides proportionally up to the minimum area.
        scale = math.sqrt(min_pixels / (height * width))
        new_h = ceil_by_factor(height * scale, factor)
        new_w = ceil_by_factor(width * scale, factor)

    # Clamp extreme aspect ratios by shortening the longer side.
    if max(new_h, new_w) / min(new_h, new_w) > MAX_RATIO:
        logging.warning(f"图像宽高比超过阈值{MAX_RATIO}，已自动调整")
        if new_h > new_w:
            new_h = new_w * MAX_RATIO
        else:
            new_w = new_h * MAX_RATIO

    return new_h, new_w


def fetch_image(image: str, size_factor: int = IMAGE_FACTOR):
    """Load an image from a URL, ``file://`` URI, ``data:image`` URI, or a
    local path, convert it to RGB, and resize it to factor-aligned dimensions
    computed by ``smart_resize``.

    On any load failure a blank white 224x224 image is substituted (with a
    warning) so that a single bad input never crashes a batch.

    Args:
        image: image source string.
        size_factor: alignment factor passed to ``smart_resize``.

    Returns:
        A resized RGB ``PIL.Image.Image``.
    """
    try:
        if image.startswith(("http://", "https://")):
            # Download the full body and fail fast on HTTP errors.
            # (Reading ``response.raw`` directly would skip the status-code
            # check and bypass content-encoding (gzip) decoding.)
            response = requests.get(image, timeout=10)
            response.raise_for_status()
            image_obj = Image.open(BytesIO(response.content))
        elif image.startswith("file://"):
            image_obj = Image.open(image[7:])  # strip the "file://" scheme
        elif image.startswith("data:image"):
            # Inline data URI: decode everything after the "base64," marker.
            _, base64_data = image.split("base64,", 1)
            image_obj = Image.open(BytesIO(base64.b64decode(base64_data)))
        else:  # plain local filesystem path
            if not os.path.exists(image):
                raise FileNotFoundError(f"图像文件不存在: {image}")
            image_obj = Image.open(image)
    except Exception as e:
        # Deliberate best-effort fallback: log and substitute a blank image.
        logging.warning(f"加载图像失败: {e}，使用空白图像替代")
        image_obj = Image.new('RGB', (224, 224), color=(255, 255, 255))

    image_obj = image_obj.convert("RGB")
    width, height = image_obj.size
    # smart_resize takes (height, width); PIL's resize takes (width, height).
    resized_height, resized_width = smart_resize(height, width, factor=size_factor)
    return image_obj.resize((resized_width, resized_height))