from functools import lru_cache
from io import BytesIO

import requests
import torch
import torch.nn as nn
import torchvision.transforms as T
from PIL import Image
from transformers import BertModel, BertTokenizer, ViTImageProcessor, ViTModel

def get_transformer_model(model_name="bert-base-uncased"):
    """Load a pretrained BERT model and return its transformer encoder layers.

    Args:
        model_name (str): Hugging Face model identifier
            (default ``"bert-base-uncased"``).

    Returns:
        torch.nn.ModuleList: the list of transformer encoder layers
        (``model.encoder.layer``).
    """
    model = BertModel.from_pretrained(model_name)
    model.eval()  # inference mode: disable dropout etc.
    return model.encoder.layer

@lru_cache(maxsize=None)
def _load_bert(model_name):
    """Load and cache one (tokenizer, model) pair per model name."""
    tokenizer = BertTokenizer.from_pretrained(model_name)
    model = BertModel.from_pretrained(model_name)
    model.eval()  # inference mode: disable dropout etc.
    return tokenizer, model


def get_input_embedding(text, max_length=128, device="cpu",
                        model_name="bert-base-uncased"):
    """Convert text into a valid Transformer input embedding.

    The tokenizer/model pair is loaded once and cached, so repeated calls
    do not re-instantiate (or re-download) the pretrained weights.

    Args:
        text (str): input text.
        max_length (int): maximum sequence length (default 128);
            shorter inputs are padded to this length.
        device (str): device to run the embedding layer on (default CPU).
        model_name (str): Hugging Face model identifier
            (default ``"bert-base-uncased"``).

    Returns:
        torch.Tensor: embedding tensor of shape ``[seq_len, hidden_dim]``,
        moved back to CPU.
    """
    tokenizer, model = _load_bert(model_name)
    model.to(device)

    inputs = tokenizer(
        text,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=max_length,
    )
    input_ids = inputs["input_ids"].to(device)

    # Run only the embedding layer; no gradients needed for feature extraction.
    with torch.no_grad():
        embeddings = model.embeddings(input_ids)  # [1, seq_len, hidden_dim]

    return embeddings.squeeze(0).to("cpu")  # -> [seq_len, hidden_dim]

@lru_cache(maxsize=None)
def _load_vit(model_name):
    """Load and cache one (image processor, model) pair per model name."""
    processor = ViTImageProcessor.from_pretrained(model_name)
    model = ViTModel.from_pretrained(model_name)
    model.eval()  # inference mode: disable dropout etc.
    return processor, model


def get_image_tensor(image_path_or_url, device="cpu",
                     model_name="google/vit-base-patch16-224"):
    """Image preprocessing + ViT embedding layer.

    Resizing is done with torchvision instead of the transformers image
    processor (avoids the ``reducing_gap`` error in some transformers
    versions). The processor/model pair is loaded once and cached.

    Args:
        image_path_or_url (str): local file path or HTTP(S) URL of the image.
        device (str): device to run the embedding layer on (default CPU).
        model_name (str): Hugging Face model identifier
            (default ``"google/vit-base-patch16-224"``).

    Returns:
        tuple:
            torch.Tensor: patch embeddings of shape ``[197, 768]``, on CPU.
            torch.nn.ModuleList: the ViT transformer encoder layers.

    Raises:
        requests.HTTPError: if a URL download returns an error status.
    """
    processor, model = _load_vit(model_name)
    model.to(device)

    if image_path_or_url.startswith("http"):
        # Bounded timeout so a dead URL cannot hang the call indefinitely;
        # raise_for_status prevents decoding an HTML error page as an image.
        response = requests.get(image_path_or_url, timeout=30)
        response.raise_for_status()
        image = Image.open(BytesIO(response.content)).convert("RGB")
    else:
        image = Image.open(image_path_or_url).convert("RGB")

    transform = T.Compose([
        T.Resize((224, 224)),
        T.ToTensor(),
        T.Normalize(mean=processor.image_mean, std=processor.image_std),
    ])
    image_tensor = transform(image).unsqueeze(0).to(device)  # [1, 3, 224, 224]

    # Run only the patch-embedding layer; no gradients needed.
    with torch.no_grad():
        embeddings = model.embeddings(image_tensor)  # [1, 197, 768]

    # Two values only: [197, 768] tensor and the encoder layer list
    # (the original "..., str" comment was wrong — no string is returned).
    return embeddings.squeeze(0).to("cpu"), model.encoder.layer