import torch
from transformers import AutoModelForCausalLM
import os

# gpu_id = 0

# device = torch.device(f"cuda:{gpu_id}")
# os.environ["HIP_VISIBLE_DEVICES"] = str(gpu_id)
# torch.cuda.set_device(gpu_id)
# print(f"Using device: {device}")

def load_deqascore():
    """Load the DeQA-Score-Mix3 model and return an image-scoring function.

    The model is loaded in float16 with ``device_map="auto"`` so the
    weights are sharded automatically across the available accelerators,
    and is frozen for inference-only use.

    Returns:
        A callable ``compute_deqascore(images) -> list[float]`` that maps
        a batch of images to per-image quality scores divided by 5
        (presumably normalizing a 1-5 MOS scale to [0, 1] -- confirm
        against the DeQA-Score model card).
    """
    model = AutoModelForCausalLM.from_pretrained(
        "zhiyuanyou/DeQA-Score-Mix3",
        trust_remote_code=True,
        attn_implementation="eager",
        torch_dtype=torch.float16,
        device_map="auto",  # automatically shard across multiple devices (DCUs/GPUs)
    )
    # Inference only: freeze every parameter so no grads are tracked or stored.
    model.requires_grad_(False)

    @torch.no_grad()
    def compute_deqascore(images):
        """Score ``images`` with DeQA and return plain Python floats."""
        scores = model.score(images)
        # Divide by 5 to rescale the raw model scores; .item() detaches
        # each 0-dim tensor into a native float.
        return [s.item() / 5 for s in scores]

    return compute_deqascore