# Local path of the SigLIP 2 checkpoint used by all functions below.
SIGLIP_PATH = '/data/models/siglip2-base-patch16-224' # 1 * 768  (embedding shape of this base model)
# Alternative checkpoint: siglip2-so400m-patch16-512 -> 1 * 1152 embeddings.

import torch
from transformers.image_utils import load_image

def test_model():
    """Smoke-check: verify the SigLIP model classes are importable.

    Deliberately does nothing else; the import is deferred to call time.
    """
    from transformers import SiglipModel, SiglipConfig  # noqa: F401


def test_txtmodel():
    """Build a SigLIP text tower from the local checkpoint's config and run one forward pass.

    Note: only the *configuration* comes from disk; the weights are randomly
    initialized (no pretrained parameters are loaded here).
    """
    from transformers import SiglipTextConfig, SiglipTextModel

    # BUG FIX: SiglipTextConfig(name_or_path=SIGLIP_PATH) built a *default*
    # config and only recorded the path as an attribute; from_pretrained()
    # actually reads config.json from the checkpoint directory.
    txt_config = SiglipTextConfig.from_pretrained(SIGLIP_PATH)
    # Text tower (not the visual one) instantiated from that config.
    sig_txt_model = SiglipTextModel(txt_config)

    # BUG FIX: sig_txt_model() with no arguments raised
    # "ValueError: You have to specify input_ids"; feed a dummy token batch.
    dummy_input_ids = torch.zeros((1, 4), dtype=torch.long)
    return sig_txt_model(input_ids=dummy_input_ids)

# Sample (image path, caption) pair exercised by txt_token() below.
test_data = {
    'path': '/mnt/nas/shengjie/qdrant_data/images/RBTxCO_2021春夏_日本_外套_艺术字体_60563318_21252727.jpg',
    'description': 'long sleeves and a front zipper closure',
}

def txt_token():
    """Load the SigLIP checkpoint and print cosine similarities.

    Compares the sample caption's text embedding against:
      (a) the sample image's embedding,
      (b) an unrelated single word ("green"),
      (c) the empty string.
    Results are printed; nothing is returned.
    """
    from transformers import AutoProcessor, AutoModel

    model = AutoModel.from_pretrained(SIGLIP_PATH, device_map="cuda").eval()

    # BUG FIX: `device_map` is a model-loading kwarg; processors run on CPU,
    # so passing it to AutoProcessor.from_pretrained was silently ignored.
    processor = AutoProcessor.from_pretrained(SIGLIP_PATH)  # loaded from the local directory

    # Per the HF SigLIP docs, text inputs should use padding="max_length":
    # the text tower was trained with max-length padding, and shorter
    # padding skews the resulting embeddings.
    text_input = processor(
        text=test_data['description'], padding="max_length", return_tensors="pt"
    ).to(model.device)
    text_input2 = processor(text="green", padding="max_length", return_tensors="pt").to(model.device)
    text_input3 = processor(text="", padding="max_length", return_tensors="pt").to(model.device)
    image_input = processor(images=load_image(test_data['path']), return_tensors="pt").to(model.device)

    with torch.no_grad():
        txt_embeddings = model.get_text_features(**text_input)    # (1, d), d=768 for this checkpoint
        txt_embeddings2 = model.get_text_features(**text_input2)  # (1, d)
        txt_embeddings3 = model.get_text_features(**text_input3)  # (1, d)
        img_embeddings = model.get_image_features(**image_input)  # (1, d)

    # Cosine similarity between the embedding pairs (reduces along dim=1).
    import torch.nn.functional as F
    similarity = F.cosine_similarity(txt_embeddings, img_embeddings)    # caption vs. image
    similarity2 = F.cosine_similarity(txt_embeddings, txt_embeddings2)  # caption vs. unrelated word
    similarity3 = F.cosine_similarity(txt_embeddings, txt_embeddings3)  # caption vs. empty text
    # BUG FIX: all three prints previously claimed "text and image" even
    # though the last two are text-to-text comparisons.
    print("Cosine similarity between text and image embeddings:", similarity.item())
    print("Cosine similarity between text and unrelated-text embeddings:", similarity2.item())
    print("Cosine similarity between text and empty-text embeddings:", similarity3.item())


if __name__ == '__main__':
    import argparse
    import os

    cli = argparse.ArgumentParser()
    cli.add_argument('-c', '--cuda', type=str, default='2', help='CUDA device id(s) to use')
    known, _unused = cli.parse_known_args()
    # Restrict visible GPUs; torch initializes CUDA lazily, so setting the
    # env var here (before any CUDA call) should still take effect — verify.
    os.environ['CUDA_VISIBLE_DEVICES'] = known.cuda

    txt_token()