import os

import torch
import warnings
warnings.simplefilter("ignore") # Stop spam of future warnings I'm seeing
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def clip_vit_b32(clip_path='/data/models/clip-ViT-B-32', device='cuda'):
    """Load the CLIP ViT-B/32 model via sentence-transformers.

    Args:
        clip_path: Local directory holding the pretrained CLIP checkpoint.
            Defaults to the original hard-coded path so existing callers
            (``clip_vit_b32()``) are unaffected.
        device: Torch device string the model is placed on.

    Returns:
        A ``SentenceTransformer`` instance whose ``encode`` accepts either
        text or image file paths.
    """
    # Imported locally so this module can be imported even where
    # sentence-transformers is not installed.
    from sentence_transformers import SentenceTransformer

    model = SentenceTransformer(clip_path, device=device)
    return model


def get_embedding(clip_model, text_or_imgpath):
    """Encode text, an image path, or a list of either into embedding vector(s).

    Thin pass-through to ``clip_model.encode``; the return shape follows
    whatever ``encode`` produces for the given input.
    """
    return clip_model.encode(text_or_imgpath)


def test():
    """Probe CLIP cosine similarities for one image against three texts.

    Compares the image's matching description, the empty string, and an
    unrelated text ('red clothing') against the image embedding and against
    each other, printing each score. Previously observed values are kept
    as inline comments for reference.
    """
    import torch.nn.functional as F

    test_data = \
        {'path': '/mnt/nas/shengjie/qdrant_data/images/RBTxCO_2021春夏_日本_外套_艺术字体_60563318_21252727.jpg', 
        'description': 'long sleeves and a front zipper closure'}

    clip_model = clip_vit_b32()
    txt_emb = get_embedding(clip_model, test_data['description'])  # related text
    txt_emb2 = get_embedding(clip_model, '')                       # empty-text baseline
    img_emb = get_embedding(clip_model, test_data['path'])

    def cos(a, b):
        # Cosine similarity between two 1-D embedding vectors.
        return F.cosine_similarity(torch.tensor(a).unsqueeze(0),
                                   torch.tensor(b).unsqueeze(0)).item()

    # Related text (txt == desc).
    # NOTE: the original labels said "text and image" for every pair, which
    # was misleading for the text-vs-text comparisons; labels now name the
    # actual operands.
    print("Cosine similarity, matching text vs image:", cos(txt_emb, img_emb))        # observed 0.7332472801208496
    print("Cosine similarity, matching text vs empty text:", cos(txt_emb, txt_emb2))  # observed 0.7923544645309448

    # Unrelated text (txt != desc).
    txt_emb3 = get_embedding(clip_model, 'red clothing')
    print("Cosine similarity, unrelated text vs image:", cos(txt_emb3, img_emb))        # observed 0.7167011499404907
    print("Cosine similarity, unrelated text vs empty text:", cos(txt_emb3, txt_emb2))  # observed 0.7785323858261108

    # Unrelated text vs the matching description.
    print("Cosine similarity, unrelated text vs matching text:", cos(txt_emb, txt_emb3))  # observed 0.7571032643318176

def test2():
    """Inspect the shape ``encode`` returns for a list (batch) of strings.

    Per earlier observations: a list input yields a 2-D array of shape
    (batch, 512) while a single str yields (512,).
    """
    import torch.nn.functional as F

    model = clip_vit_b32()
    batch = ['111'] * 1000
    batch_emb = get_embedding(model, batch)
    print(batch_emb.shape)
    

def _classify_similarity(t2i_sim, et2i_sim, ct2i_sim):
    """Bucket one sample by how the three similarities rank.

    Args:
        t2i_sim: cosine similarity of the related description vs the image.
        et2i_sim: cosine similarity of the empty string vs the image.
        ct2i_sim: cosine similarity of an irrelevant text vs the image.

    Returns:
        'succ' | 'half_fail' | 'fail' | 'reverse_fail', or None when no
        branch matches (exact ties), mirroring the original elif chain.
    """
    if t2i_sim > et2i_sim > ct2i_sim:
        # Ideal ordering: related > empty > irrelevant.
        return 'succ'
    if t2i_sim > et2i_sim and ct2i_sim > et2i_sim:
        # Both real texts beat the empty baseline; related still ranking
        # above irrelevant is only a partial failure.
        return 'half_fail' if t2i_sim > ct2i_sim else 'fail'
    if t2i_sim < et2i_sim and ct2i_sim > et2i_sim:
        # Related text lost to the empty baseline while irrelevant beat it.
        # NOTE(review): the original chain also had
        # `elif t2i_sim < et2i_sim < ct2i_sim: fail`, which is logically
        # identical to this condition and therefore unreachable; it is
        # dropped here without changing behavior.
        return 'reverse_fail'
    return None


def test_empty_txt():
    """Check how often related-text similarity beats empty/irrelevant baselines.

    Reads precomputed image embeddings from parquet, re-embeds one random
    sentence of each description, and tallies per-sample orderings of
    (related, empty, irrelevant) image similarities via
    ``_classify_similarity``. Previously observed:
    succ=496, half_fail=4, fail=0, reverse_fail=0, total=500.
    """
    # NOTE(review): both variables point at the same parquet file — confirm
    # whether the description embeddings were meant to come from a separate
    # file; left unchanged here.
    desc_parquet_file = '/mnt/nas/shengjie/qdrant_data/resources_text/images_embeddings.parquet'
    img_parquet_file = '/mnt/nas/shengjie/qdrant_data/resources_text/images_embeddings.parquet'
    import random
    import pandas as pd
    import torch.nn.functional as F

    # Expected columns (from a previous inspection): path, width, height,
    # area, aspect_ratio, description, embedding — where each embedding is a
    # numpy.ndarray of shape (512,).
    df_desc = pd.read_parquet(desc_parquet_file)
    df_img = pd.read_parquet(img_parquet_file)

    clip_model = clip_vit_b32()
    empty_emb = get_embedding(clip_model, '')                            # empty-text baseline
    irrelevant_emb = get_embedding(clip_model, 'human eat pig on menu')  # irrelevant baseline (512,)

    counts = {'succ': 0, 'half_fail': 0, 'fail': 0, 'reverse_fail': 0}
    t2i_sims = []   # related-text similarities
    et2i_sims = []  # empty-text similarities
    ct2i_sims = []  # irrelevant-text similarities

    desc_txts = df_desc['description'].tolist()
    desc_embs = df_desc['embedding'].tolist()
    img_embs = df_img['embedding'].tolist()
    n = len(desc_embs)

    def cos(a, b):
        # Cosine similarity between two 1-D embedding vectors.
        return F.cosine_similarity(torch.tensor(a).unsqueeze(0),
                                   torch.tensor(b).unsqueeze(0)).item()

    for i in range(n):
        # Query with a single random sentence from the full description
        # (falls back to the full text when splitting yields nothing).
        sentences = [s.strip() for s in desc_txts[i].split('.') if s.strip()]
        rel_txt = random.choice(sentences) if sentences else desc_txts[i]
        rel_emb = get_embedding(clip_model, rel_txt)
        img_emb = img_embs[i]

        t2i_sim = cos(rel_emb, img_emb)          # related text vs image
        et2i_sim = cos(empty_emb, img_emb)       # empty text vs image
        ct2i_sim = cos(irrelevant_emb, img_emb)  # irrelevant text vs image

        t2i_sims.append(t2i_sim)
        et2i_sims.append(et2i_sim)
        ct2i_sims.append(ct2i_sim)

        label = _classify_similarity(t2i_sim, et2i_sim, ct2i_sim)
        if label is not None:
            counts[label] += 1

        # Optional per-sample trace:
        # print(f'idx={i} rel={t2i_sim:.4f} empty={et2i_sim:.4f} irr={ct2i_sim:.4f}')

    print(f"succ={counts['succ']}, half_fail={counts['half_fail']}, "
          f"fail={counts['fail']}, reverse_fail={counts['reverse_fail']}, total={n}")



if __name__ == '__main__':
    # Project-local helper; presumably parses the `-c <idx>` flag seen in the
    # run log below and pins the CUDA device — confirm against its definition.
    from util_for_argparse import get_cuda_args
    get_cuda_args()

    # Only one experiment is enabled at a time; toggle by (un)commenting.
    # test()
    test2()
    # test_empty_txt()
    # search_by_txt('yellow clothing')

'''
(flux2) shengjie@iv-ydtfdhw0lcs6ipln22hi:~/code/flux_demo$ python demo_siglip.py -c 4
Using a slow image processor as `use_fast` is unset and a slow processor was saved with this model. `use_fast=True` will be the default behavior in v4.52, even if the model was saved with a slow processor. This will result in minor differences in outputs. You'll still be able to use a slow processor with `use_fast=False`.
Cosine similarity between text and image embeddings: 0.008951790630817413
(flux2) shengjie@iv-ydtfdhw0lcs6ipln22hi:~/code/flux_demo$ python Text-to-Text-Search-main/util_for_clip.py -c 4
Using a slow image processor as `use_fast` is unset and a slow processor was saved with this model. `use_fast=True` will be the default behavior in v4.52, even if the model was saved with a slow processor. This will result in minor differences in outputs. You'll still be able to use a slow processor with `use_fast=False`.
Cosine similarity between text and image embeddings: 0.7332472801208496
(flux2) shengjie@iv-ydtfdhw0lcs6ipln22hi:~/code/flux_demo$ 
'''