'''
1. Extract an embedding for every image (DINOv3 pooled features).
2. Find the most similar image pairs by cosine similarity of embeddings
   (closer to 1 means more similar) and split duplicates from uniques.
'''
import os
# from os.path import join as osj
from util_for_os import ose,osj
os.environ['CUDA_VISIBLE_DEVICES'] = '3'

import faiss,pdb,torch

from transformers import AutoModel, AutoProcessor
from transformers.image_utils import load_image
from sklearn.metrics.pairwise import cosine_similarity
import pandas as pd
import shutil
from tqdm import tqdm
import numpy as np

from demo_dinov3 import get_dino3_processor,get_pooled_feature_by_dino3


# SIGLIP_PATH = '/data/models/siglip2-so400m-patch16-512'

# def get_siglip2():
#     processor = AutoProcessor.from_pretrained(SIGLIP_PATH,)
#     model = AutoModel.from_pretrained(SIGLIP_PATH, device_map="cuda").eval()
#     return processor,model.cuda()
 
# def extract_feature_dino( filepath='',file_pil=None , image_processor=None, model=None ):
#     # prepare input image
#     image = load_image(filepath) if file_pil is None else file_pil
#     inputs = image_processor(images=[image], 
#                              return_tensors="pt").to(model.device)

#     with torch.no_grad():
#         image_embeddings = model.get_image_features(**inputs)  # 1 1152
    
#     return image_embeddings.cpu().numpy()

# Date suffix identifying which dataset snapshot to process.
date = '0908'

# Garment categories to process in this run.
# NOTE(review): earlier runs also covered 'leather' and 'jean'; only the
# final assignment was effective before, so the dead reassignments were
# collapsed to this single list.
categories = ['coat', 'sweater']

# Pre-flight check: fail fast if any category directory or its names.txt
# manifest is missing, before spending time loading the model.
# Explicit raises instead of `assert`, which is stripped under `python -O`.
for category in categories:

    img_dir = f'/mnt/nas/datasets/diction/{category}{date}_img_clo'
    if not ose(img_dir):
        raise FileNotFoundError(f'missing image directory: {img_dir}')
    names_path = osj(img_dir, 'names.txt')
    if not ose(names_path):
        raise FileNotFoundError(f'missing manifest: {names_path}')


# Load the DINOv3 processor/model once and reuse them for every category.
processor, dino3 = get_dino3_processor()

for category in categories:

    img_dir = f'/mnt/nas/datasets/diction/{category}{date}_img_clo'
    names_path = osj(img_dir, 'names.txt')
    if not ose(img_dir) or not ose(names_path):
        raise FileNotFoundError(f'missing directory or manifest for {img_dir}')

    # Read the image manifest. Strip each line *before* the extension test:
    # the previous `endswith('.jpg\n')` check silently dropped the last
    # line of the file whenever it lacked a trailing newline.
    with open(names_path) as f:
        filenames = [line.strip() for line in f if line.strip().endswith('.jpg')]

    tmp_save_emb_pt = os.path.join(img_dir, 'tmp_save_emb_pt_dino3.pt')

    # Recreate output directories from scratch each run so stale copies
    # from a previous pass never survive.
    sim_dir = f'{img_dir}_sim_dino3'
    diff_dir = f'{img_dir}_diff_dino3'
    for out_dir in (sim_dir, diff_dir):
        if os.path.exists(out_dir):
            shutil.rmtree(out_dir)
        os.makedirs(out_dir)

    # Step 1: extract DINOv3 embeddings for all images (or load a cache).
    if not os.path.exists(tmp_save_emb_pt):
        embeddings = []
        abs_paths = []
        for filename in tqdm(filenames):
            filepath = os.path.join(img_dir, filename)
            try:
                emb = get_pooled_feature_by_dino3(url=filepath,
                                                  processor=processor,
                                                  model=dino3)
                embeddings.append(emb.cpu())  # (1, 4096)
                abs_paths.append(filepath)
            except Exception as e:
                # Log and skip unreadable images; the old pdb.set_trace()
                # here blocked unattended runs, and the message printed the
                # literal "(unknown)" instead of the failing path.
                print(f"Error processing {filepath}: {e}")

        # The entries are already tensors (emb.cpu()), so stack them
        # directly; wrapping each in torch.tensor() made an extra copy
        # and triggered a UserWarning.
        will_save_emb = torch.stack(embeddings)  # (N, 1, 4096)
        torch.save({
                    "filepath": abs_paths,
                    "embeddings": will_save_emb
                    },
                    tmp_save_emb_pt)
        print('save to ', tmp_save_emb_pt, ' ', len(embeddings))
        # Match the cached branch below: step 2 always sees a numpy array.
        embeddings = will_save_emb.numpy()
    else:
        print("Loading cached embeddings...")
        saved_emb = torch.load(tmp_save_emb_pt)
        embeddings = saved_emb['embeddings'].numpy()  # (N, 1, 4096)
        abs_paths = saved_emb['filepath']

    # Step 2: compare each image against its next 49 neighbours in
    # manifest order; a pair above the threshold marks the *later* image
    # as a duplicate. Hoist the debug-preview import out of the hot loop.
    from util_flux import horizontal_concat_images

    threshold = 0.98  # cosine-similarity cutoff for "same garment"
    sim_paths = set()

    for i in tqdm(range(len(embeddings))):
        for j in range(i + 1, min(i + 50, len(embeddings))):
            sim = cosine_similarity(embeddings[i], embeddings[j])[0][0]

            if sim > threshold:
                # Debug aid: keep a side-by-side preview of the most
                # recent duplicate pair found.
                horizontal_concat_images([load_image(abs_paths[i]),
                                          load_image(abs_paths[j])]).save('tmp_dino3.jpg')
                sim_paths.add(abs_paths[j])

    # Copy duplicates and uniques into their respective directories.
    diff_paths = set(abs_paths) - sim_paths
    for path in tqdm(sim_paths):
        shutil.copy2(path, sim_dir)
    for path in tqdm(diff_paths):
        shutil.copy2(path, diff_dir)

    print("Processing complete!")
    print(f"Similar images saved to: {sim_dir}")
    print(f"Different images saved to: {diff_dir}")


'''
date=0908
bash restore.sh /mnt/nas/datasets/diction/coat"$date"_img_clo
bash restore.sh /mnt/nas/datasets/diction/sweater"$date"_img_clo
bash restore.sh /mnt/nas/datasets/diction/leather"$date"_img_clo
bash restore.sh /mnt/nas/datasets/diction/jean"$date"_img_clo

date=0905
bash restore.sh /mnt/nas/datasets/diction/coat"$date"_img_clo_diff
bash restore.sh /mnt/nas/datasets/diction/sweater"$date"_img_clo_diff
bash restore.sh /mnt/nas/datasets/diction/leather"$date"_img_clo_diff
bash restore.sh /mnt/nas/datasets/diction/jean"$date"_img_clo_diff
'''