'''
1. Compute an embedding for every image.
2. Find the most similar images via cosine similarity (the closer to 1, the more similar).
'''
import os
# from os.path import join as osj
from util_for_os import ose,osj,osb
import argparse
# Parse the CUDA device id early so CUDA_VISIBLE_DEVICES is exported
# *before* torch/transformers are imported below (L14+): device visibility
# is fixed at CUDA initialization time, so this must precede those imports.
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--cuda', type=str, default='2', help='CUDA device id(s) to use')
# parse_known_args ignores unrelated CLI flags instead of erroring on them
args, _ = parser.parse_known_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda

import faiss,pdb,torch

from transformers import AutoModel, AutoProcessor
from transformers.image_utils import load_image
from sklearn.metrics.pairwise import cosine_similarity
import pandas as pd
import shutil
from tqdm import tqdm
import numpy as np

SIGLIP_PATH = '/data/models/siglip2-so400m-patch16-512'

def get_siglip2():
    """Load the SigLIP2 processor and model from SIGLIP_PATH.

    Returns:
        (processor, model): the AutoProcessor and the eval-mode AutoModel,
        already placed on the GPU via device_map="cuda".

    Usage:
        text_input  = processor(text="a cat", return_tensors="pt")
        image_input = processor(images=image, return_tensors="pt")
        with torch.no_grad():
            emb = model.get_image_features(**inputs)  # (1, 1152)
    """
    processor = AutoProcessor.from_pretrained(SIGLIP_PATH)
    # device_map="cuda" already places the model on the GPU, so the extra
    # .cuda() call the original made on return was a no-op and is dropped.
    model = AutoModel.from_pretrained(SIGLIP_PATH, device_map="cuda").eval()
    return processor, model
def extract_feature_siglip2( filepath='',file_pil=None , image_processor=None, model=None ):
    """Embed one image with SigLIP2.

    Pass either a path in `filepath` (loaded via load_image) or an
    already-loaded PIL image in `file_pil` (takes precedence when given).
    Returns the image embedding as a numpy array of shape (1, 1152).
    """
    if file_pil is not None:
        image = file_pil
    else:
        image = load_image(filepath)

    batch = image_processor(images=[image], return_tensors="pt")
    batch = batch.to(model.device)

    with torch.no_grad():
        features = model.get_image_features(**batch)  # (1, 1152)

    return features.cpu().numpy()

# date = '0917'

# categories = ['coat','sweater','leather','jean']
# categories = ['coat','sweater','leather']
# categories = ['coat','sweater']

# Dataset selection: one image directory per (category, date) pair.
dates = ['1106']
categories = ['jacket','hoodie','formalatt','weddress']

# Fail fast: verify every image directory and its names.txt manifest exist
# before paying the cost of loading the model.
for date in dates:
    for category in categories:
        img_dir = f'/mnt/nas/datasets/diction/{category}{date}_img_clo'
        names_txt = osj(img_dir , 'names.txt')
        assert ose(img_dir), img_dir
        assert ose(names_txt), names_txt

processor, model = get_siglip2()

for date in dates:
    for category in categories:
        img_dir = f'/mnt/nas/datasets/diction/{category}{date}_img_clo'
        assert ose(img_dir)
        assert ose( osj(img_dir , 'names.txt') )

        # names.txt lists one image filename per line; keep only .jpg entries.
        # Strip *before* checking the suffix so the final line is kept even
        # when the file lacks a trailing newline (the old '.jpg\n' test
        # silently dropped it).
        with open( osj(img_dir , 'names.txt')) as f:
            filenames = [line.strip() for line in f if line.strip().endswith('.jpg')]

        tmp_save_emb_pt = os.path.join(img_dir, 'tmp_save_emb_pt.pt')

        # Recreate the two output dirs from scratch:
        #   <img_dir>_sim  -> images judged near-duplicates of an earlier one
        #   <img_dir>_diff -> everything else
        sim_dir = f'{img_dir}_sim'
        if os.path.exists(sim_dir):
            shutil.rmtree(sim_dir)
        os.makedirs(sim_dir)
        diff_dir = f'{img_dir}_diff'
        if os.path.exists(diff_dir):
            shutil.rmtree(diff_dir)
        os.makedirs(diff_dir)

        # Step 1: embed every image, or load the cached embeddings.
        embeddings = []
        abs_paths = []
        if not os.path.exists(tmp_save_emb_pt):
            for filename in tqdm(filenames):
                filepath = os.path.join(img_dir, filename)
                try:
                    emb = extract_feature_siglip2(filepath=filepath, image_processor=processor, model=model)
                    embeddings.append(emb)  # (1, 1152)
                    abs_paths.append(filepath)
                except Exception as e:
                    # Log the failing path and skip it; the previous
                    # pdb.set_trace() here stalled unattended runs and the
                    # message printed "(unknown)" instead of the file.
                    print(f"Error processing {filepath}: {e}")

            will_save_emb = torch.stack([torch.tensor(x) for x in embeddings])  # (N, 1, 1152)
            torch.save({"filepath": abs_paths,
                        "embeddings": will_save_emb},
                       tmp_save_emb_pt)
            print('save to ', tmp_save_emb_pt, ' ', len(embeddings))
        else:
            print("Loading cached embeddings...")
            saved_emb = torch.load(tmp_save_emb_pt)
            embeddings = saved_emb['embeddings'].numpy()  # (N, 1, 1152)
            abs_paths = saved_emb['filepath']

        # Step 2: pairwise cosine similarity inside a sliding window of the
        # next 49 entries (presumably near-duplicates sit close together in
        # names.txt ordering -- TODO confirm). L2-normalizing once turns each
        # pair into a single dot product instead of a per-pair sklearn
        # cosine_similarity call; the values are identical.
        # (Also removed: the debug 'tmp.jpg' side-by-side dump that was
        # rewritten for every similar pair and never read.)
        num = len(abs_paths)
        if num:
            emb_mat = np.asarray(embeddings, dtype=np.float32).reshape(num, -1)
            norms = np.linalg.norm(emb_mat, axis=1, keepdims=True)
            norms[norms == 0] = 1.0  # guard against all-zero vectors
            emb_mat = emb_mat / norms
        else:
            emb_mat = np.zeros((0, 0), dtype=np.float32)

        threshold = 0.98  # cosine similarity above this => duplicate
        sim_paths = set()
        for i in tqdm(range(num)):
            # sims for pairs (i, i+1) .. (i, i+49), clipped at the end
            window = emb_mat[i + 1:i + 50] @ emb_mat[i]
            for off in np.nonzero(window > threshold)[0]:
                sim_paths.add(abs_paths[i + 1 + off])

        diff_paths = set(abs_paths) - sim_paths
        for path in tqdm(sim_paths):
            shutil.copy2(path, sim_dir)
        for path in tqdm(diff_paths):
            shutil.copy2(path, diff_dir)

        print("Processing complete!")
        print(f"Similar images saved to: {sim_dir}")
        print(f"Different images saved to: {diff_dir}")


'''
date=1020
bash restore.sh /mnt/nas/datasets/diction/dress"$date"
bash restore.sh /mnt/nas/datasets/diction/suit"$date"
bash restore.sh /mnt/nas/datasets/diction/suitset"$date"
bash restore.sh /mnt/nas/datasets/diction/trousers"$date"

date=1020
bash restore.sh /mnt/nas/datasets/diction/dress"$date"_img
bash restore.sh /mnt/nas/datasets/diction/suit"$date"_img
bash restore.sh /mnt/nas/datasets/diction/suitset"$date"_img
bash restore.sh /mnt/nas/datasets/diction/trousers"$date"_img

date=1106
bash restore.sh /mnt/nas/datasets/diction/jacket"$date"_img_clo
bash restore.sh /mnt/nas/datasets/diction/hoodie"$date"_img_clo
bash restore.sh /mnt/nas/datasets/diction/formalatt"$date"_img_clo
bash restore.sh /mnt/nas/datasets/diction/weddress"$date"_img_clo

date=0917
bash restore.sh /mnt/nas/datasets/diction/slipdress"$date"_img_clo_diff
bash restore.sh /mnt/nas/datasets/diction/suit"$date"_img_clo_diff
bash restore.sh /mnt/nas/datasets/diction/suitset"$date"_img_clo_diff
bash restore.sh /mnt/nas/datasets/diction/trousers"$date"_img_clo_diff

'''