import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import torch
import numpy as np
import os
import re
import faiss
import pdb
from PIL import Image
import shutil
from tqdm import tqdm
from util_flux import horizontal_concat_images
from faiss import normalize_L2


def load_local_image(filename_name, image_dir=None):
    """Load an image by filename and return it as an RGB PIL image.

    Args:
        filename_name: Image file name (e.g. "00011_00.jpg").
        image_dir: Directory to search; defaults to the module-level IMAGE_DIR.

    Returns:
        A PIL.Image in RGB mode, or None (with a message printed) when the
        file does not exist.
    """
    base_dir = IMAGE_DIR if image_dir is None else image_dir
    image_path = os.path.join(base_dir, f"{filename_name}")

    if not os.path.exists(image_path):
        print(f"Image for {filename_name} not found.")
        return None

    # Open inside a context manager so the underlying file handle is closed
    # promptly; convert() returns a detached copy, so the pixel data remains
    # usable after the file is closed.
    with Image.open(image_path) as img:
        return img.convert('RGB')

def clean_feature_string(feature_str):
    """Parse a bracketed, space-separated feature string into a float32 vector.

    Example: "[0.1 0.2 0.3]" -> array([0.1, 0.2, 0.3], dtype=float32)
    """
    # Replace brackets with spaces; np.fromstring is deprecated, so use
    # split() + np.array instead (split collapses runs of whitespace).
    cleaned_str = re.sub(r'[\[\]]', ' ', feature_str)
    return np.array(cleaned_str.split(), dtype='float32')

# Function to get top K similar entries (by cosine similarity) using FAISS
def get_top_k_similar_countries(input_filename, df, k=5):
    """Return the top-k most cosine-similar filenames to *input_filename*.

    Args:
        input_filename: Query filename; must appear in df['filename'].
        df: DataFrame with 'filename' and array-valued 'features' columns.
        k: Number of neighbours to return.

    Returns:
        A list of at most k (filename, score) tuples (the query itself is
        excluded), or an error string when the filename is not present.
    """
    countries = df['filename'].values
    features = np.stack(df["features"].values).astype('float32')

    # Locate the query row
    try:
        input_idx = list(countries).index(input_filename)
    except ValueError:
        return f"filename '{input_filename}' not found in the dataset."

    # Normalize every vector so inner product equals cosine similarity
    features_normalized = features / np.linalg.norm(features, axis=1, keepdims=True)

    # Bug fix: the query must be taken from the NORMALIZED matrix, otherwise
    # the reported scores are scaled by the query's norm and are not cosine
    # similarities.
    input_embedding = features_normalized[input_idx].reshape(1, -1)

    # Exact inner-product index
    dim = features.shape[1]
    index = faiss.IndexFlatIP(dim)
    index.add(features_normalized)

    # Search k+1 so the query itself can be dropped from its own results
    distances, top_k_idx = index.search(input_embedding, k + 1)

    results = [(countries[i], distances[0][j])
               for j, i in enumerate(top_k_idx[0]) if i != input_idx]
    # Bug fix: cap at k — when the query is missing from the hits (possible
    # with duplicate vectors), the filtered list would otherwise hold k+1.
    return results[:k]

# Source images and output location. NOTE: the save directory is rebuilt
# from scratch on every run — any existing results are deleted.
IMAGE_DIR = "/data/shengjie/VITON-HD_ori/train/cloth"
SAVE_DIR = "/mnt/nas/shengjie/datasets/cloth_similar"
if os.path.exists(SAVE_DIR):shutil.rmtree(SAVE_DIR)
os.makedirs(SAVE_DIR)  # ensure the output directory exists
# --- Load the embedding data ---
df = pd.read_csv('viton_embeddings_clip.csv')
df['processed'] = False  # marks whether a row has already been handled
count = 1  # global group counter
# filename = df.iloc[0]['filename']
# filename = '00011_00.jpg'
# --- Main processing logic ---

# Helpers for repairing the stringified feature column
import ast
def str_to_array(s):
    """Parse a stringified numpy array into a 1-D float32 vector.

    Handles the forms produced when arrays are written to CSV: single or
    double brackets and embedded newlines, e.g. "[[0.1 0.2\n 0.3]]".
    """
    # Turn every bracket/newline into a space and let split() collapse the
    # whitespace. This avoids the fragile comma-insertion + ast.literal_eval
    # round-trip, and fixes single-bracket strings, which the old parse
    # turned into a nested (2-D) list.
    cleaned = re.sub(r'[\[\]\n]', ' ', s)
    return np.array(cleaned.split(), dtype='float32')

# Stack the per-row features into an (n_samples, feature_dim) matrix
features_str = df["features"].values
# features = np.stack()  # FAISS needs float32
# pdb.set_trace()
features = np.vstack([str_to_array(s) for s in features_str])
# features = np.fromstring( features_str.strip('[]'),sep=' ' ).astype('float32')
# In-place L2 normalization so inner-product search == cosine similarity
normalize_L2(features)
# features = features / np.linalg.norm(features, axis=1, keepdims=True)
dim = features.shape[1]  # feature dimensionality

# Build the index (IVF to speed up search over a flat scan)
nlist = 50  # number of coarse clusters for the inverted file
'''
IndexFlatL2（暴力搜索，精确但慢）
IndexIVFFlat（倒排索引 + 聚类，加速搜索）
IndexIVFPQ（倒排索引 + 量化，牺牲精度换速度）
'''
quantizer = faiss.IndexFlatIP(dim) 
# index = quantizer
# NOTE(review): IVF searches only `nprobe` clusters (default 1), so results
# are approximate — raise index.nprobe for better recall.
index = faiss.IndexIVFFlat(quantizer, dim, nlist, 
                           faiss.METRIC_INNER_PRODUCT)
# Train the coarse quantizer, then add the (normalized) vectors
index.train(features)
index.add(features)

def get_top5_similar(filename, df, index, k=5):
    """Return the k nearest neighbours of *filename* as (filename, score) pairs.

    Args:
        filename: Query filename; must appear in df['filename'].
        df: DataFrame holding 'filename' and stringified 'features' columns.
        index: Trained FAISS index built over the L2-normalized features.
        k: Number of results; the query itself is normally hit #1.

    Raises:
        ValueError: when the filename is not present in df.
    """
    # Locate the target row's feature string
    matches = df[df["filename"] == filename]["features"].values
    if len(matches) == 0:
        raise ValueError(f"filename '{filename}' not found in the dataset.")

    target_feature = str_to_array(matches[0])
    target_feature = np.expand_dims(target_feature, axis=0).astype('float32')  # (1, dim)

    # Bug fix: the index holds L2-normalized vectors, so the query must be
    # normalized too — otherwise the inner-product scores are scaled by the
    # query's norm and are not cosine similarities.
    normalize_L2(target_feature)

    # Positional k: older FAISS SWIG wrappers reject the keyword form.
    distances, indices = index.search(target_feature, k)  # both (1, k)

    # Map row indices back to filenames and pair with their scores
    similar_filenames = df.iloc[indices[0]]["filename"].tolist()
    similar_distances = distances[0].tolist()

    return list(zip(similar_filenames, similar_distances))

# --- Main loop: group every image with its top-5 nearest neighbours ---
for _ in tqdm(range(len(df))):  # progress bar; also bounds the iterations
    # 1. Pick the next unprocessed file
    unprocessed = df[~df['processed']]
    if len(unprocessed) == 0:
        break
    current_file = unprocessed.iloc[0]['filename']
    # Mark it processed up front: the IVF search is approximate, so if the
    # file missed its own top-5 we would otherwise pick it again and stall.
    df.loc[df['filename'] == current_file, 'processed'] = True

    # 2. Top-5 similar images (the query itself is normally hit #1)
    top_5 = get_top5_similar(current_file, df, index, k=5)

    # 3. Save the concatenated preview and the original files
    save_subdir = os.path.join(SAVE_DIR, f"group_{count}")
    os.makedirs(save_subdir, exist_ok=True)

    # 3.1 Concatenated preview image.
    # Bug fix: load_local_image() already prefixes IMAGE_DIR, so pass the
    # bare filename (the old double join only worked because the second
    # path was absolute). Missing images are skipped instead of passing
    # None into the concatenation. Removed a leftover pdb.set_trace() that
    # halted the script on every iteration.
    concat_images = [img for img in (load_local_image(f) for f, _ in top_5)
                     if img is not None]
    if concat_images:
        concat_result = horizontal_concat_images(concat_images)
        concat_result.save(os.path.join(SAVE_DIR, f"group_{count}_concat.jpg"))

    # 3.2 Copy the originals, named by group, rank, and similarity score
    for i, (filename, score) in enumerate(top_5):
        src_path = os.path.join(IMAGE_DIR, filename)
        dst_path = os.path.join(save_subdir, f"{count}-{i+1}_{score:.4f}.jpg")
        shutil.copy(src_path, dst_path)

        # Everything placed in this group counts as processed
        df.loc[df['filename'] == filename, 'processed'] = True

    # 4. Advance the group counter
    count += 1

# # Display top 5 similar flags 
# top_5_countries = get_top_k_similar_countries(filename,df, k=5)

# from util_flux import horizontal_concat_images
# concat_res = horizontal_concat_images([load_local_image(f) for f,_ in top_5_countries])
# concat_scores = [score for _,score in top_5_countries]
# concat_res.save('tmp2.jpg')
# # 1 After finding the top-k, move the files under a uniform naming scheme img_{count}-1/2/3/4/5_{score}.jpg && count += 1
# # 2 Delete/mark these filenames in df so they are skipped on the next search
# # 3 Repeat until every filename in df has been visited 
# pdb.set_trace()

# for idx, (filename, score) in enumerate(top_5_countries):
#     # Load the flag image for each filename from the local folder
#     img = load_local_image(filename)

#     pdb.set_trace()
#     display(img)