'''
1 构建向量库 {category}_{date}.faiss
2 网页端测试，查看召回的 正反面 相似度 规律 (and) janus 对衣服做描述，取出里面背面的照片
3 for img in imgs 
    if is_front( janus(img) ):
        recalled_info = faiss( img )   
        for re_img,re_sim in recalled_info:
            if  re_sim > 0.96 and is_back(re_img):
                horizontal_concat( [img, re_img] ).save(tar_path)
'''
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import util_for_huggingface

import pandas as pd
import pdb,json

from util_for_os import osj,ose,osb
from demo_dinov3 import get_dino3_processor,get_pooled_feature_by_dino3

def get_data_dir(category, date):
    """Return the NAS dataset directory for a category/date pair.

    Layout: /mnt/nas/datasets/diction/<category><date>_img_clo_diff
    """
    # A def instead of a named lambda (PEP 8 E731) — same call signature.
    return f'/mnt/nas/datasets/diction/{category}{date}_img_clo_diff'

# Clothing categories and capture dates that make up the corpus.
categories = ['coat', 'sweater', 'leather']
dates = ["0808", "0811", "0812", "0813", "0814", "0815",
         "0818", "0819", "0820", "0821", "0822"]

def main():
    """Pipeline driver — enable the stage(s) you want to run.

    stage2: compute image embeddings, save to .pt
    stage3: convert .pt embeddings into .faiss index + .idmap
    stage4: query .faiss/.idmap with each image and inspect recalls
    """
    # stage1()
    # stage2()
    # stage3()
    stage4()

## 2
import torch
from transformers import AutoModel, AutoProcessor
from transformers.image_utils import load_image
from util_flux import process_img_1024,vertical_concat_images

SIGLIP_PATH = 'google/siglip2-so400m-patch16-512'

def get_siglip2():
    """Load the SigLIP2 processor and model (~5.2 GB) on GPU in eval mode.

    Returns:
        (processor, model) — model already on CUDA, gradients not needed.
    """
    processor = AutoProcessor.from_pretrained(SIGLIP_PATH)
    # device_map="cuda" already places the weights on GPU, so the extra
    # .cuda() the original chained on the return value was redundant.
    model = AutoModel.from_pretrained(SIGLIP_PATH, device_map="cuda").eval()
    return processor, model
# 5.2 GB
def extract_feature_siglip2( filepath='',file_pil=None , image_processor=None, model=None ):
    """Embed one image with SigLIP2 and return its feature as a numpy array.

    Either pass a path via `filepath` or an already-loaded PIL image via
    `file_pil` (the PIL image wins when both are given).
    """
    # Prefer the in-memory image; fall back to loading from disk.
    if file_pil is None:
        image = load_image(filepath)
    else:
        image = file_pil

    batch = image_processor(images=[image],
                            return_tensors="pt").to(model.device)

    with torch.no_grad():
        feats = model.get_image_features(**batch)  # 1 1152

    return feats.cpu().numpy()
def get_boxed_img( imgpath , box ):
    """Load the image at `imgpath` and crop it to `box` (PIL box tuple)."""
    img = load_image(imgpath)
    return img.crop(box)
from tqdm import tqdm
def stage2():
    """Embed every image listed in names.txt with DINOv3 and save the results.

    For each (category, date) dataset dir, writes faiss_emb_dino.pt with:
        {"metadata":   [{"imgpath": str}, ...],
         "embeddings": torch.Tensor}   # stacked per-image pooled features
    """
    processor, dino3 = get_dino3_processor()

    def get_save_path(data_dir):
        return osj(data_dir, 'faiss_emb_dino.pt')

    # Fail fast: verify every dataset dir and names.txt exists before
    # spending any GPU time.
    for cat in categories:
        for date in dates:
            data_dir = get_data_dir(cat, date)
            names_list_path = osj(data_dir, 'names.txt')
            assert ose(data_dir), data_dir
            assert ose(names_list_path), names_list_path

    for cat in categories:
        for date in dates:
            data_dir = get_data_dir(cat, date)
            names_list_path = osj(data_dir, 'names.txt')

            with open(names_list_path, encoding='utf-8') as f:
                names = f.readlines()

            will_save = []
            for name in tqdm(names):
                name = name.strip()
                if not name.endswith('.jpg'):
                    continue

                abs_path = osj(data_dir, name)
                if not ose(abs_path):
                    print('not exists ', abs_path)
                    continue

                img = process_img_1024(abs_path)

                with torch.no_grad():
                    emb = get_pooled_feature_by_dino3(img=img,
                                                      processor=processor,
                                                      model=dino3)

                will_save.append({
                    'imgpath': abs_path,
                    # detach().clone() is the recommended order (clone first
                    # would keep the autograd graph alive until detach).
                    'emb': emb.detach().clone(),
                })

            if not will_save:
                # torch.stack([]) raises; skip dirs with no usable images.
                print('no embeddings for ', data_dir, ' - skipped')
                continue

            torch.save({
                "metadata": [{"imgpath": x["imgpath"]}
                             for x in will_save],
                # Entries are already tensors — stack them directly instead of
                # re-wrapping with torch.tensor(), which copies and warns.
                "embeddings": torch.stack([x["emb"] for x in will_save]),
            }, get_save_path(data_dir))
            print('save to ', get_save_path(data_dir), ' ', len(will_save))
    
import numpy as np
from typing import Tuple, Dict, Any
def get_embeddings_id_map(emb_data: Dict[str, Any]) -> Tuple[np.ndarray, Dict[int, Dict[str, Any]]]:
    """
    Extract the embedding matrix and the id->metadata map from saved data.

    Args:
        emb_data: dict loaded via torch.load(), containing:
            - "metadata": List[Dict] per-item metadata (must have "imgpath")
            - "embeddings": torch.Tensor of shape (N, 1, D) or (N, D)
              (D is the feature dim — e.g. 4096 for DINOv3 pooled features,
              1152 for SigLIP2; the original docstring's fixed 1152 was stale)

    Returns:
        embeddings: (N, D) numpy array
        id_to_metadata: {row index -> metadata dict}

    Raises:
        ValueError: a required field is missing from emb_data
        RuntimeError: any other failure while converting the data

    Example:
        emb_data = torch.load("embeddings.pt")
        embs, id_map = get_embeddings_id_map(emb_data)
    """
    try:
        # Flatten any singleton middle dim: (N, 1, D) -> (N, D).  Unlike
        # squeeze(1), reshape also accepts tensors that are already (N, D).
        embeddings = emb_data["embeddings"].cpu().numpy()
        embeddings = embeddings.reshape(embeddings.shape[0], -1)

        # Row index in the matrix == key in the id map.
        id_to_metadata = {
            i: {
                "imgpath": item["imgpath"],
                # extend with more metadata fields here if needed
            }
            for i, item in enumerate(emb_data["metadata"])
        }

        # Sanity check: one metadata entry per embedding row.
        assert len(id_to_metadata) == embeddings.shape[0], \
            f"元数据数量({len(id_to_metadata)})与嵌入数量({embeddings.shape[0]})不匹配"

        return embeddings, id_to_metadata

    except KeyError as e:
        raise ValueError(f"输入数据缺少必要字段: {str(e)}") from e
    except Exception as e:
        raise RuntimeError(f"处理嵌入数据时出错: {str(e)}") from e

def stage3():
    """Build one FAISS L2 index per dataset dir from the saved embeddings.

    Reads faiss_emb_dino.pt (produced by stage2) and writes:
        faiss_emb_dino.faiss       — IndexFlatL2 over all embeddings
        faiss_emb_dino_idmap.pt    — {row id -> {"imgpath": ...}}
    """
    import faiss, torch
    from tqdm import tqdm

    def get_emb_path(data_dir):
        return osj(data_dir, 'faiss_emb_dino.pt')

    def get_save_faiss_path(data_dir):
        return osj(data_dir, 'faiss_emb_dino.faiss')

    def get_save_idmap_path(data_dir):
        return osj(data_dir, 'faiss_emb_dino_idmap.pt')

    # Fail fast: make sure every input .pt exists before building anything.
    for cat in categories:
        for date in dates:
            data_dir = get_data_dir(cat, date)
            emb_path = get_emb_path(data_dir)
            assert ose(data_dir), data_dir
            assert ose(emb_path), emb_path

    for cat in tqdm(categories):
        for date in tqdm(dates):
            data_dir = get_data_dir(cat, date)
            emb_path = get_emb_path(data_dir)
            save_faiss_path = get_save_faiss_path(data_dir)
            save_idmap_path = get_save_idmap_path(data_dir)

            emb_data = torch.load(emb_path)
            embeddings, id_to_metadata = get_embeddings_id_map(emb_data)
            print(embeddings.shape)

            # Use the actual feature dimension rather than the hard-coded
            # 4096 the original had — keeps the index correct if the
            # embedding model (and thus D) changes.
            index = faiss.IndexFlatL2(embeddings.shape[1])
            index.add(embeddings)

            # Persist the index and its row-id -> metadata map side by side.
            faiss.write_index(index, save_faiss_path)
            torch.save(id_to_metadata, save_idmap_path)

def search_by_faiss(query_emb, index, id_map , k=5, save_path='tmp2.jpg'):
    """Search the FAISS index with one query embedding and preview the hits.

    Args:
        query_emb: (1, D) numpy array matching the index dimension.
                   (The original docstring's "1x1154 tensor" was a typo —
                   FAISS needs a numpy array, and D is the index dim.)
        index: faiss.Index to search.
        id_map: {row id -> {"imgpath": ...}} metadata map.
        k: number of nearest neighbours to return.
        save_path: where the concatenated preview grid is written.

    Returns:
        (pil_images, results) — results is a list of
        {"distance": float, "imgpath": str} dicts, nearest first.
    """
    D, I = index.search(query_emb, k)
    res_show = []
    from util_flux import horizontal_concat_images,process_img_1024,vertical_concat_images
    for d,i in zip(D[0] , I[0]):
        print('distance :' , d)
        imgpath = id_map[i]['imgpath']
        res_show.append( process_img_1024('',img_pil=load_image(imgpath)) )

    # Lay out the recalls as a grid, `row` images per row, and save it.
    row = 5
    tmp_shows = []
    for start in range(0, len(res_show), row):
        # Use `row` here — the original hard-coded 5 in the slice, silently
        # ignoring the row variable it had just defined.
        tmp_shows.append( horizontal_concat_images(res_show[start:start+row]) )
    vertical_concat_images(tmp_shows).save(save_path)
    return res_show,[{"distance": d, **id_map[i]} for d,i in zip(D[0],I[0])]

# 检索
import faiss
def load_faiss_index(index_path: str, gpu_id: int = -1) -> faiss.Index:
    """
    Read a FAISS index from disk, optionally moving it onto a GPU.

    Args:
        index_path: path to the .faiss file
        gpu_id: GPU device id to use (-1 keeps the index on CPU)

    Returns:
        the loaded faiss.Index

    Raises:
        FileNotFoundError: index_path does not exist
        RuntimeError: reading the index or the GPU transfer failed
    """
    if not os.path.exists(index_path):
        raise FileNotFoundError(f"FAISS索引文件不存在: {index_path}")

    try:
        loaded = faiss.read_index(index_path)

        # Optional GPU acceleration.
        if gpu_id >= 0:
            gpu_res = faiss.StandardGpuResources()
            loaded = faiss.index_cpu_to_gpu(gpu_res, gpu_id, loaded)
            print(f"已启用GPU加速 (Device {gpu_id})")

        return loaded

    except Exception as e:
        raise RuntimeError(f"加载FAISS索引失败: {str(e)}") from e
    
def load_idmap(idmap_path: str, verbose: bool = True) -> dict:
    """
    Load the row-id -> metadata map saved by stage3.

    Args:
        idmap_path: path to the .pt file
        verbose: print a summary line after a successful load

    Returns:
        {id: metadata} dict

    Raises:
        FileNotFoundError: idmap_path does not exist
        ValueError: the file's content is not a dict
        RuntimeError: torch.load itself failed (corrupt file, etc.)
    """
    if not os.path.exists(idmap_path):
        raise FileNotFoundError(f"ID映射文件不存在: {idmap_path}")

    # Keep the try body minimal: only torch.load can legitimately fail here.
    # The original wrapped the isinstance check in the same try, so its
    # deliberate ValueError was swallowed and re-raised as RuntimeError.
    try:
        id_map = torch.load(idmap_path)
    except Exception as e:
        raise RuntimeError(f"加载ID映射表失败: {str(e)}") from e

    # Validate BEFORE announcing success (the original printed first).
    if not isinstance(id_map, dict):
        raise ValueError("ID映射文件格式错误，应为字典类型")

    if verbose:
        print(f"成功加载ID映射表，共 {len(id_map)} 条记录")

    return id_map
    
def stage4():
    """Query each dataset's FAISS index with every listed image.

    For every image in names.txt: embed it with DINOv3, search the
    per-dataset index (search_by_faiss also writes a preview grid to
    tmp2.jpg), then drop into pdb so the recalls can be inspected by hand.
    """
    from tqdm import tqdm

    def get_faiss_path(data_dir):
        return osj(data_dir, 'faiss_emb_dino.faiss')

    def get_idmap_path(data_dir):
        return osj(data_dir, 'faiss_emb_dino_idmap.pt')

    # processor , model = get_siglip2()
    processor, model = get_dino3_processor()

    for cat in tqdm(categories):
        for date in tqdm(dates):
            data_dir = get_data_dir(cat, date)

            # Load this dataset's index and its id -> metadata map.
            index = load_faiss_index(get_faiss_path(data_dir))
            id_map = load_idmap(get_idmap_path(data_dir))

            names_path = osj(data_dir, 'names.txt')
            with open(names_path, encoding='utf-8') as f:
                names = f.readlines()

            for name in tqdm(names):
                name = name.strip()
                if not name.endswith('.jpg'):
                    continue

                img_path = osj(data_dir, name)
                img = process_img_1024(img_path)

                # no_grad for consistency with stage2 — inference only, no
                # need to build an autograd graph per query.
                with torch.no_grad():
                    query_emb = get_pooled_feature_by_dino3(img=img,
                                                            processor=processor,
                                                            model=model)

                # FAISS expects a numpy array, not a torch tensor.
                search_by_faiss(query_emb.cpu().numpy(), index, id_map, k=10)

                # Deliberate breakpoint: inspect each query's recalls manually.
                pdb.set_trace()

    
    
if __name__ == '__main__':
    main()