'''
基于 dataset_pd_build dataset_sift, dataset_sft2

1, 基于 json 构建 基础 csv
2, 基于 csv 提取embedding
   json data [
        name='',
        box_sleeve:[ []... ]
        box_{t} : [ ... ]
        ...
   ]
   choose t
   save to faiss_emb_{t}.pt

3, 保存faiss 和 idmap 的文件
4, 随便取出一张进行测试
'''
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import util_for_huggingface

import pandas as pd
import pdb,json

from util_for_os import osj,ose,osb

def main():
    """Entry point: run the currently selected pipeline stage.

    Stages 1-3 are one-off preprocessing steps (uncomment to re-run):
      stage1 -> build the base csv from the annotation json
      stage2 -> extract SigLIP2 embeddings and save them to .pt files
      stage3 -> turn the .pt files into .faiss indices + id maps
    Stage 4 runs an interactive retrieval test: img -> .faiss/.idmap -> recalled_info.
    """
    # stage1()
    # stage2()
    # stage3()
    stage4()

# NAS dataset locations shared by all stages.
json_path = "/mnt/nas/shengjie/datasets/data_merged20250722.json"  # annotation json (one record per image) — replace with your own path
save_path = '/mnt/nas/shengjie/datasets/data_merged20250722.csv'  # stage1 output: (filename, abs_path) csv
save_emb_csv = '/mnt/nas/shengjie/datasets/data_merged20250722_embeddings_siglip2.csv'  # NOTE(review): declared but unused in this file — confirm still needed


## 1
def build_image_path_dataframe(json_path):
    """Build a DataFrame of image file names and absolute paths.

    Args:
        json_path (str): path to the annotation json; a list of records,
            each carrying a 'name' field with the image's absolute path.

    Returns:
        pd.DataFrame: two columns — 'filename' (basename via osb) and
        'abs_path' (the original 'name' value).
    """
    with open(json_path) as f:
        json_data = json.load(f)

    # Single pass over the records instead of two parallel comprehensions.
    rows = [(osb(item['name']), item['name']) for item in json_data]
    return pd.DataFrame(rows, columns=['filename', 'abs_path'])
def stage1():
    """Stage 1: dump (filename, abs_path) pairs from the json to a csv."""
    frame = build_image_path_dataframe(json_path)
    frame.to_csv(save_path, index=False)
    # Quick sanity check of the result.
    print(frame.head())


## 2
import torch
from transformers import AutoModel, AutoProcessor
from transformers.image_utils import load_image

SIGLIP_PATH = 'google/siglip2-so400m-patch16-512'  # HF model id; ~5.2 GB on disk

def get_siglip2():
    """Load the SigLIP2 processor and model (eval mode, on GPU).

    Returns:
        (processor, model): AutoProcessor and AutoModel ready for inference.
    """
    processor = AutoProcessor.from_pretrained(SIGLIP_PATH)
    # device_map="cuda" already places the model on the GPU, so the extra
    # model.cuda() of the original version was redundant and is dropped.
    model = AutoModel.from_pretrained(SIGLIP_PATH, device_map="cuda").eval()
    return processor, model
def extract_feature_siglip2(filepath='', file_pil=None, image_processor=None, model=None):
    """Embed a single image with SigLIP2.

    Args:
        filepath: path/URL of the image; ignored when file_pil is given.
        file_pil: optional already-loaded PIL image (takes precedence).
        image_processor: SigLIP2 AutoProcessor.
        model: SigLIP2 AutoModel; must expose .device and get_image_features.

    Returns:
        numpy array of shape (1, D) — D = 1152 for siglip2-so400m.
    """
    image = load_image(filepath) if file_pil is None else file_pil
    # The processor batch is moved to the model's device in one step.
    inputs = image_processor(images=[image],
                             return_tensors="pt").to(model.device)
    with torch.no_grad():
        image_embeddings = model.get_image_features(**inputs)  # (1, 1152)
    return image_embeddings.cpu().numpy()
def get_boxed_img(imgpath, box):
    """Load the image at imgpath and return the crop given by box (x1, y1, x2, y2)."""
    full_image = load_image(imgpath)
    return full_image.crop(box)
from tqdm import tqdm
def stage2():
    """Stage 2: crop every annotated box out of every image and save the
    SigLIP2 embeddings plus metadata per clothing type to faiss_emb_{t}.pt.

    Output .pt layout:
        {"metadata": [{"name": imgpath, f"box_{t}": box}, ...],
         "embeddings": tensor of shape (N, 1, 1152)}
    """
    image_processor, model = get_siglip2()

    with open(json_path) as f:
        json_data = json.load(f)

    types = ['collar', 'sleeve', 'pockets']
    get_save_path = lambda t: f'/mnt/nas/shengjie/datasets/faiss_emb_{t}.pt'

    for t in types:
        will_save = []
        for jd in tqdm(json_data):
            imgpath = jd['name']
            boxes = jd[f'box_{t}']
            if not boxes:
                continue
            for box in boxes:  # box = [x1, y1, x2, y2] (top-left / bottom-right)
                box_img = get_boxed_img(imgpath, box)  # PIL crop
                siglip_emb = extract_feature_siglip2(
                    file_pil=box_img,
                    image_processor=image_processor,
                    model=model)  # (1, 1152) numpy
                will_save.append({
                    'name': imgpath,
                    f'box_{t}': box,
                    'siglip_emb': siglip_emb,
                })

        # Guard: torch.stack([]) raises when a type has no boxes at all.
        if not will_save:
            print('no boxes found for type', t, '; skipping save')
            continue

        torch.save(
            {
                "metadata": [{"name": x["name"], f"box_{t}": x[f"box_{t}"]}
                             for x in will_save],
                "embeddings": torch.stack([torch.tensor(x["siglip_emb"])
                                           for x in will_save]),
            },
            get_save_path(t))
        print('save to ', get_save_path(t), ' ', len(will_save))

## 3 保存 faiss
import faiss,torch
import numpy as np
from typing import Tuple, Dict, Any
def get_embeddings_id_map(emb_data: Dict[str, Any], t) -> Tuple[np.ndarray, Dict[int, Dict[str, Any]]]:
    """Extract the embedding matrix and an id->metadata map from saved data.

    Args:
        emb_data: dict produced by torch.load(), with keys:
            - "metadata": List[Dict], per-element metadata ('name', f'box_{t}')
            - "embeddings": torch.Tensor of shape (N, 1, D)
        t: clothing type ('collar' / 'sleeve' / 'pockets'); selects which
           f'box_{t}' metadata field is exposed as "box".

    Returns:
        embeddings: (N, D) numpy array
        id_to_metadata: {row_id: {"name": ..., "box": ...}}

    Raises:
        ValueError: a required key is missing from emb_data.
        RuntimeError: any other failure while processing (including a
            metadata/embedding count mismatch).

    Example:
        emb_data = torch.load("embeddings.pt")
        embs, id_map = get_embeddings_id_map(emb_data, "collar")
    """
    try:
        # (N, 1, D) -> (N, D): faiss expects a 2-D float matrix.
        embeddings = emb_data["embeddings"].numpy().squeeze(1)

        id_to_metadata = {
            i: {
                "name": item["name"],
                "box": item.get(f"box_{t}", []),  # tolerate a missing box field
            }
            for i, item in enumerate(emb_data["metadata"])
        }

        # Explicit check instead of assert (asserts are stripped under -O);
        # the surrounding handler re-raises this as RuntimeError, matching
        # the original caller-visible behavior.
        if len(id_to_metadata) != embeddings.shape[0]:
            raise RuntimeError(
                f"元数据数量({len(id_to_metadata)})与嵌入数量({embeddings.shape[0]})不匹配")

        return embeddings, id_to_metadata

    except KeyError as e:
        raise ValueError(f"输入数据缺少必要字段: {str(e)}") from e
    except Exception as e:
        raise RuntimeError(f"处理嵌入数据时出错: {str(e)}") from e

def stage3():
    """Stage 3: for each clothing type, load the embedding .pt file, build a
    FAISS L2 index, and persist the index (.faiss) plus the id->metadata
    mapping (.pt) next to it.
    """
    types = ['collar', 'sleeve', 'pockets']
    get_emb_path = lambda t: f'/mnt/nas/shengjie/datasets/faiss_emb_{t}.pt'
    get_save_faiss_path = lambda t: f'/mnt/nas/shengjie/datasets/faiss_emb_{t}.faiss'
    get_save_idmap_path = lambda t: f'/mnt/nas/shengjie/datasets/faiss_emb_{t}_idmap.pt'

    for t in types:
        emb_path = get_emb_path(t)
        save_faiss_path = get_save_faiss_path(t)
        save_idmap_path = get_save_idmap_path(t)

        emb_data = torch.load(emb_path)
        # embeddings: (N, D); id_to_metadata: {i: {"name", "box"}}
        embeddings, id_to_metadata = get_embeddings_id_map(emb_data, t)

        # Build the index; derive the dimension from the data instead of
        # hard-coding 1152 so other embedding sizes also work.
        index = faiss.IndexFlatL2(embeddings.shape[1])
        index.add(embeddings)

        # Persist index and mapping side by side.
        faiss.write_index(index, save_faiss_path)
        torch.save(id_to_metadata, save_idmap_path)


def search_by_faiss(query_emb, index, id_map , k=5):
    """Search the FAISS index for the k nearest neighbours of query_emb.

    Args:
        query_emb: (1, D) numpy array matching the index dimension
            (D = 1152 for the siglip2 indices built in stage3).
            NOTE(review): the old note said "1x1154 tensor", which
            contradicts the 1152-dim embeddings elsewhere — confirm.
        index: a loaded faiss.Index.
        id_map: {row_id: {"name": ..., "box": ...}} from load_idmap().
        k: number of neighbours to retrieve.

    Returns:
        (res_show, info): the matched crops as processed PIL images (also
        concatenated side by side and saved to 'tmp.jpg' for eyeballing),
        and a list of {"distance", "name", "box"} dicts in distance order.
    """
    D, I = index.search(query_emb, k)
    res_show = []
    from util_flux import horizontal_concat_images,process_img_1024
    for d,i in zip(D[0] , I[0]):
        print('distance :' , d)
        # look up the source image and its crop box for this hit
        imgpath = id_map[i]['name']
        box = id_map[i]['box']

        res_show.append( process_img_1024('',img_pil=load_image(imgpath).crop(box)) ) 
    horizontal_concat_images(res_show).save('tmp.jpg')
    return res_show,[{"distance": d, **id_map[i]} for d,i in zip(D[0],I[0])]

# 检索
def load_faiss_index(index_path: str, gpu_id: int = -1) -> faiss.Index:
    """
    Load a FAISS index file, with optional CPU -> GPU transfer.

    Args:
        index_path: path to the .faiss file
        gpu_id: GPU device id to use (-1 keeps the index on CPU)

    Returns:
        a faiss.Index object

    Raises:
        FileNotFoundError: the index file does not exist
        RuntimeError: loading (or GPU transfer of) the index failed
    """
    if not os.path.exists(index_path):
        raise FileNotFoundError(f"FAISS索引文件不存在: {index_path}")
    
    try:
        # plain CPU load
        index = faiss.read_index(index_path)
        
        # optional GPU acceleration
        if gpu_id >= 0:
            res = faiss.StandardGpuResources()
            index = faiss.index_cpu_to_gpu(res, gpu_id, index)
            print(f"已启用GPU加速 (Device {gpu_id})")
            
        return index
        
    except Exception as e:
        raise RuntimeError(f"加载FAISS索引失败: {str(e)}") from e
    
def load_idmap(idmap_path: str, verbose: bool = True) -> dict:
    """Load the {id: metadata} mapping saved by stage3.

    Args:
        idmap_path: path to the .pt mapping file.
        verbose: print a one-line summary after a successful load.

    Returns:
        dict mapping integer row ids to metadata dicts.

    Raises:
        FileNotFoundError: the mapping file does not exist.
        RuntimeError: the file could not be loaded or is malformed.
    """
    if not os.path.exists(idmap_path):
        raise FileNotFoundError(f"ID映射文件不存在: {idmap_path}")

    try:
        mapping = torch.load(idmap_path)
        if verbose:
            print(f"成功加载ID映射表，共 {len(mapping)} 条记录")

        # Sanity check: anything but a dict means a corrupted/wrong file.
        if not isinstance(mapping, dict):
            raise ValueError("ID映射文件格式错误，应为字典类型")

        return mapping

    except Exception as e:
        raise RuntimeError(f"加载ID映射表失败: {str(e)}") from e
    
def stage4():
    """Stage 4: interactive retrieval smoke test.

    For each clothing type, load its FAISS index and id map, then walk a
    directory of local crop images; for each chosen file, embed it with
    SigLIP2 and retrieve the 5 nearest neighbours (search_by_faiss also
    writes the matches to 'tmp.jpg').

    NOTE(review): input() is prompted once per scandir entry *before* the
    file-type filter, so .txt entries also consume a prompt — confirm this
    interleaving is intended.
    """
    get_faiss_path = lambda t : f'/mnt/nas/shengjie/datasets/faiss_emb_{t}.faiss'
    get_idmap_path = lambda t : f'/mnt/nas/shengjie/datasets/faiss_emb_{t}_idmap.pt'
    processor , model = get_siglip2()

    types = ['collar','sleeve','pockets']
    for t in types:
        # pick the faiss index and id-map files for this type
        index = load_faiss_index( get_faiss_path(t) )
        id_map = load_idmap( get_idmap_path(t) )

        img_dir = f'/mnt/nas/shengjie/datasets/cloth_{t}_localimg/'
        for entry in os.scandir( img_dir ):
            # entering 'q' aborts the walk for the current clothing type
            if (cid := input("输入内容（输入 'q' 退出）: ")) == 'q':
                break
            if entry.is_file() and not entry.name.endswith('.txt'):
                imgpath = osj( img_dir , entry.name )
        # embed one query image
        # while (cid := input("输入内容（输入 'q' 退出）: ")) != 'q':
            # imgpath = f'/mnt/nas/shengjie/datasets/cloth_{t}_localimg/{t}_{int(cid):07d}.jpg'
                query_emb = extract_feature_siglip2( imgpath , 
                                        image_processor=processor,
                                        model=model )  # numpy
                

                # search the index; the query embedding must stay numpy
                search_by_faiss( query_emb , index , id_map , k=5 )

        pdb.set_trace()  # deliberate pause for inspection between types


# 编写一个函数
def get_rag_by_faiss(imgpath, processor, model, cloth_type, top_k=5):
    """Retrieve the top_k most similar reference crops for a query image.

    Args:
        imgpath: path to the query image.
        processor, model: SigLIP2 processor/model from get_siglip2().
        cloth_type: one of 'collar' / 'sleeve' / 'pockets'; selects the
            FAISS index and id map to query.
        top_k: number of neighbours to return.

    Returns:
        (rag_images, rag_info): the matched crops as processed PIL images
        and a list of {"distance", "name", "box"} dicts.
    """
    assert cloth_type in ['collar','sleeve','pockets'],"cloth type must in ['collar','sleeve','pockets']"
    t = cloth_type

    get_faiss_path = lambda t: f'/mnt/nas/shengjie/datasets/faiss_emb_{t}.faiss'
    get_idmap_path = lambda t: f'/mnt/nas/shengjie/datasets/faiss_emb_{t}_idmap.pt'

    index = load_faiss_index(get_faiss_path(t))
    id_map = load_idmap(get_idmap_path(t))

    query_emb = extract_feature_siglip2(imgpath,
                                        image_processor=processor,
                                        model=model)  # (1, D) numpy

    # BUG FIX: k was hard-coded to 5, silently ignoring the top_k parameter.
    rag_images, rag_info = search_by_faiss(query_emb, index, id_map, k=top_k)
    return rag_images, rag_info

# 写一个gradio
#   图片是输入框,图片搜索结果输出框
#   选择 cloth type 的框
#   输入 topk 的 框

# Script entry point: run the stage selected inside main().
if __name__=='__main__':
    main()