from apply_clip import OpenCLIPNetwork,OpenCLIPNetworkConfig
import torch
import numpy as np
from tqdm import tqdm
import os

class SAM_CLIP():
    """Couples a SAM mask generator with an OpenCLIP image encoder.

    Each input image is segmented with SAM, every segment crop is embedded
    with CLIP, and the resulting (features, segmentation map) pair is saved
    to disk as ``feature_{index}.npy`` / ``mask_{index}.npy``.
    """

    def __init__(self, sam_ckpt_path, feature_folder, mask_folder, sam_type='efficient', cfg=None):
        """Build the SAM + CLIP pipeline.

        Args:
            sam_ckpt_path: path to the SAM checkpoint file.
            feature_folder: directory where CLIP feature arrays are written.
            mask_folder: directory where segmentation-map arrays are written.
            sam_type: 'efficient' (requires ``cfg``) or 'normal'.
            cfg: configuration object for the efficient-SAM backend.

        Raises:
            ValueError: if ``sam_type`` is invalid, or 'efficient' was
                requested without a ``cfg``.
        """
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        # Backends are imported lazily so the unused one's dependencies
        # are never required.
        if sam_type == 'efficient' and cfg is not None:
            from apply_efficient_sam import Efficient_SAM_Encoder
            self.sam_encoder = Efficient_SAM_Encoder(sam_ckpt_path, cfg)
        elif sam_type == 'normal':
            from apply_sam import SAM_Encoder
            self.sam_encoder = SAM_Encoder(sam_ckpt_path)
        else:
            raise ValueError("sam_type must be 'efficient' or 'normal', and cfg must be provided for 'efficient' type.")
        self.clip_encoder = OpenCLIPNetwork(OpenCLIPNetworkConfig)
        self.feature_folder = feature_folder
        self.mask_folder = mask_folder

    def _embed_clip_sam_tiles(self, image):
        """Segment one image with SAM and CLIP-encode each segment crop.

        Args:
            image: tensor of shape (1, C, H, W).

        Returns:
            Tuple ``(clip_embed, seg_map)`` where ``clip_embed`` is a float16
            numpy array of L2-normalized CLIP embeddings (one row per segment
            crop) and ``seg_map`` is the SAM segmentation map of shape (H, W).
        """
        aug_imgs = torch.cat([image])
        # seg_images: (b, 3, H, W) segment crops; seg_map: (H, W) segment ids
        seg_images, seg_map = self.sam_encoder.sam_encoder(aug_imgs)

        with torch.no_grad():
            clip_embed = self.clip_encoder.encode_image(seg_images.to(self.device))
        # L2-normalize so downstream cosine similarity is a plain dot product.
        clip_embed /= clip_embed.norm(dim=-1, keepdim=True)
        clip_embed = clip_embed.detach().cpu().half().numpy()
        return clip_embed, seg_map

    def sava_numpy(self, index, data):
        """Save one frame's arrays as ``feature_{index}.npy`` / ``mask_{index}.npy``.

        (Method name kept for backward compatibility; 'sava' is a historical
        typo for 'save'.)

        Args:
            index: frame index used in the output file names.
            data: dict with keys 'seg_maps' and 'feature'.
        """
        save_path_f = os.path.join(self.feature_folder, f'feature_{index}.npy')
        save_path_s = os.path.join(self.mask_folder, f'mask_{index}.npy')
        np.save(save_path_s, data['seg_maps'])
        np.save(save_path_f, data['feature'])

    def create(self, image_list, data_list):
        """Embed every image and persist per-frame features and masks.

        Args:
            image_list: sequence of (C, H, W) image tensors, e.g. an
                (N, C, H, W) stacked tensor.
            data_list: parallel list of filenames like 'rgb_0.png'; the
                trailing number becomes the output file index.

        Raises:
            ValueError: if ``image_list`` is None, or embedding fails for
                any image (the underlying error is chained as the cause).
        """
        # Explicit raise instead of `assert`: assertions are stripped
        # under `python -O`, silently disabling this validation.
        if image_list is None:
            raise ValueError("image_list must be provided to generate features")
        # Use the device selected in __init__ instead of a hard-coded
        # 'cuda', which crashed on CPU-only machines.
        self.sam_encoder.mask_generator.predictor.model.to(self.device)

        for i, img in tqdm(enumerate(image_list), desc="Embedding images", leave=False):
            try:
                # img_embed: (num_segments, embed_dim); seg_map: (H, W)
                img_embed, seg_map = self._embed_clip_sam_tiles(img.unsqueeze(0))
            except Exception as e:
                # Narrow except + chained cause: the original bare `except:`
                # discarded the real traceback and even caught KeyboardInterrupt.
                raise ValueError(f"embedding failed at image {i + 1}") from e

            # e.g. 'rgb_0.png' -> index '0'
            index = data_list[i].split('.')[0].split('_')[1]
            curr = {'seg_maps': seg_map, 'feature': img_embed}
            self.sava_numpy(index, curr)