import os
import random
import numpy as np
import torch
import argparse
from SAM_CLIP import SAM_CLIP
import cv2
import re

def seed_everything(seed_value):
    """Seed all RNGs used in the pipeline (python, numpy, torch, CUDA) for reproducibility.

    Args:
        seed_value: Integer seed applied to every random number generator.
    """
    random.seed(seed_value)
    np.random.seed(seed_value)
    torch.manual_seed(seed_value)
    os.environ['PYTHONHASHSEED'] = str(seed_value)

    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed_value)
        torch.cuda.manual_seed_all(seed_value)
        torch.backends.cudnn.deterministic = True
        # BUG FIX: benchmark was True, which lets cuDNN auto-tune and pick
        # algorithms non-deterministically — it defeats the deterministic
        # flag above. It must be False for reproducible runs.
        torch.backends.cudnn.benchmark = False

if __name__ == "__main__":
    seed_everything(42)

    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset_path", type=str, required=True)
    parser.add_argument("--sam_ckpt_path", type=str, default="ckpts/sam_vit_b_01ec64.pth")
    parser.add_argument("--resolution", type=int, default=-1)
    parser.add_argument("--model_type", type=str, default="efficient")
    parser.add_argument("--model_cfg", type=str, default=None)
    args = parser.parse_args()
    torch.set_default_dtype(torch.float32)

    dataset_path = args.dataset_path
    sam_ckpt_path = args.sam_ckpt_path
    img_folder = os.path.join(dataset_path, 'rgb')
    # Sort frames numerically by the first integer in each filename
    # (a plain lexicographic sort would order frame_10 before frame_2).
    data_list = os.listdir(img_folder)
    data_list.sort(key=lambda x: int(re.search(r'\d+', x).group()))
    nums = 11  # cap on how many frames to process
    if nums < len(data_list):
        data_list = data_list[:nums]
    print(data_list)
    feature_folder = os.path.join(dataset_path, 'feature')
    os.makedirs(feature_folder, exist_ok=True)
    mask_folder = os.path.join(dataset_path, 'mask')
    os.makedirs(mask_folder, exist_ok=True)

    model = SAM_CLIP(sam_ckpt_path, feature_folder, mask_folder,
                     sam_type=args.model_type, cfg=args.model_cfg)
    img_list = []
    valid_names = []  # filenames of images that were actually loaded

    # BUG FIX: WARNED was referenced before assignment — the first >1080P
    # image under the default resolution raised NameError. Initialize it.
    WARNED = False
    for data_path in data_list:
        img_path = os.path.join(img_folder, data_path)
        image = cv2.imread(img_path)
        if image is None:
            # cv2.imread returns None for unreadable/non-image files instead
            # of raising; skip them rather than crash on .shape below.
            print(f"[ WARN ] Skipping unreadable image: {img_path}")
            continue
        orig_w, orig_h = image.shape[1], image.shape[0]
        if args.resolution == -1:
            if orig_h > 1080:
                if not WARNED:
                    print("[ INFO ] Encountered quite large input images (>1080P), rescaling to 1080P.\n "
                        "If this is not desired, please explicitly specify '--resolution/-r' as 1")
                    WARNED = True
                global_down = orig_h / 1080
            else:
                global_down = 1
        else:
            global_down = orig_w / args.resolution

        scale = float(global_down)
        resolution = (int(orig_w / scale), int(orig_h / scale))

        image = cv2.resize(image, resolution)
        image = torch.from_numpy(image)
        img_list.append(image)
        valid_names.append(data_path)

    # (H, W, C) -> (C, H, W), plus a leading batch dimension.
    images = [img.permute(2, 0, 1)[None, ...] for img in img_list]
    # Concatenate along the batch dimension into an (N, C, H, W) tensor,
    # where N is the number of successfully loaded images.
    imgs = torch.cat(images)

    # Pass valid_names (not data_list) so names stay aligned with imgs
    # even when some files were skipped above.
    model.create(imgs, valid_names)
