import os
from PIL import Image
import numpy as np
from segment_anything import SamAutomaticMaskGenerator, sam_model_registry
import matplotlib.pyplot as plt
import torch
import random
from tqdm import tqdm
import argparse
import warnings
warnings.filterwarnings('ignore', category=UserWarning)

from utils.common import check_and_create_directory, list_all_files, chunk_list
from utils.auto_sam import auto_sam
from utils.vit.vit import vit_encode

if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='Process some paths.')
    parser.add_argument('--mode', type=str, required=True, help='Mode (train/val)')
    args = parser.parse_args()

    mode = args.mode
    input_path = '/home/zry/datasets/building/%s/rgbs' % mode
    output_path = '/home/zry/datasets/building/%s/seg' % mode
    tmp_path = '/home/zry/datasets/building/%s/tmp' % mode

    # Record the label associated with each segmented sub-image.
    # NOTE: for group 192 of the ViT encoding results there are actually
    # fewer than 256 entries (only 62).
    seg_images_labels = torch.load('%s/seg_labels.npy' % tmp_path)  # maps each segmented sub-image path to its label
    seg_images = seg_images_labels['seg_images']
    seg_labels = seg_images_labels['labels']
    NONE_LABEL = max(seg_labels) + 1  # sentinel label for pixels not covered by any mask

    # Build a path -> label dict once: the original code called
    # seg_images.index(...) inside the per-mask loop, an O(n) scan per query.
    # setdefault keeps the FIRST occurrence, matching list.index() semantics
    # if a path appears more than once.
    label_by_image = {}
    for idx, path in enumerate(seg_images):
        label_by_image.setdefault(path, seg_labels[idx])

    sam_image_files = list_all_files(tmp_path, '.pt')
    # Iterate over every image to be segmented, i.e. the original training data.
    for f in tqdm(sam_image_files, total=len(sam_image_files)):
        image_name = os.path.split(f)[-1][:-3]  # basename with the '.pt' suffix stripped
        image_file = os.path.join(input_path, image_name)
        image = np.array(Image.open(image_file))
        # Start with every pixel marked as "no label".
        class_image = torch.ones(image.shape[:-1]) * NONE_LABEL

        # Visit each mask of this image and paint its label onto class_image.
        # (The original inner loop reused `i`, shadowing the outer loop index;
        # renamed to mask_idx and the unused outer index dropped.)
        seg_image_pt_path = os.path.join(tmp_path, image_name + '.pt')
        masks = torch.load(seg_image_pt_path)
        for mask_idx, mask in enumerate(masks):
            mask_image_path = os.path.join(tmp_path, "%s.%d.jpg" % (image_name, mask_idx))
            label = label_by_image[mask_image_path]
            m = mask['segmentation']  # boolean mask array selecting this segment's pixels
            class_image[m] = label

        # Save the final per-pixel label map.
        torch.save(class_image, os.path.join(output_path, image_name.replace('.jpg', '.pt')))

    print('All rgbs segmentation-and-label work have been done.')
    


