import os
from PIL import Image
import numpy as np
from segment_anything import SamAutomaticMaskGenerator, sam_model_registry
import matplotlib.pyplot as plt
import torch
import random
from tqdm import tqdm
import argparse
import warnings
warnings.filterwarnings('ignore', category=UserWarning)

from utils.common import check_and_create_directory, list_all_files, chunk_list
from utils.auto_sam import auto_sam
from utils.vit.vit import vit_encode


if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='Process some paths.')
    parser.add_argument('--mode', type=str, required=True, help='Mode (train/val)')
    args = parser.parse_args()

    mode = args.mode
    # Dataset layout: rgbs/ holds the source JPEGs, tmp/ holds per-image SAM
    # segmentation tensors (.pt) plus per-segment bounding boxes (.bbox), and
    # seg/ receives the ViT encodings produced by this script.
    input_path = '/home/zry/datasets/building/%s/rgbs' % mode
    output_path = '/home/zry/datasets/building/%s/seg' % mode
    tmp_path = '/home/zry/datasets/building/%s/tmp' % mode

    check_and_create_directory(output_path)
    device = "cuda:0"

    image_paths = list_all_files(input_path, '.jpg')
    # Resume support: one .pth is written per processed image, so the count of
    # existing .pth files is the number of images already done.
    check_idx = len(list_all_files(output_path, '.pth'))

    for idx, path in tqdm(enumerate(image_paths), total=len(image_paths)):
        # Skip images already encoded by a previous (interrupted) run.
        if idx < check_idx:
            continue

        # np.array() forces the pixel data to load, so the file handle can be
        # closed right away via the context manager.
        with Image.open(path) as image:
            image_array = np.array(image)

        # The segmentation tensor for this image lives in tmp/ under the
        # image's file name (extension included), e.g. "foo.jpg.pt".
        image_label = os.path.split(path)[-1]
        pt_path = os.path.join(tmp_path, '%s.pt' % image_label)
        pt = torch.load(pt_path)

        # Iterate over every segment (sub-image) of this image.
        # NOTE(review): segment ids appear to span 0..pt.max(); range(n)
        # excludes the last id -- confirm whether id n (or id 0 as the
        # background) is intentionally skipped.
        n = int(pt.max().item())
        batch = []
        for seg_id in range(n):
            # BUG FIX: the original joined tmp_path with an *absolute* path
            # derived from pt_path (os.path.join discards its first argument
            # when the second is absolute) and used str.replace('.pt', ''),
            # which would also mangle a '.pt' occurring elsewhere in the path.
            # Build the bbox path from the bare file name instead; the result
            # is identical for well-formed inputs.
            bbox_path = os.path.join(tmp_path, '%s.%d.bbox' % (image_label, seg_id))
            bbox = torch.load(bbox_path)

            # Zero out everything outside this segment, then crop to its bbox.
            mask = pt == seg_id
            mask_np = np.expand_dims(mask.numpy(), axis=-1)
            sam_image = image_array * mask_np
            x, y, width, height = [int(v) for v in bbox]
            cropped_image = sam_image[y:y + height, x:x + width]
            batch.append(Image.fromarray(cropped_image))

        # Encode all segment crops of this image with the ViT in one batch and
        # persist the encodings alongside the source image path.
        enc = vit_encode(device, batch)
        enc_obj = {
            "base_image": path,
            "enc": enc
        }
        torch.save(enc_obj, os.path.join(output_path, 'vit_obj_%d.pth' % idx))