# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import numpy as np
import cv2
import os
import json
from tqdm import tqdm
from PIL import Image
import warnings
from prettytable import PrettyTable
from collections import OrderedDict

import torch

from mmseg.utils.sam_tools import get_sam_model, gen_auto_mask, gen_prompt_mask
from mmseg.datasets.builder import DATASETS
from mmseg.datasets.custom import CustomDataset
from mmseg.core import eval_metrics, intersect_and_union, pre_eval_to_metrics

from mmseg.datasets.utils import pixel_selection, region_selection, test_selection
from mmseg.datasets.utils.spatial_purity import SpatialPurity
from mmseg.datasets.utils.floating_region import FloatingRegionScore
import mmcv
from mmcv.image import tensor2imgs


# from .utils import pixel_selection, region_selection
# from .utils.spatial_purity import SpatialPurity
# from .utils.floating_region import FloatingRegionScore


@DATASETS.register_module()
class PVDataset_forAdap(CustomDataset):
    """Potsdam and Vaihingen dataset for domain adaptation.

    Wraps a source domain (A), loaded through the standard ``CustomDataset``
    arguments, together with an optional target domain (B), loaded through
    the ``B_*`` arguments.  When ``ada_args`` is given, the dataset also
    maintains on-disk "active" annotation masks used for active domain
    adaptation (ADA) sample selection.

    Args:
        split (str): Split txt file for domain A of the Potsdam / Vaihingen
            dataset.
        B_split (str | None): Split txt file for domain B. Default: None.
        B_img_dir (str | None): Image directory for domain B; relative paths
            are joined onto ``data_root``. Default: None.
        B_img_suffix (str): Image suffix for domain B. Default: '.png'.
        B_ann_dir (str | None): Annotation directory for domain B.
            Default: None.
        B_seg_map_suffix (str): Segmentation map suffix for domain B.
            Default: '.png'.
        B_coco_mask_dir (str | None): Directory holding precomputed (SAM)
            masks for domain B. Default: None.
        ada_args (dict | None): ADA configuration. Must contain
            ``active_api`` ('write' or 'read'); 'write' additionally needs
            ``save_dir``, ``mode`` ('PA'/'RA'/'TEST'), ``radius``,
            ``num_classes``, ``sample_way`` ('batch'/'total') and either
            ``pixels`` (PA) or ``ratio`` (RA/TEST). Default: None.
    """

    CLASSES = ('impervious_surface', 'building', 'low_vegetation', 'tree',
               'car', 'clutter')

    PALETTE = [[255, 255, 255], [0, 0, 255], [0, 255, 255], [0, 255, 0],
               [255, 255, 0], [255, 0, 0]]

    def __init__(self,
                 split,
                 B_split=None,
                 B_img_dir=None,
                 B_img_suffix='.png',
                 B_ann_dir=None,
                 B_seg_map_suffix='.png',
                 B_coco_mask_dir=None,
                 ada_args=None,
                 **kwargs):
        super(PVDataset_forAdap, self).__init__(
            img_suffix='.png', seg_map_suffix='.png', reduce_zero_label=True, split=split, **kwargs)
        assert osp.exists(self.img_dir) and self.split is not None

        self.B_img_dir = B_img_dir
        self.B_img_suffix = B_img_suffix
        self.B_ann_dir = B_ann_dir
        self.B_seg_map_suffix = B_seg_map_suffix
        self.B_split = B_split
        self.B_coco_mask_dir = B_coco_mask_dir
        # NOTE(review): substring test is a fragile way to detect an already
        # rooted path, but it is kept for backward compatibility.
        if B_coco_mask_dir is not None and self.data_root not in self.B_coco_mask_dir:
            self.B_coco_mask_dir = osp.join(self.data_root, B_coco_mask_dir)

        # Join B-domain paths onto data_root when they are relative, then
        # load the B-domain image infos with the inherited loader.
        if self.B_img_dir is not None:
            if not osp.isabs(self.B_img_dir):
                self.B_img_dir = osp.join(self.data_root, self.B_img_dir)
            if not (self.B_ann_dir is None or osp.isabs(self.B_ann_dir)):
                self.B_ann_dir = osp.join(self.data_root, self.B_ann_dir)
            if not (self.B_split is None or osp.isabs(self.B_split)):
                self.B_split = osp.join(self.data_root, self.B_split)
            self.B_img_infos = self.load_annotations(self.B_img_dir, self.B_img_suffix,
                                                     self.B_ann_dir,
                                                     self.B_seg_map_suffix, self.B_split)
        else:
            self.B_img_infos = None

        self.ada_args = ada_args
        if self.ada_args is not None:
            # BUGFIX: `hasattr` never sees dict keys, so the original assert
            # failed for a plain-dict config.  A membership test works for
            # both dict and mmcv Config objects.
            assert 'active_api' in self.ada_args, \
                'ada_args must provide an "active_api" key'
            if self.ada_args['active_api'] == 'write':
                assert self.B_ann_dir is None, \
                    'active_api="write" expects no domain-B annotation dir'
                self._init_ADA_mask(self.ann_dir, self.img_infos)
                self._prepare_ada_args(self.ada_args)
                self.ada_args['active_style'] = 'write'
            elif self.ada_args['active_api'] == 'read':
                self._init_ADA_mask(self.B_ann_dir, self.B_img_infos)

    def _init_ADA_mask(self, root_dir, img_infos):
        """Create ('write') or locate ('read') the per-image active masks.

        For every image an ``active_mask`` PNG (initialised to 255, i.e.
        "not actively labelled yet") and an ``active_indicator`` .pth file
        (bool tensors under keys 'active'/'selected') live under
        ``ada_args['save_dir']``; their paths are recorded on each
        ``img_info`` dict.

        Args:
            root_dir (str): Directory the images are read from (only opened
                when ``active_api`` == 'write', to size the masks).
            img_infos (list[dict]): Image infos to annotate with the mask
                and indicator paths.
        """
        save_dir = self.ada_args['save_dir']
        active_mask_dir = os.path.join(save_dir, 'active_mask')
        active_indicator_dir = os.path.join(save_dir, 'active_indicator')
        self.active_mask_dir = active_mask_dir
        self.active_indicator_dir = active_indicator_dir
        os.makedirs(active_mask_dir, exist_ok=True)
        os.makedirs(active_indicator_dir, exist_ok=True)

        for img_info in tqdm(img_infos, desc='init ada mask'):
            filepath = os.path.join(root_dir, img_info['filename'])
            # Truncate at the FIRST dot on purpose to match the historic
            # naming scheme (osp.splitext would cut at the last dot).
            stem = img_info['filename'].split('.')[0]
            mask_path = os.path.join(active_mask_dir, stem + '.png')
            indicator_path = os.path.join(active_indicator_dir, stem + '.pth')
            if self.ada_args['active_api'] == 'write':
                img = Image.open(filepath).convert('RGB')
                h, w = img.size[1], img.size[0]  # PIL .size is (width, height)
                # 255 marks pixels that have not been actively labelled yet.
                mask = np.ones((h, w), dtype=np.uint8) * 255
                Image.fromarray(mask).save(mask_path)
                # Placeholder indicators; expanded lazily in ada_pre_active.
                indicator = {'active': torch.tensor([0], dtype=torch.bool),
                             'selected': torch.tensor([0], dtype=torch.bool)}
                torch.save(indicator, indicator_path)
            img_info['active_mask_path'] = mask_path
            img_info['active_indicator_path'] = indicator_path

    def _prepare_ada_args(self, ada_kargs):
        """Instantiate the active-selection scorer and per-round budget.

        Modes: 'PA' (pixel-based, needs 'pixels'), 'RA' and 'TEST'
        (region-based, need 'ratio').  ``sample_way`` decides whether the
        configured budget applies per round ('batch') or is divided over
        ``sample_num`` rounds ('total').
        """
        if ada_kargs is None:
            return
        print('************\n' * 5, '\t========\tstart prepare ada args\t========\t')
        mode = self.ada_args['mode']
        sample_way = self.ada_args['sample_way']
        kernel_size = 2 * ada_kargs['radius'] + 1
        if mode == 'PA':
            assert 'pixels' in self.ada_args
            self.pa_select_fn = SpatialPurity(in_channels=ada_kargs['num_classes'], size=kernel_size).cuda()
            if sample_way == 'batch':
                self.pre_pixels = np.array(self.ada_args['pixels'], dtype=np.int32)
            elif sample_way == 'total':
                # Spread the total pixel budget evenly over all rounds.
                self.pre_pixels = max(1, np.ceil(self.ada_args['pixels'] / self.ada_args['sample_num']).astype(np.int32))
            else:
                raise ValueError('invalid ada_args: sample_way, is {} should in [batch, total]'.format(sample_way))
            print('kernel size:{},\tsample pixels:{},\ttotal num:{}'.format(ada_kargs['radius'], self.pre_pixels, self.pre_pixels * self.ada_args['sample_num']))
        elif mode in ('RA', 'TEST'):
            # RA and TEST differ only in which attribute holds the scorer;
            # the original duplicated this whole branch verbatim.
            assert 'ratio' in self.ada_args
            self.active_ratio = self.ada_args['ratio']
            select_fn = FloatingRegionScore(in_channels=ada_kargs['num_classes'], size=kernel_size).cuda()
            if mode == 'RA':
                self.ra_select_fn = select_fn
            else:
                self.test_select_fn = select_fn
            if sample_way == 'batch':
                self.pre_ratio = self.ada_args['ratio']
            elif sample_way == 'total':
                self.pre_ratio = self.ada_args['ratio'] / self.ada_args['sample_num']
            else:
                raise ValueError('invalid ada_args: sample_way, is {} should in [batch, total]'.format(sample_way))
            print('kernel size:{},\tsample ratio:{:.4f},\ttotal ratio:{:.4f}'.format(ada_kargs['radius'], self.pre_ratio, self.pre_ratio * self.ada_args['sample_num']))
        else:
            raise ValueError('Unsupported adaptation mode: {}'.format(mode))
        print('\t========\tover prepare ada args\t========\t', '\n************' * 5, )

    def get_ann_info_B(self, idx):
        """Get domain-B annotation by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """
        return self.B_img_infos[idx]['ann']

    def prepare_test_img(self, idx):
        """Get testing data after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Testing data after pipeline with new keys introduced by
                pipeline.
        """
        img_info = self.img_infos[idx]
        ann_info = self.get_ann_info(idx)
        results = dict(img_info=img_info, ann_info=ann_info)
        self.pre_pipeline(results)
        return self.pipeline(results)

    def prepare_train_img(self, idx):
        """Get training data and annotations after pipeline.

        A random domain-B image is paired with the requested domain-A image
        so the pipeline can build mixed/adapted samples.

        Args:
            idx (int): Index of data (domain A).

        Returns:
            dict: Training data and annotation after pipeline with new keys
                introduced by pipeline.
        """
        img_info = self.img_infos[idx]
        ann_info = self.get_ann_info(idx)
        assert len(self.B_img_infos) > 0
        idx_b = np.random.randint(0, len(self.B_img_infos))
        B_img_info = self.B_img_infos[idx_b]
        results = dict(img_info=img_info, ann_info=ann_info, B_img_info=B_img_info)
        if self.B_ann_dir is not None:
            results.update({'B_ann_info': self.get_ann_info_B(idx_b)})
        self.pre_pipeline(results)
        return self.pipeline(results)

    def pre_pipeline(self, results):
        """Prepare results dict for pipeline."""
        results['seg_fields'] = []
        results['img_prefix'] = self.img_dir
        results['seg_prefix'] = self.ann_dir
        if self.custom_classes:
            results['label_map'] = self.label_map
        # B_img_prefix was previously assigned twice (once guarded by
        # test_mode, then unconditionally); the single unconditional
        # assignment is behaviorally equivalent.
        results['B_img_prefix'] = self.B_img_dir
        if self.B_ann_dir is not None:
            results['B_seg_prefix'] = self.B_ann_dir
        if self.B_coco_mask_dir is not None:
            results['B_coco_mask_prefix'] = self.B_coco_mask_dir

    @staticmethod
    def get_sam_auto_mask(img_dir, img_infos, save_dir, model_type=None, checkpoint_path=None):
        """Generate SAM automatic masks for each image and dump them to JSON.

        Args:
            img_dir (str): Directory containing the images.
            img_infos (list[dict]): Image infos with a 'filename' key.
            save_dir (str): Output directory; one ``<stem>.json`` per image.
            model_type (str | None): SAM model type, forwarded to
                ``get_sam_model``.
            checkpoint_path (str | None): SAM checkpoint path.

        Raises:
            ValueError: If an image file cannot be decoded.
        """
        os.makedirs(save_dir, exist_ok=True)
        mask_generator = get_sam_model(model_type=model_type, checkpoint_path=checkpoint_path, return_type='mask_generator', cuda=True)
        for img_info in tqdm(img_infos, total=len(img_infos)):
            filename = img_info['filename']
            filepath = os.path.join(img_dir, filename)
            print(f"Processing '{filepath}'...")
            image = cv2.imread(filepath)  # returns None on failure
            if image is None:
                # BUGFIX: the message previously hard-coded '(unknown)'
                # instead of naming the file that failed to load.
                raise ValueError(f"Could not load '{filepath}' as an image")
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            masks = gen_auto_mask(image, mask_generator=mask_generator)
            with open(os.path.join(save_dir, filename.split('.')[0] + '.json'), 'w') as f:
                json.dump(masks, f, indent=2)
        print(f'sam masks generator done！')

    def ada_pre_active(self, index):
        """Load the active mask/indicator state for one image onto the GPU.

        Args:
            index (int): Index into ``self.img_infos``.

        Returns:
            tuple: ``(filename, active_mask, active_indicator,
            active_selected)`` where the last three are CUDA tensors.
        """
        filename = self.img_infos[index]['filename']

        active_mask_path = self.img_infos[index]['active_mask_path']
        # indicator records which regions have already been queried
        active_indicator_path = self.img_infos[index]['active_indicator_path']
        # mask records which regions have already been selected/labelled
        active_mask = np.array(Image.open(active_mask_path), dtype=np.uint8)

        active_mask = torch.from_numpy(active_mask).long()

        indicator = torch.load(active_indicator_path)
        active_indicator = indicator['active']
        active_selected = indicator['selected']
        # First load: the stored placeholders have shape (1,), so expand
        # them to full-resolution boolean maps.
        if active_indicator.size() == (1,):
            active_indicator = torch.zeros_like(active_mask, dtype=torch.bool)
            active_selected = torch.zeros_like(active_mask, dtype=torch.bool)

        active_mask = active_mask.cuda(non_blocking=True)
        active_indicator = active_indicator.cuda(non_blocking=True)
        active_selected = active_selected.cuda(non_blocking=True)

        return filename, active_mask, active_indicator, active_selected

    def active_fn(self, logits, ground_truth, indices):
        """Run one active-selection round for a batch and persist the state.

        Args:
            logits (Tensor): Per-image class logits, shape (N, C, H, W).
            ground_truth (Tensor): Ground-truth labels used to "annotate"
                the newly selected pixels/regions.
            indices (Iterable[int]): Dataset indices matching ``logits``.
        """
        assert len(logits.shape) == 4
        for logit, index in zip(logits, indices):
            filename, active_mask, active_indicator, active_selected = self.ada_pre_active(index)
            mask_radius = self.ada_args['radius']
            num_classes = self.ada_args['num_classes']
            if self.ada_args['mode'] == 'PA':
                assert 'pixels' in self.ada_args
                # BUGFIX: the condition was inverted — it passed None when
                # the scorer existed and dereferenced a missing attribute
                # when it did not.
                select_fn = self.pa_select_fn if hasattr(self, 'pa_select_fn') else None
                active_mask, active_region, active_selected = pixel_selection(
                    logit, active_mask, active_indicator, active_selected, ground_truth.squeeze(), num_classes=num_classes, active_pixels=self.pre_pixels, mask_radius=mask_radius, select_fn=select_fn)
            elif self.ada_args['mode'] == 'RA':
                assert 'ratio' in self.ada_args
                # BUGFIX: same inverted condition as above.
                select_fn = self.ra_select_fn if hasattr(self, 'ra_select_fn') else None
                active_mask, active_region, active_selected = region_selection(
                    logit, active_mask, active_indicator, active_selected, ground_truth.squeeze(), num_classes=num_classes, active_ratio=self.pre_ratio, mask_radius=mask_radius, select_fn=select_fn)
            elif self.ada_args['mode'] == 'TEST':
                assert 'ratio' in self.ada_args
                # BUGFIX: same inverted condition as above.
                select_fn = self.test_select_fn if hasattr(self, 'test_select_fn') else None
                active_mask, active_region, active_selected = test_selection(
                    logit, active_mask, active_indicator, active_selected, ground_truth.squeeze(), num_classes=num_classes, active_ratio=self.pre_ratio, mask_radius=mask_radius, select_fn=select_fn)
            else:
                raise ValueError('Invalid mode for adaptive evaluation')

            # Persist the updated mask and indicator state back to disk.
            active_mask = Image.fromarray(np.array(active_mask.cpu().numpy(), dtype=np.uint8))
            active_mask.save(self.img_infos[index]['active_mask_path'])
            indicator = {
                'active': active_indicator,
                'selected': active_selected
            }
            torch.save(indicator, self.img_infos[index]['active_indicator_path'])
        return

    def show_data(self,
                  item,
                  palette=None,
                  win_name='',
                  wait_time=0,
                  show_dir=None,
                  opacity=0.5):
        """Blend a sample's ground-truth map over its image and show/save it.

        Args:
            item (dict): Pipeline output with 'img', 'gt_semantic_seg' and
                'img_metas' DataContainers.
            palette (list[list[int]] | np.ndarray | None): Palette of the
                segmentation map. If None, ``self.PALETTE`` is used, or a
                seeded random palette when that is also None. Default: None.
            win_name (str): The window name (unused here, kept for interface
                compatibility).
            wait_time (int): Value of waitKey param (unused here).
                Default: 0.
            show_dir (str | None): Directory to write the blended image to.
                If None, the image is returned instead. Default: None.
            opacity (float): Opacity of the painted segmentation map, in
                (0, 1]. Default: 0.5.

        Returns:
            np.ndarray | None: The blended image when ``show_dir`` is None.
        """
        img = item['img'].data
        seg = item['gt_semantic_seg'].data.numpy()[0]
        name = item['img_metas'].data['ori_filename']
        if isinstance(img, torch.Tensor):
            # De-normalize back to a displayable uint8 image.
            img = tensor2imgs(img.unsqueeze(0), **item['img_metas'].data['img_norm_cfg'])[0]

        img = mmcv.imread(img)
        img = img.copy()
        if palette is None:
            if self.PALETTE is None:
                # Get random state before set seed,
                # and restore random state later.
                # It will prevent loss of randomness, as the palette
                # may be different in each iteration if not specified.
                # See: https://github.com/open-mmlab/mmdetection/issues/5844
                state = np.random.get_state()
                np.random.seed(42)
                # random palette
                palette = np.random.randint(
                    0, 255, size=(len(self.CLASSES), 3))
                np.random.set_state(state)
            else:
                palette = self.PALETTE
        palette = np.array(palette)
        assert palette.shape[0] == len(self.CLASSES)
        assert palette.shape[1] == 3
        assert len(palette.shape) == 2
        assert 0 < opacity <= 1.0
        color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
        for label, color in enumerate(palette):
            color_seg[seg == label, :] = color
        # convert to BGR for OpenCV/mmcv image conventions
        color_seg = color_seg[..., ::-1]

        img = img * (1 - opacity) + color_seg * opacity
        img = img.astype(np.uint8)

        if show_dir is not None:
            mmcv.imwrite(img, osp.join(show_dir, name))
        else:
            warnings.warn(' show_dir is not specified, only '
                          'result image will be returned')
            return img


# DATASETS.register_module(name='PVDataset_forAdap', module=PVDataset_forAdap)

if __name__ == '__main__':
    # Ad-hoc driver script. Each triple-quoted heading below marks an
    # independent experiment; only the "viz auto dataset augmentation"
    # section is currently active, the others are kept commented out.
    from mmseg.datasets import build_dataset
    from mmengine import Config
    from mmseg.utils.sam_tools import visualize_img_, visualize_masks
    from mmseg.utils.sam_tools import RandomMaskSampler, gen_comb_mask
    from mmseg.models.utils.CowMask import gen_cow_mask, gen_patch_mask, gen_mix_data

    """viz auto dataset augmentation"""
    # config_path = f'./experiments/deeplabv3plus/base/deeplabv3plus_r50-d8_4x4_512x512_40k_PotsdamRGB2Vaihingen.py'
    config_path = f'./experiments/deeplabv3plus/ada_base/deeplabv3plus_r50-d8_4x4_512x512_40k_PotsdamRGB2Vaihingen.py'
    # config_path = f'./experiments/deeplabv3plus/ada_base/deeplabv3plus_r50-d8_4x4_512x512_40k_Potsdam2Vaihingen.py'
    # config_path = f'./experiments/deeplabv3plus/base/deeplabv3plus_r50-d8_4x4_512x512_40k_Potsdam2Vaihingen.py'
    # config_path = f'./experiments/deeplabv3plus/base/deeplabv3plus_r50-d8_4x4_512x512_40k_Vaihingen2Potsdam.py'

    cfg = Config.fromfile(config_path)
    print(cfg)
    # NOTE(review): the class is already registered by its decorator above;
    # this explicit call may raise a duplicate-registration error unless the
    # decorator is commented out or force=True is supported -- confirm
    # against the installed mmcv version.
    DATASETS.register_module(name='PVDataset_forAdap', module=PVDataset_forAdap)

    # show_dir = r'/data/yrz/repos/ST-DASegNet/data/Potsdam_IRRG_DA/viz_dir/train'
    # show_dir = r'/data/yrz/repos/ST-DASegNet/data/Vaihingen_IRRG_DA/viz_dir/train'
    # Swap in the visualization pipeline so raw images/labels come through.
    cfg.data.train.pipeline = cfg.data.viz_pipeline
    dataset = build_dataset(cfg.data.train)

    # show_dir = r'/data/yrz/repos/ST-DASegNet/data/Vaihingen_IRRG_DA/viz_dir/val'
    # show_dir = r'/data/yrz/repos/ST-DASegNet/data/Potsdam_IRRG_DA/viz_dir/val'
    # cfg.data.val.pipeline = cfg.data.viz_pipeline
    # dataset = build_dataset(cfg.data.val,dict(test_mode=True))

    # Fix one domain-A sample (index 120) to mix against.
    items_ori = dataset.__getitem__(120)
    # items_ori = dataset.__getitem__(120)
    # NOTE(review): result of this call is discarded -- looks like leftover
    # debugging; the assignment on the next line does the real work.
    tensor2imgs(items_ori['img'].data.unsqueeze(0), **items_ori['img_metas'].data['img_norm_cfg'])[0]
    img_A = tensor2imgs(items_ori['img'].data.unsqueeze(0), **items_ori['img_metas'].data['img_norm_cfg'])[0][:, :, ::-1]
    gt_A = items_ori['gt_semantic_seg'].data.cpu().numpy().squeeze()


    # save_dir = './viz_dir/data_aug_by_mask_PotsdamRGB'
    save_dir = './viz_dir/data_aug_by_mask_Potsdam'
    # save_dir = './viz_dir/data_aug_by_mask_Vaihingen'
    visualize_img_(img_A, save_path=os.path.join(save_dir, 'ori_imgA.png'))
    visualize_img_(gt_A, palette=dataset.PALETTE, save_path=os.path.join(save_dir, 'ori_gtA.png'))
    num = 5
    for index in tqdm(range(num)):
        items = dataset.__getitem__(index)
        # img_A = items['img']
        # img_B = items['B_img'].data.cpu().numpy().transpose((1,2,0))[:,:,::-1]
        # NOTE(review): domain-A norm cfg (items_ori) is reused to
        # de-normalize the domain-B image -- presumably both domains share
        # one img_norm_cfg; verify in the config.
        img_B = tensor2imgs(items['B_img'].data.unsqueeze(0), **items_ori['img_metas'].data['img_norm_cfg'])[0][:, :, ::-1]
        auto_mask = items['B_auto_mask']

        # Build three binary 3-channel mixing masks: SAM instance mask,
        # rectangular patch mask, and CowMask.
        auto_mask_img = np.stack([auto_mask, auto_mask, auto_mask], axis=-1)
        patch_mask = gen_patch_mask(items['img'].data).numpy().transpose((1, 2, 0))[:, :, :1].repeat(3, -1)
        cow_mask = (gen_cow_mask(items['img'].data).numpy().transpose((1, 2, 0)) > 0.5)[:, :, :1].astype('float').repeat(3, -1)
        # gt_A = items['gt_semantic_seg']

        gt_B = items['B_gt_semantic_seg'].data.cpu().numpy().squeeze()


        visualize_img_(img_B,save_path=os.path.join(save_dir, f'imgB{index}.png'))
        visualize_img_(gt_B,save_path=os.path.join(save_dir, f'gtB{index}.png'),palette=dataset.PALETTE)
        # visualize_img_(auto_mask_img,save_path=os.path.join(save_dir, f'imgB{index}.png'))

        # Mix A and B with the SAM instance mask (mask==1 takes B pixels).
        img_mix = (1 - auto_mask_img) * img_A + auto_mask_img * img_B
        gt_mix = (1 - auto_mask_img[:, :, 0]) * gt_A + auto_mask_img[:, :, 0] * gt_B
        visualize_img_(img_mix.astype('uint8'), save_path=os.path.join(save_dir, f'ins_mask_mix_img{index}.png'))
        visualize_img_(gt_mix, palette=dataset.PALETTE, save_path=os.path.join(save_dir, f'ins_mask_mix_gt{index}.png'))


        # Same mixing with the rectangular patch mask.
        img_mix = (1 - patch_mask) * img_A + patch_mask * img_B
        gt_mix = (1 - patch_mask[:, :, 0]) * gt_A + patch_mask[:, :, 0] * gt_B
        visualize_img_(img_mix.astype('uint8'), save_path=os.path.join(save_dir, f'patch_mask_mix_img{index}.png'))
        visualize_img_(gt_mix, palette=dataset.PALETTE, save_path=os.path.join(save_dir, f'patch_mask_mix_gt{index}.png'))

        # Same mixing with the CowMask.
        img_mix = (1 - cow_mask) * img_A + cow_mask * img_B
        gt_mix = (1 - cow_mask[:, :, 0]) * gt_A + cow_mask[:, :, 0] * gt_B
        visualize_img_(img_mix.astype('uint8'), save_path=os.path.join(save_dir, f'cow_mask_mix_img{index}.png'))
        visualize_img_(gt_mix, palette=dataset.PALETTE, save_path=os.path.join(save_dir, f'cow_mask_mix_gt{index}.png'))

    """viz dataset label setting"""
    # # config_path = f'./experiments/deeplabv3plus/base/deeplabv3plus_r50-d8_4x4_512x512_40k_Potsdam2Vaihingen.py'
    # config_path = f'./experiments/deeplabv3plus/base/deeplabv3plus_r50-d8_4x4_512x512_40k_Vaihingen2Potsdam.py'
    # cfg = Config.fromfile(config_path)
    # print(cfg)
    # DATASETS.register_module(name='PVDataset_forAdap', module=PVDataset_forAdap)
    #
    # # show_dir = r'/data/yrz/repos/ST-DASegNet/data/Potsdam_IRRG_DA/viz_dir/train'
    # # show_dir = r'/data/yrz/repos/ST-DASegNet/data/Vaihingen_IRRG_DA/viz_dir/train'
    # # cfg.data.train.pipeline = cfg.data.viz_pipeline
    # # dataset = build_dataset(cfg.data.train)
    #
    # # show_dir = r'/data/yrz/repos/ST-DASegNet/data/Vaihingen_IRRG_DA/viz_dir/val'
    # show_dir = r'/data/yrz/repos/ST-DASegNet/data/Potsdam_IRRG_DA/viz_dir/val'
    # cfg.data.val.pipeline = cfg.data.viz_pipeline
    # dataset = build_dataset(cfg.data.val,dict(test_mode=True))
    #
    # for index in tqdm(range(dataset.__len__())):
    #     items = dataset.__getitem__(index)
    #     dataset.show_data(items, show_dir=show_dir, opacity=1)

    """ada setting"""

    # config_path = f'./experiments/deeplabv3plus/ada_base/deeplabv3plus_r50-d8_4x4_512x512_40k_Potsdam2Vaihingen.py'
    # cfg = Config.fromfile(config_path)
    # print(cfg)
    # DATASETS.register_module(name='PVDataset_forAdap', module=PVDataset_forAdap)
    # # dataset_ada = build_dataset(cfg.data.ada)
    # dataset = build_dataset(cfg.data.train)
    #
    # for index in range(dataset.__len__()):
    #     items = dataset.__getitem__(index)

    """da setting"""

    # config_path = f'./experiments/deeplabv3plus/base/deeplabv3plus_r50-d8_4x4_512x512_40k_PotsdamRGB2Vaihingen.py'
    # # config_path = f'./experiments/deeplabv3plus/base/deeplabv3plus_r50-d8_4x4_512x512_40k_Vaihingen2PotsdamRGB.py'
    # cfg = Config.fromfile(config_path)
    # DATASETS.register_module(name='PVDataset_forAdap', module=PVDataset_forAdap) # requires the registration call above to be commented out first
    # # cfg.data.train=0
    # dataset = build_dataset(cfg.data.train)
    # img_dir = dataset.B_img_dir
    # img_infos = dataset.B_img_infos
    # save_dir = img_dir.replace('img_dir', 'auto_mask_dir')
    # print("saving dir is %s" % save_dir)
    # model_type = 'vit_h'
    # checkpoint_path = '/data/yrz/pretrained/sam_vit_h_4b8939.pth'
    #
    # # quick visual sanity check of the generated masks
    # # for idx in range(dataset.__len__()):
    # #     item = dataset.__getitem__(idx)
    # #     # visualize_img_(item['B_gt_semantic_seg'].data.cpu().numpy())
    # #     item['B_auto_mask'][item['B_auto_mask'] == 255.] = 0.
    # #     visualize_img_(item['B_auto_mask'])
    # #     print(np.unique(item['B_auto_mask']))
    # #     if np.max(item['B_auto_mask']) > 2:
    # #         raise ValueError(item['B_auto_mask'])
    #
    # dataset.get_sam_auto_mask(img_dir,img_infos,save_dir,model_type,checkpoint_path)
