import glob
import os
import random

import cv2
import numpy as np
import torch
# import torchvision.transforms.functional as F
import torch.nn.functional as F
from PIL import Image
from torch.utils.data import DataLoader
from torchvision import transforms


class DynamicFARDataset(torch.utils.data.Dataset):
    """Dataset serving visible (VIS) / infrared (IR) image pairs together
    with random irregular/segmentation masks (training) or fixed mask
    folders (evaluation).

    Config keys read here: 'mask_rate', 'round', 'use_mpe', 'rel_pos_num',
    'default_size', 'eval_size', 'pos_size'.
    """

    def __init__(self, config, flist, batch_size, mask_path=None, augment=True, training=True, test_mask_path=None, test_mask_path1=None, world_size=1):
        """
        Args:
            config: dict of dataset options (see class docstring for keys).
            flist: path to a '*.txt' list of image paths (one per line) or
                a directory that is globbed for images.
            batch_size: per-process batch size (used to map samples to
                "barrels" in reset_dataset).
            mask_path: [irregular_list_txt, segment_list_txt]; required when
                training.
            augment: kept for interface compatibility; not used elsewhere
                in this class.
            training: selects train (random masks) vs eval (fixed masks).
            test_mask_path / test_mask_path1: mask folders for eval mode.
            world_size: number of distributed processes.
        """
        super(DynamicFARDataset, self).__init__()
        self.config = config
        self.augment = augment
        self.training = training
        self.batch_size = batch_size
        self.mask_rate = config['mask_rate']
        self.round = config['round']  # for places2 round is 64
        self.use_mpe = config['use_mpe']
        self.pos_num = config['rel_pos_num']
        self.default_size = config['default_size']
        # Train uses the default size (later rescheduled per barrel by
        # reset_dataset); eval uses a fixed size.
        if training:
            self.input_size = config['default_size']
        else:
            self.input_size = config['eval_size']
        self.pos_size = config['pos_size']
        self.world_size = world_size

        # Image list: either a text file with one path per line, or a
        # directory globbed and sorted by file name.
        if flist.endswith('txt'):
            with open(flist, 'r') as f:
                self.data = [line.strip() for line in f]
        else:
            self.data = sorted(glob.glob(flist + '/*'), key=lambda x: x.split('/')[-1])

        if training:
            # Two mask pools, each given as a text list of mask paths.
            with open(mask_path[0]) as f:
                self.irregular_mask_list = sorted((line.strip() for line in f), key=lambda x: x.split('/')[-1])
            with open(mask_path[1]) as f:
                self.segment_mask_list = sorted((line.strip() for line in f), key=lambda x: x.split('/')[-1])
        else:
            # Fixed evaluation masks, sorted numerically by the number in
            # the file name (e.g. '9.jpg' -> 9).
            def extract_number(file_path):
                file_name = os.path.basename(file_path)
                return int(file_name.split('.')[0])

            self.mask_list = sorted(glob.glob(test_mask_path + '/*'), key=extract_number)
            self.mask_list1 = sorted(glob.glob(test_mask_path1 + '/*'), key=extract_number)

        # 3x3 kernels used by load_masked_position_encoding: full dilation
        # plus four corner-direction dilations.
        self.ones_filter = np.ones((3, 3), dtype=np.float32)
        self.d_filter1 = np.array([[1, 1, 0], [1, 1, 0], [0, 0, 0]], dtype=np.float32)
        self.d_filter2 = np.array([[0, 0, 0], [1, 1, 0], [1, 1, 0]], dtype=np.float32)
        self.d_filter3 = np.array([[0, 1, 1], [0, 1, 1], [0, 0, 0]], dtype=np.float32)
        self.d_filter4 = np.array([[0, 0, 0], [0, 1, 1], [0, 1, 1]], dtype=np.float32)
        # ImageNet-normalized transform and a plain ToTensor transform.
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
        self.transform1 = transforms.Compose([transforms.ToTensor()])
    def reset_dataset(self, shuffled_idx):
        """Map each shuffled sample id to its batch ("barrel") index and,
        in training mode, rebuild the per-barrel input-size schedule
        (sizes are multiples of 16, clipped to 256..512)."""
        self.idx_map = {}
        for position, sid in enumerate(shuffled_idx):
            self.idx_map[sid] = position // self.batch_size

        if not self.training:
            return

        # Number of barrels this process will see, padded by two.
        barrel_num = int(len(self.data) / (self.batch_size * self.world_size)) + 2
        if self.round == 1:
            sizes = np.clip(
                np.arange(16, 33, step=(33 - 16) / barrel_num * 2).astype(int) * 16,
                256, 512).tolist()
            self.input_size = sizes[::-1] + sizes
        else:
            sizes = np.clip(
                np.arange(15, 33, step=(33 - 16) / barrel_num * 2 * self.round).astype(int) * 16,
                256, 512).tolist()
            # Repeat the descending-then-ascending schedule (round + 1) times.
            self.input_size = []
            for _ in range(self.round + 1):
                self.input_size += sizes[::-1] + sizes

    def __len__(self):
        """Number of images in the dataset."""
        return len(self.data)

    def __getitem__(self, index):
        """Fetch the fully prepared sample dict for `index`."""
        return self.load_item(index)

    def load_name(self, index):
        """Return the bare file name of the image at `index`."""
        return os.path.basename(self.data[index])

    def split(self, tensor, crop_size=(256, 256)):
        """Tile `tensor` (B, C, H, W) into crop_size windows.

        Windows are laid on a top-left grid; any window that would overrun
        the bottom/right edge is shifted back inside the image, so border
        tiles may overlap their neighbours. Only channel 0 of each window
        is kept; it is round-tripped through a uint8 PIL image and
        self.transform1 (ToTensor), so values come back scaled to [0, 1].

        Returns:
            (crops_tensor, positions) where positions[k] is
            (batch_index, top, left) of the k-th window.
        """
        n, _, height, width = tensor.size()
        win_h, win_w = crop_size

        pieces = []
        coords = []
        for b in range(n):
            for top0 in range(0, height, win_h):
                for left0 in range(0, width, win_w):
                    # Clamp the window so it never crosses the border.
                    bottom = min(top0 + win_h, height)
                    right = min(left0 + win_w, width)
                    top = max(0, bottom - win_h)
                    left = max(0, right - win_w)

                    window = tensor[b:b + 1, :, top:bottom, left:right]
                    # (1,C,h,w) -> (h,w) array of channel 0, then PIL -> tensor.
                    plane = torch.einsum('bchw->bhwc', window).numpy()[0, :, :, 0]
                    pil_img = Image.fromarray(plane.astype('uint8'))
                    pieces.append(self.transform1(pil_img).unsqueeze(0))
                    coords.append((b, top, left))

        return torch.cat(pieces, dim=0), coords

    def split1(self, tensor, crop_size=(256, 256)):
        """Tile `tensor` (B, C, H, W) into crop_size windows, producing both
        a plain ToTensor version and an ImageNet-normalized version of each
        window.

        Windows overlapping the bottom/right edge are shifted back inside
        the image, so border tiles may overlap. Each (h, w, C) window is
        converted to a uint8 PIL image, then pushed through self.transform
        (normalized) and self.transform1 (plain [0, 1]).

        Returns:
            (plain_tiles, normalized_tiles, positions) with
            positions[k] = (batch_index, top, left).
        """
        n, _, height, width = tensor.size()
        win_h, win_w = crop_size

        normed = []
        plain = []
        coords = []
        for b in range(n):
            for top0 in range(0, height, win_h):
                for left0 in range(0, width, win_w):
                    # Clamp the window so it never crosses the border.
                    bottom = min(top0 + win_h, height)
                    right = min(left0 + win_w, width)
                    top = max(0, bottom - win_h)
                    left = max(0, right - win_w)

                    window = tensor[b:b + 1, :, top:bottom, left:right]
                    hwc = torch.einsum('bchw->bhwc', window).numpy()[0]
                    pil_img = Image.fromarray(hwc.astype('uint8'))
                    normed.append(self.transform(pil_img).unsqueeze(0))
                    plain.append(self.transform1(pil_img).unsqueeze(0))
                    coords.append((b, top, left))

        return torch.cat(plain, dim=0), torch.cat(normed, dim=0), coords

    def load_item(self, index):
        """Build one sample: tiled VIS/IR image pair, masks and (optionally)
        masked position encodings.

        Returns a dict with normalized 256x256 tiles ('image'/'image1'),
        un-normalized tiles ('img_256'/'img1_256'), the two masks, tile
        positions and size metadata.
        """
        # Pick the input size for this sample: per-barrel size when the
        # schedule (a list) has been built by reset_dataset, otherwise the
        # fixed train/eval size.
        if type(self.input_size) == list:
            maped_idx = self.idx_map[index]
            if maped_idx > len(self.input_size) - 1:
                size = 512
            else:
                size = self.input_size[maped_idx]
        else:
            size = self.input_size
        self.feat_size = size / 16 # upsampled MAE feature size

        # Load the visible image; if the read failed, fall back to a random
        # sample. (The None check must happen before touching img.shape.)
        img = cv2.imread(self.data[index])
        while img is None:
            print('Bad image {}...'.format(self.data[index]))
            idx = random.randint(0, len(self.data) - 1)
            img = cv2.imread(self.data[idx])
        A, B, _ = img.shape

        # The paired infrared path is derived from the visible path.
        # NOTE(review): if the fallback above replaced img, img1 still comes
        # from the original index - confirm this mismatch is acceptable.
        p1 = self.data[index].replace('VVVV', 'IIII').replace('VIS', 'IR')
        img1 = cv2.imread(p1)
        img = img[:, :, ::-1]  # BGR -> RGB

        # Proportionally shrink so a 448 side maps to 256, then enforce a
        # minimum side of 259 so at least one full 256 tile fits per axis.
        scale_percent = 256 / 448
        new_width = int(B * scale_percent)
        new_height = int(A * scale_percent)
        if new_width < 256:
            rate = 259 / new_width
            new_width = 259
            new_height = int(new_height * rate)
        if new_height < 256:
            rate = 259 / new_height
            new_height = 259
            new_width = int(new_width * rate)
        img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_AREA)
        img_copy = img.copy()

        # (H,W,C) -> (1,C,H,W) tensor, then tile into 256x256 crops.
        img = torch.Tensor(img.copy())
        img = torch.unsqueeze(img, 0)
        img = torch.einsum('bhwc->bchw', img)
        img, img_256, position = self.split1(img)

        # Same preprocessing for the infrared image.
        img1 = img1[:, :, ::-1]
        img1 = cv2.resize(img1, (new_width, new_height), interpolation=cv2.INTER_AREA)
        img1 = torch.Tensor(img1.copy())
        img1 = torch.unsqueeze(img1, 0)
        img1 = torch.einsum('bhwc->bchw', img1)
        img1, img1_256, position1 = self.split1(img1)

        mask = self.load_mask(img_copy, index)
        mask_256 = mask
        mask1 = self.load_mask1(img_copy, index)
        mask1_256 = mask1

        batch = dict()
        batch['image'] = img
        batch['img_256'] = img_256
        batch['mask'] = mask
        batch['mask_256'] = mask_256

        batch['image1'] = img1
        batch['img1_256'] = img1_256
        batch['mask1'] = mask1
        batch['mask1_256'] = mask1_256

        batch['size_ratio'] = size / self.default_size
        batch['name'] = self.load_name(index)
        batch['feat_size'] = [self.feat_size]
        batch['position'] = position
        batch['new_width'] = new_width
        batch['new_height'] = new_height
        if self.use_mpe:
            # Stack one positional encoding per mask tile along dim 0.
            # NOTE(review): this loop indexes mask as the (N,1,H,W) tile
            # tensor produced in eval mode; a raw 2-D training mask would
            # not index this way - confirm use_mpe is eval-only.
            for i in range(mask.shape[0]):
                rel_pos, abs_pos, direct = self.load_masked_position_encoding(mask_256[i][0].numpy())
                if i == 0:
                    batch['rel_pos'] = torch.LongTensor(rel_pos)
                    batch['abs_pos'] = torch.LongTensor(abs_pos)
                    batch['direct'] = torch.LongTensor(direct)
                elif i == 1:
                    batch['rel_pos'] = torch.stack([batch['rel_pos'], torch.LongTensor(rel_pos)], dim=0)
                    batch['abs_pos'] = torch.stack([batch['abs_pos'], torch.LongTensor(abs_pos)], dim=0)
                    batch['direct'] = torch.stack([batch['direct'], torch.LongTensor(direct)], dim=0)
                else:
                    batch['rel_pos'] = torch.cat([batch['rel_pos'], torch.LongTensor(rel_pos).unsqueeze(0)], dim=0)
                    batch['abs_pos'] = torch.cat([batch['abs_pos'], torch.LongTensor(abs_pos).unsqueeze(0)], dim=0)
                    batch['direct'] = torch.cat([batch['direct'], torch.LongTensor(direct).unsqueeze(0)], dim=0)
        return batch

    def make_coord(self, shape, ranges=None, flatten=True):
        """ Make coordinates at grid centers.
        """
        coord_seqs = []
        for i, n in enumerate(shape):
            if ranges is None:
                v0, v1 = -1, 1
            else:
                v0, v1 = ranges[i]
            r = (v1 - v0) / (2 * n)
            seq = v0 + r + (2 * r) * torch.arange(n).float()
            coord_seqs.append(seq)
        ret = torch.stack(torch.meshgrid(*coord_seqs), dim=-1)
        if flatten:
            ret = ret.view(-1, ret.shape[-1])
        return ret

    def load_masked_position_encoding(self, mask):
        """Compute positional encodings for pixels inside the masked region.

        The known region (mask == 0) is iteratively dilated into the hole
        with 3x3 filters; the iteration at which each hole pixel is first
        reached becomes its distance index, and four corner-shaped filters
        record from which diagonal direction(s) the known region arrived.

        Args:
            mask: 2-D array, 255 (or >0) = hole, 0 = known.
                  # assumes a single-channel uint8-like array - TODO confirm callers
        Returns:
            rel_pos: distance quantized to [0, self.pos_num), resized back
                to the original mask resolution.
            abs_pos: raw distance index at self.pos_size resolution.
            direct: (H, W, 4) per-direction arrival flags at original size.
        """
        ori_mask = mask.copy()
        ori_h, ori_w = ori_mask.shape[0:2]
        ori_mask = ori_mask / 255
        # Work at a fixed pos_size resolution, then re-binarize after resize.
        mask = cv2.resize(mask, (self.pos_size, self.pos_size), interpolation=cv2.INTER_AREA)
        mask[mask > 0] = 255
        h, w = mask.shape[0:2]
        mask3 = mask.copy()
        # mask3: 1 = known, 0 = hole.
        mask3 = 1. - (mask3 / 255.0)
        pos = np.zeros((h, w), dtype=np.int32)
        direct = np.zeros((h, w, 4), dtype=np.int32)
        i = 0
        # Grow the known region one ring per iteration until the hole is filled.
        while np.sum(1 - mask3) > 0:
            i += 1
            mask3_ = cv2.filter2D(mask3, -1, self.ones_filter)
            mask3_[mask3_ > 0] = 1
            # Pixels newly reached in this iteration get distance index i.
            sub_mask = mask3_ - mask3
            pos[sub_mask == 1] = i

            # Four directional dilations mark which corner direction reached
            # each newly covered pixel.
            m = cv2.filter2D(mask3, -1, self.d_filter1)
            m[m > 0] = 1
            m = m - mask3
            direct[m == 1, 0] = 1

            m = cv2.filter2D(mask3, -1, self.d_filter2)
            m[m > 0] = 1
            m = m - mask3
            direct[m == 1, 1] = 1

            m = cv2.filter2D(mask3, -1, self.d_filter3)
            m[m > 0] = 1
            m = m - mask3
            direct[m == 1, 2] = 1

            m = cv2.filter2D(mask3, -1, self.d_filter4)
            m[m > 0] = 1
            m = m - mask3
            direct[m == 1, 3] = 1

            mask3 = mask3_

        abs_pos = pos.copy()
        # Normalize distance by half the working size, then quantize to
        # pos_num bins (values may exceed 1 before clipping).
        rel_pos = pos / (self.pos_size / 2)  # to 0~1 maybe larger than 1
        rel_pos = (rel_pos * self.pos_num).astype(np.int32)
        rel_pos = np.clip(rel_pos, 0, self.pos_num - 1)

        # Map encodings back to the original mask resolution and zero them
        # outside the hole.
        if ori_w != w or ori_h != h:
            rel_pos = cv2.resize(rel_pos, (ori_w, ori_h), interpolation=cv2.INTER_NEAREST)
            rel_pos[ori_mask == 0] = 0
            direct = cv2.resize(direct, (ori_w, ori_h), interpolation=cv2.INTER_NEAREST)
            direct[ori_mask == 0, :] = 0

        return rel_pos, abs_pos, direct

    def load_mask(self, img, index):
        """Return a binary {0, 255} mask matching `img` (H, W, C).

        Eval: reads mask `index` (mod list length) from self.mask_list,
        resizes to the image, binarizes and returns it tiled into 256x256
        crops as a (N, 1, 256, 256) tensor.
        Train: draws per config['mask_rate'] - an irregular brush mask, a
        segmentation mask, or the union of one of each - and returns a 2-D
        uint8 array.
        """
        imgh, imgw, c = img.shape
        if self.training is False:
            mask = cv2.imread(self.mask_list[index % len(self.mask_list)], cv2.IMREAD_GRAYSCALE)
            mask = cv2.resize(mask, (imgw, imgh), interpolation=cv2.INTER_NEAREST)
            mask = (mask > 127).astype(np.uint8) * 255
            img = torch.Tensor(mask.copy())
            img = img.unsqueeze(0).unsqueeze(0)
            img, position = self.split(img)

            return img
        else:  # train mode: 40% mask with random brush, 40% mask with coco mask, 20% with additions
            rdv = random.random()
            if rdv < self.mask_rate[0]:
                mask_index = random.randint(0, len(self.irregular_mask_list) - 1)
                mask = cv2.imread(self.irregular_mask_list[mask_index],
                                  cv2.IMREAD_GRAYSCALE)
            elif rdv < self.mask_rate[1]:
                mask_index = random.randint(0, len(self.segment_mask_list) - 1)
                mask = cv2.imread(self.segment_mask_list[mask_index],
                                  cv2.IMREAD_GRAYSCALE)
            else:
                mask_index1 = random.randint(0, len(self.segment_mask_list) - 1)
                mask_index2 = random.randint(0, len(self.irregular_mask_list) - 1)
                # np.float was removed in NumPy 1.24; it was an alias for the
                # builtin float, which is used directly here.
                mask1 = cv2.imread(self.segment_mask_list[mask_index1],
                                   cv2.IMREAD_GRAYSCALE).astype(float)
                mask2 = cv2.imread(self.irregular_mask_list[mask_index2],
                                   cv2.IMREAD_GRAYSCALE).astype(float)
                mask = np.clip(mask1 + mask2, 0, 255).astype(np.uint8)

            if mask.shape[0] != imgh or mask.shape[1] != imgw:
                mask = cv2.resize(mask, (imgw, imgh), interpolation=cv2.INTER_NEAREST)
            mask = (mask > 127).astype(np.uint8) * 255  # threshold due to interpolation
            return mask

    def load_mask1(self, img, index):
        """Return a binary {0, 255} mask for the infrared branch.

        Eval: reads mask `index` (mod list length) from self.mask_list1 and
        returns it tiled into 256x256 crops as a (N, 1, 256, 256) tensor.
        Train: same rate scheme as load_mask, but in the union branch the
        mask paths have 'vis' swapped for 'ir' and each mask is resized to
        the image before summing.
        """
        imgh, imgw, c = img.shape
        if self.training is False:
            mask = cv2.imread(self.mask_list1[index % len(self.mask_list1)], cv2.IMREAD_GRAYSCALE)
            mask = cv2.resize(mask, (imgw, imgh), interpolation=cv2.INTER_NEAREST)
            mask = (mask > 127).astype(np.uint8) * 255
            img = torch.Tensor(mask.copy())
            img = img.unsqueeze(0).unsqueeze(0)
            img, position = self.split(img)

            return img
        else:  # train mode: 40% mask with random brush, 40% mask with coco mask, 20% with additions
            rdv = random.random()
            if rdv < self.mask_rate[0]:
                mask_index = random.randint(0, len(self.irregular_mask_list) - 1)
                p3 = self.irregular_mask_list[mask_index]
                mask = cv2.imread(p3, cv2.IMREAD_GRAYSCALE)
            elif rdv < self.mask_rate[1]:
                mask_index = random.randint(0, len(self.segment_mask_list) - 1)
                p4 = self.segment_mask_list[mask_index]
                mask = cv2.imread(p4,
                                  cv2.IMREAD_GRAYSCALE)
            else:
                mask_index1 = random.randint(0, len(self.segment_mask_list) - 1)
                mask_index2 = random.randint(0, len(self.irregular_mask_list) - 1)

                # np.float was removed in NumPy 1.24; it was an alias for the
                # builtin float, which is used directly here.
                p4 = self.segment_mask_list[mask_index1].replace('vis', 'ir')
                mask1 = cv2.resize(cv2.imread(p4,
                                   cv2.IMREAD_GRAYSCALE).astype(float), (imgw, imgh))
                p3 = self.irregular_mask_list[mask_index2].replace('vis', 'ir')
                mask2 = cv2.resize(cv2.imread(p3,
                                   cv2.IMREAD_GRAYSCALE).astype(float), (imgw, imgh))
                mask = np.clip(mask1 + mask2, 0, 255).astype(np.uint8)

            if mask.shape[0] != imgh or mask.shape[1] != imgw:
                mask = cv2.resize(mask, (imgw, imgh), interpolation=cv2.INTER_NEAREST)
            mask = (mask > 127).astype(np.uint8) * 255  # threshold due to interpolation
            return mask

    def to_tensor(self, img, norm=False):
        """Convert a PIL image / ndarray to a float tensor, optionally
        normalizing each channel to mean 0.5, std 0.5.

        Note: the module-level `F` is torch.nn.functional, which has no
        `to_tensor` (the torchvision functional import was commented out),
        so the original `F.to_tensor` raised AttributeError. Import
        torchvision's functional API locally instead (the file already
        depends on torchvision via `transforms`).
        """
        import torchvision.transforms.functional as TF
        img_t = TF.to_tensor(img).float()
        if norm:
            img_t = TF.normalize(img_t, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
        return img_t

    def resize(self, img, height, width, center_crop=False):
        """Resize `img` to (height, width), optionally center-cropping to a
        square first.

        cv2.resize expects its target size as (width, height); the original
        code passed (height, width), so any non-square target came out with
        transposed dimensions. Fixed here.
        """
        imgh, imgw = img.shape[0:2]

        if center_crop and imgh != imgw:
            # Center-crop to the shorter side before resizing.
            side = np.minimum(imgh, imgw)
            j = (imgh - side) // 2
            i = (imgw - side) // 2
            img = img[j:j + side, i:i + side, ...]

        # INTER_AREA when shrinking, INTER_LINEAR when enlarging.
        if imgh > height and imgw > width:
            inter = cv2.INTER_AREA
        else:
            inter = cv2.INTER_LINEAR
        img = cv2.resize(img, (width, height), interpolation=inter)

        return img

    def crop(self, img, height, width):
        imgh, imgw = img.shape[0:2]
        w_start = random.randint(0, imgw - width)
        h_start = random.randint(0, imgh - height)
        cropped_img = img[h_start:h_start + height, w_start:w_start + width, :]
        return cropped_img, w_start, h_start

    def load_flist(self, flist):

        if isinstance(flist, list):
            return flist

        # flist: image file path, image directory path, text file flist path
        if isinstance(flist, str):
            if os.path.isdir(flist):
                # flist = list(glob.glob(flist + '/*.jpg')) + list(glob.glob(flist + '/*.png'))
                flist = self.getfilelist(flist)
                flist.sort()
                return flist

            if os.path.isfile(flist):
                try:
                    return np.genfromtxt(flist, dtype=np.str, encoding='utf-8')
                except:
                    return [flist]

        return []

    def create_iterator(self, batch_size):
        """Yield batches forever, building a fresh DataLoader (drop_last,
        no shuffle) each time the dataset is exhausted."""
        while True:
            loader = DataLoader(
                dataset=self,
                batch_size=batch_size,
                drop_last=True,
            )
            yield from loader

    def getfilelist(self, path):
        all_file = []
        for dir, folder, file in os.walk(path):
            for i in file:
                t = "%s/%s" % (dir, i)
                if t.endswith('.png') or t.endswith('.jpg') or t.endswith('.JPG') or \
                        t.endswith('.PNG') or t.endswith('.JPEG'):
                    all_file.append(t)
        return all_file
