import os
import cv2
import argparse
import torch
import numpy as np
import random
import torch.utils.data as data
import torchvision.transforms as transforms
from MyOption import args
import torchvision.transforms.functional as TF
toGray=transforms.Grayscale() 
from trans_filters import *
from torchvision.transforms import Resize
from PIL import Image
class VISIRData(data.Dataset):
    """Paired visible/infrared image dataset.

    Loads matching ``*.png`` pairs from ``<set_prefix>/<split>/vi`` and
    ``<set_prefix>/<split>/ir``, converts each to its Y (luma) channel,
    and returns ``(visible, infrared, diff_edge)`` tensors.  For the
    ``'train'`` split a random patch is cropped and random flips are
    applied jointly to both modalities.
    """

    def __init__(self, set_prefix, split):
        """
        Args:
            set_prefix: root directory containing the split folders.
            split: sub-folder name, e.g. ``'train'`` — also enables
                augmentation when equal to ``'train'``.
        """
        super(VISIRData, self).__init__()

        self.split = split
        self.dir_prefix = os.path.join(set_prefix, split)

        # Sort the listings so index i in `vi` pairs with index i in `ir`:
        # os.listdir order is arbitrary and filesystem-dependent.
        self.img1_dir = sorted(
            entry for entry in os.listdir(os.path.join(self.dir_prefix, 'vi'))
            if entry.endswith('.png'))
        self.img2_dir = sorted(
            entry for entry in os.listdir(os.path.join(self.dir_prefix, 'ir'))
            if entry.endswith('.png'))

        self.patch_size = args.patch_size

    def transform(self, over, under):
        """Convert to tensors, optionally augment, and derive the edge target.

        Flips are applied *before* the edge map is computed so that the
        returned ``diff_edge`` stays spatially aligned with the (possibly
        flipped) images.  (Previously the edge was computed first, leaving
        it misaligned whenever an augmentation flip fired.)

        Args:
            over: visible-light Y-channel image (H, W, 1) ndarray.
            under: infrared Y-channel image (H, W, 1) ndarray.

        Returns:
            Tuple ``(visible, infrared, diff_edge)`` of tensors.
        """
        over = TF.to_tensor(over)
        under = TF.to_tensor(under)

        if self.split == 'train':
            # Random horizontal flip, applied jointly to keep the pair aligned.
            if random.random() > 0.5:
                over = TF.hflip(over)
                under = TF.hflip(under)

            # Random vertical flip, likewise joint.
            if random.random() > 0.5:
                over = TF.vflip(over)
                under = TF.vflip(under)

        # Edge reference: element-wise max of visible-gray and IR feeds the
        # project-specific edge extractor (new_int1 from trans_filters).
        vis_gray = toGray(over)
        joint_in = torch.max(vis_gray, under)
        diff_edge = new_int1(joint_in, vis_gray, under)

        return over, under, diff_edge

    def __len__(self):
        assert len(self.img1_dir) == len(self.img2_dir)
        return len(self.img1_dir)

    def _load_y(self, subdir, name):
        """Read one image, resize to 640x480 and return its Y channel (H, W, 1)."""
        img = cv2.imread(os.path.join(self.dir_prefix, subdir, name))
        img = cv2.resize(img, (640, 480))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
        return img[:, :, 0:1]

    def __getitem__(self, idx):
        img1 = self._load_y('vi', self.img1_dir[idx])
        img2 = self._load_y('ir', self.img2_dir[idx])

        if self.split == 'train':
            img1_p, img2_p = self.get_patch(img1, img2)
            img1_p, img2_p, refedge = self.transform(img1_p, img2_p)
            return img1_p, img2_p, refedge

        img1, img2, refedge = self.transform(img1, img2)
        return img1, img2, refedge

    def get_patch(self, img1, img2):
        """Crop the same random ``patch_size`` square from both images.

        The patch side is clamped to the image dimensions so small images
        no longer make ``random.randint`` raise on a negative range.
        """
        h, w = img1.shape[:2]

        stride = min(self.patch_size, h, w)

        x = random.randint(0, w - stride)
        y = random.randint(0, h - stride)

        img1_p = img1[y:y + stride, x:x + stride]
        img2_p = img2[y:y + stride, x:x + stride]

        return img1_p, img2_p


class TestData(data.Dataset):
    """Test-time dataset pairing two modality folders under ``args.dir_test``.

    Returns grayscale pairs for CT inputs, and (Y, gray, CrCb) for color
    modalities so the fused Y channel can be recombined with chroma later.
    """

    def __init__(self, transform=None):
        super(TestData, self).__init__()
        self.transform = transform
        self.dir_prefix = args.dir_test

        # Sort so the two listings pair up by index: os.listdir order is
        # arbitrary and filesystem-dependent.
        self.img1_dir = sorted(os.listdir(self.dir_prefix + args.img_type1))
        self.img2_dir = sorted(os.listdir(self.dir_prefix + args.img_type2))

    def __getitem__(self, index):
        img_name = str(self.img1_dir[index])

        if args.img_type1 == 'CT/':
            # CT is single-channel: load both modalities as grayscale.
            img1 = cv2.imread(self.dir_prefix + args.img_type1 + self.img1_dir[index], cv2.IMREAD_GRAYSCALE)
            img2 = cv2.imread(self.dir_prefix + args.img_type2 + self.img2_dir[index], cv2.IMREAD_GRAYSCALE)
            if self.transform:
                img1 = self.transform(img1)
                img2 = self.transform(img2)

            return img_name, img1, img2  # img1[YCrCb]:3,256,256  img2[Gray]:1,256,256
        else:
            # Color modality (PET/SPECT/...): fuse on Y, keep CrCb for
            # re-colorization after fusion.
            img1 = cv2.imread(self.dir_prefix + args.img_type1 + self.img1_dir[index])
            img2 = cv2.imread(self.dir_prefix + args.img_type2 + self.img2_dir[index], cv2.IMREAD_GRAYSCALE)

            img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2YCrCb)  # CT/PET/SPECT 256,256,3

            img1_Y = img1[:, :, 0:1]
            # CrCb channels moved to channel-first (2, H, W) layout.
            img1_CrCb = img1[:, :, 1:3].transpose(2, 0, 1)

            if self.transform:
                img1_Y = self.transform(img1_Y)
                img2 = self.transform(img2)

            return img_name, img1_Y, img2, img1_CrCb  # img1[YCrCb]:3,256,256  img2[Gray]:1,256,256

    def __len__(self):
        assert len(self.img1_dir) == len(self.img2_dir)
        return len(self.img1_dir)


if __name__ == "__main__":
    # Smoke test: iterate the training split once and print tensor shapes.
    #
    # NOTE(review): the original instantiated an undefined ``TrainData``
    # class with a ``transform=`` kwarg, which raises NameError; the
    # dataset defined in this module is ``VISIRData(set_prefix, split)``.
    # The dataset root is assumed to be ``args.dir_train`` — confirm
    # against MyOption.
    train_set = VISIRData(args.dir_train, 'train')
    train_loader = data.DataLoader(train_set,
                                   batch_size=1,
                                   shuffle=True,
                                   drop_last=True,
                                   num_workers=2,
                                   pin_memory=True)
    count = 0
    # Loop variable renamed from ``data`` to avoid shadowing the
    # ``torch.utils.data`` module alias used above.
    for idx, (img1, img2, refedge) in enumerate(train_loader):
        print(img1.shape, img2.shape, refedge.shape)
        count += 1
    print(count)