import os
import cv2
import torch
import csv
import random
import torch.utils.data as data
import torchvision.transforms.functional as TF
from option import args
from torchvision import transforms
from skimage.io import imshow, imread
toGray=transforms.Grayscale() 

from trans_filters import *
class MEFdataset(data.Dataset):
    """Multi-exposure fusion training dataset of over-/under-exposed pairs.

    File names are read from '<args.dir_train>/over' and
    '<args.dir_train>/under'. Each item is a random, spatially aligned
    patch_size x patch_size crop of the Y (luminance) channel of both
    images, optionally passed through `transform`.
    """

    def __init__(self, transform):
        super(MEFdataset, self).__init__()
        self.dir_prefix = args.dir_train
        # Sort both listings: os.listdir returns entries in arbitrary order,
        # so without sorting index i of `over` may not pair with index i of
        # `under`.
        self.over = sorted(os.listdir(os.path.join(self.dir_prefix, 'over')))
        self.under = sorted(os.listdir(os.path.join(self.dir_prefix, 'under')))

        self.patch_size = args.patch_size
        self.transform = transform

    def __len__(self):
        assert len(self.over) == len(self.under)
        return len(self.over)

    def __getitem__(self, idx):
        over = cv2.imread(os.path.join(self.dir_prefix, 'over', self.over[idx]))
        over = cv2.cvtColor(over, cv2.COLOR_BGR2YCrCb)
        over = over[:, :, 0:1]  # keep only the Y channel, shape stays (H, W, 1)
        under = cv2.imread(os.path.join(self.dir_prefix, 'under', self.under[idx]))
        under = cv2.cvtColor(under, cv2.COLOR_BGR2YCrCb)
        under = under[:, :, 0:1]

        over_p, under_p = self.get_patch(over, under)
        if self.transform:
            over_p = self.transform(over_p)
            under_p = self.transform(under_p)

        return over_p, under_p

    def get_patch(self, over, under):
        """Crop the same random patch_size x patch_size window from both images.

        Raises:
            ValueError: if either image dimension is smaller than the patch
                size (previously this surfaced as a cryptic error from
                random.randint).
        """
        h, w = over.shape[:2]
        stride = self.patch_size
        if h < stride or w < stride:
            raise ValueError(f"patch_size {stride} exceeds image size ({h}, {w})")

        x = random.randint(0, w - stride)
        y = random.randint(0, h - stride)

        over = over[y:y + stride, x:x + stride, :]
        under = under[y:y + stride, x:x + stride, :]

        return over, under
    

class VISIRData(MEFdataset):
    """Visible/infrared fusion dataset.

    Reuses MEFdataset's attribute names: `over` holds visible-light images
    from '<set_prefix>/<split>/vi', `under` holds infrared images from
    '<set_prefix>/<split>/ir'. In 'train' mode a random aligned patch is
    cropped and randomly flipped; otherwise the full resized images are
    used. Each item is (visible, infrared, reference edge map).
    """

    def __init__(self, set_prefix, split):
        # Intentionally skips MEFdataset.__init__ (it reads args.dir_train
        # and requires a transform); goes straight to data.Dataset.__init__.
        super(MEFdataset, self).__init__()
        self.split = split
        self.dir_prefix = os.path.join(set_prefix, split)

        # Sorted so index i of each listing refers to the same scene
        # (os.listdir order is arbitrary).
        self.over = sorted(e for e in os.listdir(self.dir_prefix + '/vi/') if e.endswith('.png'))
        self.under = sorted(e for e in os.listdir(self.dir_prefix + '/ir/') if e.endswith('.png'))

        self.patch_size = args.patch_size

    def transform(self, over, under):
        """Convert the (visible, infrared) pair to tensors, build the
        reference edge map and, in 'train' mode, apply random flips to all
        three outputs consistently."""
        over = TF.to_tensor(over)
        under = TF.to_tensor(under)

        vis_gray = toGray(over)
        # Joint input: per-pixel max of visible luminance and infrared.
        joint_in = torch.max(vis_gray, under)
        diff_edge = new_int1(joint_in, vis_gray, under)

        if self.split == 'train':
            # Random horizontal flipping. diff_edge is flipped together with
            # the inputs so the reference edge map stays spatially aligned
            # (bug fix: it was previously left unflipped).
            if random.random() > 0.5:
                over = TF.hflip(over)
                under = TF.hflip(under)
                diff_edge = TF.hflip(diff_edge)

            # Random vertical flipping (same alignment requirement).
            if random.random() > 0.5:
                over = TF.vflip(over)
                under = TF.vflip(under)
                diff_edge = TF.vflip(diff_edge)
        return over, under, diff_edge

    def __getitem__(self, idx):
        over = cv2.imread(self.dir_prefix + '/vi/' + self.over[idx])
        over = cv2.resize(over, (640, 480))
        over = cv2.cvtColor(over, cv2.COLOR_BGR2YCrCb)
        over = over[:, :, 0:1]  # Y channel only, shape (H, W, 1)
        under = cv2.imread(self.dir_prefix + '/ir/' + self.under[idx])
        under = cv2.resize(under, (640, 480))
        under = cv2.cvtColor(under, cv2.COLOR_BGR2YCrCb)
        under = under[:, :, 0:1]

        if self.split == 'train':
            over_p, under_p = self.get_patch(over, under)
            over_p, under_p, refedge = self.transform(over_p, under_p)
            return over_p, under_p, refedge
        else:
            over, under, refedge = self.transform(over, under)
            return over, under, refedge
    
class TestData(data.Dataset):
    """Test-time multi-exposure dataset.

    Reads full over/under image pairs from '<args.dir_test>/over' and
    '<args.dir_test>/under' (no cropping), converts them to YCrCb,
    optionally applies `transform`, and stacks the pair along a new
    leading dimension.
    """

    def __init__(self, transform):
        super(TestData, self).__init__()
        self.transform = transform
        self.dir_prefix = args.dir_test
        # Sorted so over/under entries at the same index form a pair
        # (os.listdir order is arbitrary).
        self.over_dir = sorted(os.listdir(os.path.join(self.dir_prefix, 'over')))
        self.under_dir = sorted(os.listdir(os.path.join(self.dir_prefix, 'under')))

    def __getitem__(self, idx):
        over = cv2.imread(os.path.join(self.dir_prefix, 'over', self.over_dir[idx]))
        under = cv2.imread(os.path.join(self.dir_prefix, 'under', self.under_dir[idx]))
        over_img = cv2.cvtColor(over, cv2.COLOR_BGR2YCrCb)
        under_img = cv2.cvtColor(under, cv2.COLOR_BGR2YCrCb)

        if self.transform:
            over_img = self.transform(over_img)
            under_img = self.transform(under_img)

        # New leading dim: (2, ...) — presumably (2, C, H, W) after a
        # ToTensor-style transform; confirm against the caller.
        img_stack = torch.stack((over_img, under_img), 0)
        return img_stack

    def __len__(self):
        assert len(self.over_dir) == len(self.under_dir)
        return len(self.over_dir)

class enhancedDotDataset(data.Dataset):
    """Dataset driven by a CSV index of (enhanced-VIS, raw-VIS, IR) paths.

    Each item is (vis_image_out, ir_image_out, joint_in, diff_edge) as
    produced by `transform`; in 'train' mode a random 160x160 crop and
    random flips are applied consistently to all four tensors.
    """

    def __init__(self, data_pth, split='train', enhance_pth='inplace', csv_file='enhanced_images.csv', train=True):
        # `train` is unused; kept for backward compatibility with callers.
        self.split = split
        self.VIS_files = []
        self.VIS_enhanced_files = []
        self.IR_files = []

        # Bug fix: honour the csv_file parameter — the filename was
        # previously hard-coded to 'enhanced_images.csv' in both branches.
        if enhance_pth == 'inplace':
            csv_file_path = os.path.join(data_pth, split, 'vi', 'enhanced', csv_file)
        else:
            csv_file_path = os.path.join(enhance_pth, csv_file)

        # Make sure the CSV index exists before reading.
        assert os.path.exists(csv_file_path), f"CSV file does not exist at the specified path: {csv_file_path}"
        with open(csv_file_path, 'r') as file:
            reader = csv.reader(file)
            next(reader)  # Skip the header row
            for row in reader:
                # Columns: row[1]=enhanced VIS path, row[2]=raw VIS path,
                # row[3]=IR path (row[0] presumably an index — verify
                # against the CSV writer).
                self.VIS_enhanced_files.append(row[1])
                self.VIS_files.append(row[2])
                self.IR_files.append(row[3])

        print(len(self.VIS_files))

    def transform(self, vis_image, vis_enhanced_image, ir_image):
        """Build the network inputs from one (VIS, enhanced-VIS, IR) triple.

        Returns (vis_image_out, ir_image_out, joint_in, diff_edge); all
        four receive identical crop/flip augmentation in 'train' mode.
        """
        vis_image_raw = TF.to_tensor(vis_image)
        vis_image_equalized = TF.to_tensor(vis_enhanced_image)

        # Edge responses of the enhanced grayscale image (helpers from
        # trans_filters); concatenated with raw + equalized VIS channels.
        vis_gray = toGray(vis_image_equalized)
        vis_sobel = sk_sobel_filter(vis_gray)
        vis_laplace = sk_laplace_filter(vis_gray)
        vis_image_out = torch.cat((vis_image_raw, vis_sobel, vis_laplace, vis_image_equalized), dim=0)

        # Same edge features for the infrared image.
        ir_image_raw = TF.to_tensor(ir_image)
        ir_sobel = sk_sobel_filter(ir_image_raw)
        ir_laplace = sk_laplace_filter(ir_image_raw)
        ir_image_out = torch.cat((ir_image_raw, ir_sobel, ir_laplace), dim=0)

        # Joint input: per-pixel max of VIS gray and IR; diff_edge is the
        # reference edge map produced by trans_filters.new_int.
        joint_in = torch.max(vis_gray, ir_image_raw)
        joint_edge = sk_sobel_filter(joint_in)
        max_edge = torch.max(vis_sobel, ir_sobel)
        diff_edge = new_int(joint_in, vis_gray, ir_image_raw)

        if self.split == 'train':
            # Random crop: one window, applied to every tensor so they stay
            # spatially aligned.
            i, j, h, w = transforms.RandomCrop.get_params(
                vis_image_out, output_size=(160, 160))
            vis_image_out = TF.crop(vis_image_out, i, j, h, w)
            ir_image_out = TF.crop(ir_image_out, i, j, h, w)
            joint_in = TF.crop(joint_in, i, j, h, w)
            diff_edge = TF.crop(diff_edge, i, j, h, w)

            # Random horizontal flipping
            if random.random() > 0.5:
                vis_image_out = TF.hflip(vis_image_out)
                ir_image_out = TF.hflip(ir_image_out)
                joint_in = TF.hflip(joint_in)
                diff_edge = TF.hflip(diff_edge)

            # Random vertical flipping
            if random.random() > 0.5:
                vis_image_out = TF.vflip(vis_image_out)
                ir_image_out = TF.vflip(ir_image_out)
                joint_in = TF.vflip(joint_in)
                diff_edge = TF.vflip(diff_edge)

        return vis_image_out, ir_image_out, joint_in, diff_edge

    def __getitem__(self, index):
        VIS_img = imread(self.VIS_files[index])
        VIS_enhanced_img = imread(self.VIS_enhanced_files[index])
        IR_img = imread(self.IR_files[index])
        vis_image, ir_image, joint_int, diff_edge = self.transform(VIS_img, VIS_enhanced_img, IR_img)

        return vis_image, ir_image, joint_int, diff_edge

    def __len__(self):
        return len(self.VIS_files)
