import argparse
import os
import random
import sys

import numpy as np
import torch
import torch.utils.data.distributed
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from tqdm import tqdm
from transforms import *
import tifffile
import cv2


def file_name(file_dir):
    """Return the base names of every file under *file_dir*, recursively.

    Only file names (no directory components) are returned; the walk order
    follows ``os.walk``.
    """
    names = []
    for _root, _dirs, files in os.walk(file_dir):
        # `files` already contains bare base names, so no path splitting
        # is needed (the original os.path.split(file)[1] was a no-op).
        names.extend(files)
    return names


def remove_leading_slash(s):
    """Strip a single leading '/' or '\\' from *s*, if present.

    Fixed: the original indexed ``s[0]`` unconditionally and raised
    ``IndexError`` on an empty string; an empty string is now returned
    unchanged.
    """
    if s and s[0] in ('/', '\\'):
        return s[1:]
    return s


def pil_loader(path, mode='rgb'):
    """Open the image at *path* with PIL and convert its pixel mode.

    mode 'rgb' -> 'RGB', mode 'l' -> 'L' (8-bit grayscale);
    any other value -> 'F' (32-bit float, single channel).
    """
    pil_mode = {'rgb': 'RGB', 'l': 'L'}.get(mode, 'F')
    with Image.open(path) as img:
        # convert() materializes the pixel data, so the handle may close.
        return img.convert(pil_mode)


class TZBDataset(Dataset):
    """Depth-estimation dataset over large remote-sensing tiles.

    mode == 'train':
        Whole tiles and their ground-truth depth maps are loaded into memory
        up front; each __getitem__ returns a random crop, optionally
        CutMix-augmented, then the configured flip/rotate transforms.

    mode == 'online_eval':
        args.val_mode == 'online_cut'  -- tiles are kept in memory and cut
            into a sliding-window grid on the fly (stateful cursor; see the
            note in __getitem__).
        args.val_mode == 'offline_cut' -- pre-cut crop files are read from
            disk, one file per sample.
    """

    def __init__(self, args, mode, transform=None):
        # Aerial tiles are huge; disable PIL's decompression-bomb guard.
        Image.MAX_IMAGE_PIXELS = None
        self.args = args
        self.image_sets = []   # in-memory tiles, or file names for offline_cut
        self.depth_sets = []   # matching ground-truth depth maps
        self.val_mode = args.val_mode
        if mode == 'online_eval':
            self.image_path = args.data_path_eval
            self.depth_path = args.gt_path_eval
            if args.val_mode == 'online_cut':
                # Sliding-window cursor state, advanced by __getitem__.
                self.width = 0
                self.height = 0
                self.img_id = 0
                self.val_image_num = 0
                if self.args.dataset in ['vaihingen', 'potsdam']:
                    # Each line: "<image_path> <gt_path>".
                    with open(args.filenames_file_eval, 'r') as f:
                        lines = f.read().splitlines()
                    for line in lines:
                        image_path, gt_path = line.split(' ')
                        self.image_sets.append(tifffile.imread(image_path))
                        self.depth_sets.append(cv2.imread(gt_path))
                elif self.args.dataset in ['plain', 'hill', 'mountain']:
                    for i in tqdm(file_name(self.image_path)):
                        self.image_sets.append(tifffile.imread(self.image_path + '/' + i))
                        self.depth_sets.append(
                            tifffile.imread(self.depth_path + '/' + i.replace('.TIF', '_gt.tiff')))

                # Total number of sliding-window crops over all tiles
                # (axis 0 stepped by val_step against input_height, axis 1
                # against input_width — mirrors the cursor logic below).
                for image in self.image_sets:
                    height_image_num = ((image.shape[0] - self.args.input_height) // self.args.val_step) + 1
                    width_image_num = ((image.shape[1] - self.args.input_width) // self.args.val_step) + 1
                    self.val_image_num += width_image_num * height_image_num
                print(f'There are {self.val_image_num} images in val set')
            elif args.val_mode == 'offline_cut':
                self.image_sets = file_name(self.image_path)
        elif mode == 'train':
            self.image_path = args.data_path
            self.depth_path = args.gt_path

            if self.args.dataset in ['vaihingen', 'potsdam']:
                # NOTE(review): train mode reads args.filenames_file_eval,
                # identical to the eval branch above — looks like a
                # copy/paste; confirm whether a dedicated train filenames
                # file was intended. Left unchanged to preserve behavior.
                with open(args.filenames_file_eval, 'r') as f:
                    lines = f.read().splitlines()
                for line in lines:
                    image_path, gt_path = line.split(' ')
                    self.image_sets.append(tifffile.imread(image_path))
                    self.depth_sets.append(cv2.imread(gt_path))
            elif self.args.dataset in ['plain', 'hill', 'mountain']:
                for i in tqdm(file_name(self.image_path)):
                    self.image_sets.append(tifffile.imread(self.image_path + '/' + i))
                    self.depth_sets.append(
                        tifffile.imread(self.depth_path + '/' + i.replace('.TIF', '_gt.tiff')))

            # Geometric augmentations, applied after self.transform.
            aug_list = []
            print('using tzb_dataloader_new.py')
            if args.do_random_rotate:
                print('RandomRotation')
                aug_list.append(RandomRotation(args.degree))
            if args.hori_flip_prob != 0.0:
                print("RandomHorizontalFlip")
                aug_list.append(RandomHorizontalFlip(args.hori_flip_prob))
            if args.vert_flip_prob != 0.0:
                print("RandomVerticalFlip")
                aug_list.append(RandomVerticalFlip(args.vert_flip_prob))
            self.aug_transform = Compose(aug_list)

        self.mode = mode
        self.transform = transform

    def __getitem__(self, idx):
        """Return {'image': HxWx3 float32 in [0,1], 'depth': HxWx1 float32}."""
        if self.mode == 'train':
            # With probability args.cutmix, paste a second random patch
            # into the crop (CutMix-style augmentation).
            chance = random.random()
            image, depth_gt = self.random_cut(
                self.args.input_width, self.args.input_height,
                cutmix=chance < self.args.cutmix)

        elif self.mode == 'online_eval':
            if self.val_mode == 'online_cut':
                # NOTE(review): this branch advances a cursor stored on the
                # dataset instance, so it assumes sequential, single-process
                # iteration (shuffle=False, num_workers=0) — confirm the
                # eval DataLoader is configured that way.
                if idx == 0:
                    self.img_id = 0
                # 'width' indexes axis 0 and 'height' axis 1 throughout,
                # matching random_cut's convention.
                width1 = self.width
                width2 = self.width + self.args.input_width
                height1 = self.height
                height2 = self.height + self.args.input_height
                image = self.image_sets[self.img_id][width1:width2, height1:height2]
                depth_gt = self.depth_sets[self.img_id][width1:width2, height1:height2]

                # Advance: scan axis 0 first, then step axis 1, then move
                # on to the next tile.
                if width2 + self.args.val_step <= self.image_sets[self.img_id].shape[0]:
                    self.width = width1 + self.args.val_step
                elif height2 + self.args.val_step <= self.image_sets[self.img_id].shape[1]:
                    self.height = height1 + self.args.val_step
                    self.width = 0
                else:
                    self.img_id += 1
                    self.width = 0
                    self.height = 0
            elif self.val_mode == 'offline_cut':
                image = tifffile.imread(self.image_path + '/' + self.image_sets[idx])
                if self.args.dataset in ['plain', 'hill', 'mountain']:
                    depth_gt = tifffile.imread(
                        self.depth_path + '/' + self.image_sets[idx].replace('.TIF', '_gt.tiff'))
                elif self.args.dataset in ['vaihingen']:
                    depth_gt = tifffile.imread(
                        self.depth_path + '/' + self.image_sets[idx].replace('.tiff', '_gt.tiff'))
                # NOTE(review): no depth branch for 'potsdam' here, so
                # depth_gt would be unbound for that combination — confirm
                # it is never used with offline_cut.

        image = np.asarray(image, dtype=np.float32) / 255.0
        # Single-channel sources are replicated to a 3-channel image.
        if self.args.dataset in ['plain', 'hill', 'mountain']:
            image = np.stack([image, image, image], axis=2)
        depth_gt = np.asarray(depth_gt, dtype=np.float32)
        if self.args.dataset in ['vaihingen']:
            # GT is loaded with 3 channels; keep only the first.
            depth_gt = depth_gt[:, :, 0]
        depth_gt = np.expand_dims(depth_gt, axis=2)

        # Per-dataset depth scaling; 'plain' and 'hill' are used as-is.
        if self.args.dataset == 'vaihingen':
            depth_gt = depth_gt / 255
        elif self.args.dataset == 'mountain':
            depth_gt = depth_gt / 100

        sample = {'image': image, 'depth': depth_gt}

        if self.transform:
            sample = self.transform(sample)

        if self.mode == 'train':
            sample['image'], sample['depth'] = self.aug_transform(sample['image'], sample['depth'])

        return sample

    def random_cut(self, w, h, cutmix=False):
        """Cut a random (w, h) patch (and matching depth) from a random tile.

        With cutmix=True, a second random (args.cut_width, args.cut_height)
        patch from the same tile is pasted into a random position inside the
        crop, in both image and depth.

        Bug fix: the crop slices are numpy *views* into the cached tiles;
        the original code pasted through those views and permanently
        corrupted self.image_sets / self.depth_sets for every later sample.
        The crop is now copied before any in-place write.
        """
        img_id = random.randint(0, len(self.image_sets) - 1)
        whole_img = self.image_sets[img_id]
        whole_depth = self.depth_sets[img_id]
        # 'width' indexes axis 0 (rows), 'height' axis 1 (columns).
        width1 = random.randint(0, whole_img.shape[0] - w)
        height1 = random.randint(0, whole_img.shape[1] - h)
        width2 = width1 + w
        height2 = height1 + h

        image = whole_img[width1:width2, height1:height2]
        depth_gt = whole_depth[width1:width2, height1:height2]
        if cutmix:
            # Detach from the cached tiles before writing into the crop.
            image = image.copy()
            depth_gt = depth_gt.copy()
            # Source patch location in the whole tile (RNG call order kept
            # identical to the original implementation).
            width1 = random.randint(0, whole_img.shape[0] - self.args.cut_width)
            height1 = random.randint(0, whole_img.shape[1] - self.args.cut_height)
            width2 = width1 + self.args.cut_width
            height2 = height1 + self.args.cut_height
            # Destination location inside the crop.
            sub_width1 = random.randint(0, image.shape[0] - self.args.cut_width)
            sub_height1 = random.randint(0, image.shape[1] - self.args.cut_height)
            sub_width2 = sub_width1 + self.args.cut_width
            sub_height2 = sub_height1 + self.args.cut_height
            cutimg = whole_img[width1:width2, height1:height2]
            cutgt = whole_depth[width1:width2, height1:height2]
            depth_gt[sub_width1:sub_width2, sub_height1:sub_height2] = cutgt
            image[sub_width1:sub_width2, sub_height1:sub_height2] = cutimg
        return image, depth_gt

    def train_preprocess(self, image, depth_gt):
        """Randomly flip (p=0.5) and photometrically augment (p=0.5) a pair.

        NOTE(review): not called from within this class — presumably kept
        for external callers; confirm before removing.
        """
        # Random horizontal flipping of image and depth together.
        do_flip = random.random()
        if do_flip > 0.5:
            image = (image[:, ::-1, :]).copy()
            depth_gt = (depth_gt[:, ::-1, :]).copy()

        # Random gamma, brightness, color augmentation (image only).
        do_augment = random.random()
        if do_augment > 0.5:
            image = self.augment_image(image)

        return image, depth_gt

    def augment_image(self, image):
        """Apply random gamma, brightness and per-channel color jitter.

        Expects a float image in [0, 1]; the result is clipped back to [0, 1].
        """
        # Gamma augmentation.
        gamma = random.uniform(0.9, 1.1)
        image_aug = image ** gamma

        # Brightness augmentation ('nyu' uses a wider range).
        if self.args.dataset == 'nyu':
            brightness = random.uniform(0.75, 1.25)
        else:
            brightness = random.uniform(0.9, 1.1)
        image_aug = image_aug * brightness

        # Per-channel color augmentation.
        colors = np.random.uniform(0.9, 1.1, size=3)
        white = np.ones((image.shape[0], image.shape[1]))
        color_image = np.stack([white * colors[i] for i in range(3)], axis=2)
        image_aug *= color_image
        image_aug = np.clip(image_aug, 0, 1)

        return image_aug

    def __len__(self):
        """Train length is the configured samples-per-epoch; eval length is
        the number of sliding-window crops (online) or files (offline)."""
        if self.mode == 'train':
            return self.args.image_num
        elif self.mode == 'online_eval':
            if self.val_mode == 'online_cut':
                return self.val_image_num
            elif self.val_mode == 'offline_cut':
                return len(self.image_sets)





