import torch
import glob
import os
from torchvision import transforms
from torchvision.transforms import functional as F
import cv2
from PIL import Image
# import pandas as pd
import numpy as np
from imgaug import augmenters as iaa
import imgaug as ia
from torch.utils.data import DataLoader
import sys
import csv
o_path = os.getcwd()
sys.path.append(o_path)
from utils.config import DefaultConfig
from dataset import custom_transforms as tr


def augmentation():
	"""Placeholder for spatial augmentations (flip, affine, rotation, ...).

	See https://github.com/aleju/imgaug for details. Not implemented yet.
	"""
	pass


def augmentation_pixel():
	"""Placeholder for pixel-intensity augmentations (GaussianBlur, Multiply, ...).

	Not implemented yet.
	"""
	pass


class PiFu(torch.utils.data.Dataset):
	def __init__(self, dataset_path, scale, k_fold_test=1, mode='train'):
		super().__init__()
		self.mode = mode
		self.img_path = dataset_path + '/img'
		self.mask_path = dataset_path + '/mask'
		self.image_lists, self.label_lists = self.read_list(self.img_path, k_fold_test=k_fold_test)
		# resize
		self.resize_label = transforms.Resize(scale, Image.NEAREST)
		self.resize_img = transforms.Resize(scale, Image.BILINEAR)
		# normalization
		self.to_tensor = transforms.ToTensor()
		self.cls_lable_list = self.get_cls_lable()
		
	
	def __getitem__(self, index):
		# mode为test时,返回原图和mask的路径
		# load image
		img = Image.open(self.image_lists[index])
		img_name = self.image_lists[index].split('/')[-1].split('.')[0]
		# print(img_name)
		
		cls_lable = self.cls_lable_list[img_name]
		
		labels = self.label_lists[index]  # mask的路径,带s的
		labels = Image.open(labels)
		if self.mode == 'test':
			img = np.array(img)
			labels = np.array(labels).astype('uint8')
			# boundary = labels
		
		elif self.mode == 'val':
			img = self.resize_img(img)
			img = np.array(img)
			
			labels = self.resize_label(labels)
			labels = np.array(labels).astype('uint8')
			# labels = 255 * np.array(labels).astype('uint8')
			# boundary = labels
		
		# augment image and label
		elif self.mode == 'train':
			sample = {'image': img, 'label': labels}
			sample = self.transform_tr_train(sample)
			
			img = np.array(sample['image'])
			labels = np.array(sample['label']).astype('uint8')
			# labels = 255 * np.array(sample['label']).astype('uint8')
			# boundary = self.get_boundary(labels)
		
		# print(np.min(label))
		
		labels = np.reshape(labels, labels.shape + (1,))
		# boundary = np.reshape(boundary, boundary.shape + (1,))
		
		img = self.to_tensor(img.copy()).float()
		labels = self.to_tensor(labels.copy())
		# cls_lable = np.array([cls_lable])
		cls_lable = torch.tensor(cls_lable)
		
		# boundary = self.to_tensor(boundary.copy())
		
		return img, labels, cls_lable
	
	# 测试的时候返回图片路径,获取文件名的
	# return img, labels, boundary, self.image_lists[index]
	
	def __len__(self):
		return len(self.image_lists)
	
	def read_list(self, image_path, k_fold_test=1):
		fold = sorted(os.listdir(image_path))
		# print(fold)
		os.listdir()
		img_list = []
		label_list = []
		# 正常的
		if self.mode == 'train':
			fold_r = fold
			fold_r.remove('f' + str(k_fold_test))  # remove testdata
			for item in fold_r:
				img_list += glob.glob(os.path.join(image_path, item) + '/*.jpg')
			# print(len(img_list))
			label_list = [x.replace('img', 'mask').split('.')[0] + '_segmentation.png' for x in img_list]
		elif self.mode == 'val' or self.mode == 'test':
			fold_s = fold[k_fold_test - 1]
			img_list = glob.glob(os.path.join(image_path, fold_s) + '/*.jpg')
			label_list = [x.replace('img', 'mask').split('.')[0] + '_segmentation.png' for x in img_list]
		
		# dermquest的数据
		# if self.mode == 'train':
		#     fold_r = fold
		#     fold_r.remove('f' + str(k_fold_test))  # remove testdata
		#     for item in fold_r:
		#         img_list += glob.glob(os.path.join(image_path, item) + '/*.jpg')
		#     # print(len(img_list))
		#     label_list = [x.replace('img', 'mask').replace('orig.jpg', 'contour.png') for x in img_list]
		# elif self.mode == 'val' or self.mode == 'test':
		#     fold_s = fold[k_fold_test - 1]
		#     img_list = glob.glob(os.path.join(image_path, fold_s) + '/*.jpg')
		#     label_list = [x.replace('img', 'mask').replace('orig.jpg', 'contour.png') for x in img_list]
		
		return img_list, label_list
	
	def get_boundary(self, mask, thicky=5):
		# print(mask.shape)
		# print(type(mask))
		# print(mask)
		contour, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
		tempb = np.zeros_like(mask)
		tempb = cv2.drawContours(tempb, contour, -1, 255, thicky)
		return tempb
	
	def transform_tr_train(self, sample):
		composed_transforms = transforms.Compose([
			tr.RandomHorizontalFlip(),  # 左右
			tr.RandomHorizontalFlip2(),  # 上下
			tr.RandomRotate(20),  # 旋转
			# tr.RandomScaleCrop(base_size=513, crop_size=256),
			tr.RandomCrop_50_100(crop_size=256)
			# tr.Resize_256(crop_size=256)
			# tr.RandomGaussianBlur(),
			# tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
			# tr.ToTensor()
		])
		return composed_transforms(sample)
	
	def transform_tr_val(self, sample):
		composed_transforms = transforms.Compose([
			tr.Resize_256(crop_size=256),
			tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
		])
		return composed_transforms(sample)

	def get_cls_lable(self):
		test_dict = {}
		path1 = []
		path1.append('./dataset/ISIC-2017_Test_v2_Part3_GroundTruth.csv')
		path1.append('./dataset/ISIC-2017_Training_Part3_GroundTruth.csv')
		path1.append('./dataset/ISIC-2017_Validation_Part3_GroundTruth.csv')
		m = n = s = 0
		for path in path1:
			with open(path, 'r') as f:
				reader = csv.reader(f)
				for row in list(reader)[1:]:
					if row[1] == '1.0':
						label = 0
						m += 1
					elif row[2] == '1.0':
						label = 1
						s += 1
					elif row[1] == '0.0' and row[2] == '0.0':
						label = 2
						n += 1
					if label == 2:
						label = 1
					test_dict[row[0]] = label
		print("黑素瘤-Sk-痣 的数量 :", m, s, n)
		print("图片总数为 :", len(test_dict))
		# for k, v in test_dict.items():
		# 	print(k, "    ", v)
		return test_dict
	

if __name__ == '__main__':
	# Smoke test: build the training dataset/loader and inspect one batch.
	args = DefaultConfig()
	dataset_path = os.path.join(args.data, args.dataset)
	print("dataset_path :", dataset_path)

	dataset_train = PiFu(dataset_path, scale=(args.crop_height, args.crop_width), k_fold_test=args.k_fold, mode='train')
	print("训练集数量大小：", len(dataset_train))

	dataloader_train = DataLoader(
		dataset_train,
		batch_size=args.batch_size,
		shuffle=True,
		num_workers=args.num_workers,
		pin_memory=True,
		drop_last=True
	)
	print("训练集一个epoch的batch数量：", len(dataloader_train))

	# Only the first batch is inspected; the loop exits immediately after.
	for batch_img, batch_label, batch_cls in dataloader_train:
		first_img = batch_img[0]
		first_label = batch_label[0]
		print(batch_cls)
		print(type(batch_cls))
		print(first_img.shape)
		print(first_label.shape)

		print(first_img.dtype)
		print(first_label.dtype)

		print(first_img.max(), first_img.min())
		print(first_label.max(), first_label.min())
		break


