import torch
from torchvision import datasets
from torchvision.datasets import ImageFolder
import numpy as np
import torchvision.models as models
import torch.nn as nn
import torchvision.transforms as transforms
import torch.optim as optim
import cv2

from PIL import Image


def letterbox(img, new_shape=(256, 256), color=(114, 114, 114), auto=False, scaleFill=False, scaleup=True):
    """Resize `img` to `new_shape` preserving aspect ratio, padding the rest.

    The image is converted to YCrCb colour space before resizing, then
    bordered with `color` so the output is exactly `new_shape` (unless
    `auto` trims the padding to the nearest multiple of 32).

    Args:
        img: input image — a PIL Image or an HxWx3 uint8 ndarray.
            NOTE(review): the channel-swap below yields a correct YCrCb
            result for RGB input (e.g. PIL); confirm for BGR array callers.
        new_shape: target (height, width), or a single int for a square.
        color: per-channel border fill value.
        auto: pad only up to the nearest multiple of 32 (minimum rectangle).
        scaleFill: stretch to `new_shape` exactly, with no padding.
        scaleup: if False, only shrink — never enlarge — the image.

    Returns:
        (image, ratio, (dw, dh)): the letterboxed PIL Image, the
        (width, height) scale ratios, and the per-side padding in pixels.
    """
    # cv2.cvtColor requires an ndarray; the default torchvision loader hands
    # in a PIL Image, which previously crashed here. asarray is a no-op for
    # arrays, so array callers are unaffected.
    img = np.asarray(img)
    # For an RGB input the BGR2RGB swap produces BGR, so the subsequent
    # BGR2YCrCb conversion is computed on correctly-ordered channels.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)

    shape = img.shape[:2]  # current (height, width)
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old); cap at 1.0 when upscaling is disallowed.
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:
        r = min(r, 1.0)

    # Compute padding.
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]
    if auto:  # minimum rectangle: keep only the padding needed for /32 alignment
        dw, dh = np.mod(dw, 32), np.mod(dh, 32)
    elif scaleFill:  # stretch to the exact target shape, no padding at all
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # split padding between the two sides
    dh /= 2
    if shape[::-1] != new_unpad:  # resize only when the size actually changes
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)

    # The +-0.1 rounding places the odd leftover pixel on the bottom/right.
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)

    return Image.fromarray(img), ratio, (dw, dh)

class Compoment_DataLoader():
    """Dataset over an in-memory ``{key: image}`` mapping.

    Each item is letterboxed to a square of side `size` and, when a
    `transform` is supplied, transformed. Returns ``(key, image)`` pairs.
    """

    def __init__(self, dataset, size=320, transform=None):
        # `dataset` maps a key (presumably a filename; verify against the
        # caller) to an image accepted by letterbox().
        self.dataset = dataset
        self.keys = list(self.dataset.keys())
        self.size = size
        self.transform = transform

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, item):
        key = self.keys[item]
        img = self.dataset[key]
        # Bug fix: honor self.size instead of a hard-coded 320 (the old
        # constant matched the default, so default-size callers see no change).
        img = letterbox(img, new_shape=self.size)[0]
        # Bug fix: previously raised UnboundLocalError when transform was None.
        if self.transform is not None:
            img = self.transform(img)
        return key, img


class My_ImageFolder(ImageFolder):
    """ImageFolder variant yielding (letterboxed image, original image, label).

    Both images pass through the same `transform` when one is supplied;
    the letterboxed copy is resized to a square of side `size` first.
    """

    def __init__(self, root, size=256, transform=None):
        super(My_ImageFolder, self).__init__(root, transform)
        self.indices = range(len(self))
        self.size = size
        # NOTE(review): this overwrites the `transforms` attribute that the
        # torchvision base class sets; kept for backward compatibility since
        # __getitem__ below reads it directly.
        self.transforms = transform

    def __getitem__(self, item):
        path, label = self.imgs[item]
        img = self.loader(path)  # PIL image via the base-class loader

        boxed = letterbox(img, new_shape=self.size)[0]

        # Bug fix: previously raised UnboundLocalError when transform was None.
        if self.transforms is not None:
            boxed = self.transforms(boxed)
            img = self.transforms(img)
        return boxed, img, label

# transform = transforms.Compose (
#     [transforms.Resize([224,224]),transforms.ToTensor()]
# )
#
# trainset = My_ImageFolder(root=r'E:\RC_Classify\C_4.0',transform=transform)
# trainloader = torch.utils.data.DataLoader(trainset, batch_size=1,
#                                           shuffle=True, num_workers=0)
#
# for i,(images,labels) in enumerate(trainloader):
#     print(labels)