import numpy as np
from utils.utils import xyxy2xywh, xywh2xyxy, scale_coords
import cv2
import torch
from torchvision import datasets, models, transforms
from torch.autograd import Variable
from pytorchcifar.models import *
from PIL import Image


class ClassifyModel:
    """Secondary classifier that refines class ids produced by a detector.

    For detections whose class id is in ``needtoclassifycls``, the image
    crop (expanded by ``cropBoxOf``) is re-classified by a dedicated CNN
    and the detector class id is replaced via the ``predcls2origcls``
    mapping.

    Parameters
    ----------
    cropBoxOf : callable(box, width, height) -> [x1, y1, x2, y2]
        Expands/clips a detection box into the crop fed to the classifier.
    modelFileName : str
        Path to a fully pickled torch model (loaded with ``torch.load``).
    needtoclassifycls : collection
        Detector class ids that must be re-classified.
    predcls2origcls : sequence
        Maps classifier output index -> detector class id.
    preprocess : callable(list of BGR ndarrays) -> torch.Tensor
        Turns a list of crops into a batched model input.
    """

    def __init__(self, cropBoxOf, modelFileName, needtoclassifycls, predcls2origcls, preprocess) -> None:
        self.use_gpu = torch.cuda.is_available()
        if self.use_gpu:
            model = torch.load(modelFileName)
            model = model.cuda()
        else:
            model = torch.load(modelFileName, map_location=torch.device('cpu'))
        model.eval()  # inference only
        self.model = model
        self.cropBoxOf = cropBoxOf
        self.needtoclassifycls = needtoclassifycls
        self.predcls2origcls = predcls2origcls
        self.preprocess = preprocess

    def correctLabel(self, detect_results, image):
        """Re-classify crops of selected detections and overwrite their class ids.

        ``detect_results`` is a ``(boxes, scores, classids)`` triple;
        ``classids`` is mutated in place for the re-classified entries and
        the (possibly updated) triple is returned.  ``image`` is a BGR
        ndarray (H, W, C) — presumably the frame the boxes refer to.
        """
        image = image.copy()
        result_boxes, result_scores, result_classid = detect_results
        # Crop boxes only for detections whose class needs a second opinion.
        cboxes = [self.cropBoxOf(result_boxes[i], image.shape[1], image.shape[0])
                  for i in range(len(result_boxes))
                  if result_classid[i] in self.needtoclassifycls]
        if not cboxes:
            return detect_results
        cimages = [image[box[1]:box[3], box[0]:box[2]] for box in cboxes]

        inputs = self.preprocess(cimages)
        if self.use_gpu:
            inputs = inputs.cuda()
        with torch.no_grad():
            outputs = self.model(inputs)
        _, pred = torch.max(outputs.data, 1)

        # Walk the detections again; idx tracks the position within the
        # classified batch (which only contains the selected detections).
        idx = -1
        for i in range(len(result_classid)):
            if result_classid[i] not in self.needtoclassifycls:
                continue
            idx += 1
            result_classid[i] = float(self.predcls2origcls[int(pred[idx])])
        return result_boxes, result_scores, result_classid

    def apply_classifier(self, x, insize, img, im0, name=''):
        """Debug/offline variant operating on YOLO-style prediction tensors.

        ``x``: per-image detection tensors [N, 6] (xyxy, conf, cls) at
        network-input scale; ``img``: the network input batch; ``im0``:
        the original image(s).  Boxes are rescaled to the original image,
        selected crops are re-classified, each crop is dumped to
        ``./{pred}/{name}{j}.bmp`` for inspection, and ``x`` is updated
        in place.  Returns ``x``.
        """
        im0 = [im0] if isinstance(im0, np.ndarray) else im0
        for i, d in enumerate(x):  # per image
            if d is None or not len(d):
                continue
            d = d.clone()

            # Rescale boxes from network-input size to original image size.
            d[:, :4] = scale_coords(img.shape[2:], d[:, :4], im0[i].shape).round()

            orig_cls1 = d[:, 5].long()
            ims = []
            for j, a in enumerate(d):  # per detection: cut out the crop
                b = self.cropBoxOf(a[:4], im0[i].shape[1], im0[i].shape[0])
                ims.append(im0[i][int(b[1]):int(b[3]), int(b[0]):int(b[2])])

            inputs = self.preprocess(ims)
            # BUGFIX: was a disabled `if 0:` — inputs must live on the same
            # device as the model, which is moved to CUDA in __init__.
            if self.use_gpu:
                inputs = inputs.cuda()
            with torch.no_grad():
                outputs = self.model(inputs)
            _, pred = torch.max(outputs.data, 1)

            for j, a in enumerate(d):
                if int(orig_cls1[j]) not in self.needtoclassifycls:
                    continue
                b = self.cropBoxOf(a[:4], im0[i].shape[1], im0[i].shape[0])
                cutout = im0[i][int(b[1]):int(b[3]), int(b[0]):int(b[2])]
                im = cutout[:, :, ::-1]  # BGR -> RGB view; flipped back on write
                # Debug dump: one crop per re-classified detection, filed
                # under the predicted class.
                path = "./{}/{}{}.bmp".format(int(pred[j]), name, j)
                print(path, outputs.data[j])
                cv2.imwrite(path, im[:, :, ::-1])
                print(outputs.data[j])
                print(orig_cls1[j], self.predcls2origcls[int(pred[j])])
                orig_cls1[j] = self.predcls2origcls[int(pred[j])]
                x[i][j, 5] = self.predcls2origcls[int(pred[j])]

        return x

import cv2
from PIL import Image
import numpy
def cv2PIL(cvimg):
    """Convert an OpenCV BGR ndarray into a PIL RGB Image."""
    rgb = cv2.cvtColor(cvimg, cv2.COLOR_BGR2RGB)
    return Image.fromarray(rgb)

class keepRatioResize():
    """Aspect-ratio-preserving resize + crop transform for PIL images.

    The image is first resized so its *longest* side equals ``initsize``
    (keeping aspect ratio), then center-cropped — torchvision's CenterCrop
    pads the short side — and normalized with ImageNet statistics.  In
    training mode the canvas is 5 px larger than ``input_size`` so that
    RandomCrop / flip / color-jitter augmentation has room to jitter.
    """

    # ImageNet normalization statistics, shared by both pipelines.
    _MEAN = [0.485, 0.456, 0.406]
    _STD = [0.229, 0.224, 0.225]

    def __init__(self, input_size, train=False) -> None:
        self.train = train
        # Training uses a slightly larger canvas so RandomCrop can jitter.
        self.initsize = input_size + 5 if train else input_size
        if train:
            self.tr = transforms.Compose([
                transforms.CenterCrop(self.initsize),
                transforms.RandomCrop(input_size),
                transforms.RandomHorizontalFlip(),
                transforms.ColorJitter(brightness=0.1, contrast=0.2, saturation=0, hue=0),
                transforms.ToTensor(),
                transforms.Normalize(self._MEAN, self._STD),
            ])
        else:
            self.tr = transforms.Compose([
                transforms.CenterCrop(input_size),
                transforms.ToTensor(),
                transforms.Normalize(self._MEAN, self._STD),
            ])

    def __call__(self, img):
        return self.forward(img)

    def __repr__(self):
        return self.__class__.__name__

    def forward(self, img):
        """Apply the transform to a PIL image; returns a CHW tensor."""
        mi = min(img.size)
        ma = max(img.size)
        # Resize so the LONGEST side becomes initsize: Resize(int) scales
        # the SHORTEST side to the given value, hence the mi/ma factor.
        img = transforms.Resize(int(mi / ma * self.initsize))(img)
        return self.tr(img)


def getClassifyModel(algid):
    """Build the secondary ClassifyModel configured for a given algorithm id.

    algid 20 -> 3-class clothes classifier applied to detector class 0.
    algid 10 -> 3-class head classifier applied to detector class 1.
    Any other algid returns None (no secondary classifier).
    """

    def _make_crop_box(a, b, mi):
        """Return a box-expansion function: scale width/height by `a`, pad by
        `b`, enforce a minimum side of `mi`, and clip to the image bounds."""
        def cropBoxOf(box, width, height, a=a, b=b, mi=mi):
            x = (box[2] + box[0]) / 2
            y = (box[3] + box[1]) / 2
            w = max((box[2] - box[0]) * a + b, mi)
            h = max((box[3] - box[1]) * a + b, mi)
            return [int(max(0, x - w / 2)),
                    int(max(0, y - h / 2)),
                    int(min(width, x + w / 2)),
                    int(min(height, y + h / 2))]
        return cropBoxOf

    def _make_preprocess(tr):
        """Return a function batching BGR crops through transform `tr`."""
        def preprocess(cvimgs):
            return torch.stack([tr(cv2PIL(cvimg)) for cvimg in cvimgs])
        return preprocess

    if algid == 20:
        return ClassifyModel(
            cropBoxOf=_make_crop_box(a=1.3, b=5, mi=40),
            modelFileName='../clothes_classify/model/efficientnet-b0cloth-3cls.pth',
            needtoclassifycls=[0],
            predcls2origcls=[0, 1, 1],
            preprocess=_make_preprocess(keepRatioResize(224)))
    elif algid == 10:
        tr = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])
        return ClassifyModel(
            cropBoxOf=_make_crop_box(a=1.3, b=2, mi=32),
            modelFileName='../clothes_classify/model/efficientnet-b0head.pth',
            needtoclassifycls=[1],
            predcls2origcls=[1, 2, 2],
            preprocess=_make_preprocess(tr))
    # Unknown algorithm id: keep the original implicit-None behavior,
    # but make it explicit.
    return None
