import os, cv2
import time

import numpy as np
import torch
from models.MobileNet import mobilenet_v2
import glob
import tqdm


class Compose(object):
    """Chain a list of callables into a single image transform.

    Each transform is applied in order; the output of one becomes the
    input of the next.
    """

    def __init__(self, transforms):
        # transforms: iterable of callables taking and returning an image
        self.transforms = transforms

    def __call__(self, img):
        result = img
        for transform in self.transforms:
            result = transform(result)
        return result


class Normalize(object):
    """Convert an (H, W, 3) RGB image to (3, H, W) channel-first layout.

    NOTE(review): historically this class stored ``mean``/``std`` but never
    applied them -- ``__call__`` only transposed the image, so the model was
    presumably trained on un-normalized CHW input. That default behavior is
    preserved for backward compatibility; pass ``apply_stats=True`` to also
    scale to [0, 1] and standardize with the given statistics.
    """

    def __init__(self, mean, std, apply_stats=False):
        '''
        :param mean: per-channel mean, RGB order
        :param std:  per-channel std, RGB order
        :param apply_stats: when True, apply (x/255 - mean) / std after the
            layout change; defaults to False to keep the legacy behavior
        '''
        self.mean = np.array(mean).reshape(3, 1, 1)
        self.std = np.array(std).reshape(3, 1, 1)
        self.apply_stats = apply_stats

    def __call__(self, image):
        '''
        :param image: (H, W, 3) RGB image
        :return: (3, H, W) array; float when ``apply_stats`` is True,
            otherwise the input dtype unchanged
        '''
        chw = image.transpose((2, 0, 1))
        if self.apply_stats:
            # Scale uint8 pixels to [0, 1] before standardizing.
            chw = (chw.astype(np.float64) / 255.0 - self.mean) / self.std
        return chw


#init model
class Class2_init():
    """Wrapper around a MobileNetV2 4-class classifier for batched inference.

    Loads the trained checkpoint at construction time, moves the model to
    GPU, and exposes :meth:`class_forward` for classifying a list of BGR
    images (as produced by ``cv2.imread``).
    """

    def __init__(self):
        # Checkpoint path resolved relative to this file.
        self.weight = os.path.join(os.path.dirname(__file__), "weight/weights-20-0-0.9643231899265478.pth")
        self.test_transforms = Compose([Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])

        model = mobilenet_v2(4)  # 4 output classes
        # map_location="cpu" keeps the load robust regardless of which
        # device the checkpoint was saved on; .cuda() moves it afterwards.
        state = torch.load(self.weight, map_location="cpu")
        model.load_state_dict(state, strict=True)
        model = model.cuda()
        self.model = model.eval()

    def class_forward(self, testimage_list):
        """Classify a batch of images.

        :param testimage_list: list of BGR images (H, W, 3), any size;
            each is resized to 224x224 and converted to RGB
        :return: 1-D LongTensor of predicted class indices, one per image
        """
        # Class index meaning (from training data):
        # 0 deng_mie
        # 1 deng_liang

        batch = []
        for image in testimage_list:
            small = cv2.cvtColor(cv2.resize(image, (224, 224)), cv2.COLOR_BGR2RGB)
            batch.append(self.test_transforms(small))
        batch = torch.from_numpy(np.asarray(batch)).float()

        # Send the batch to wherever the model lives instead of assuming
        # a hard-coded "cuda" device.
        device = next(self.model.parameters()).device
        batch = batch.to(device)

        # no_grad: pure inference -- skip building the autograd graph.
        with torch.no_grad():
            output = self.model(batch)
        _, cls = torch.max(output, 1)

        return cls


if __name__ == '__main__':

    #########   set the GPU   ###########
    os.environ["CUDA_VISIBLE_DEVICES"] = "2"
    #########  the input path   ##########

    predictor = Class2_init()

    start = time.time()
    image_paths = glob.glob('/home/hegang/datas2/hegang/datas/test_data/single_pointer/transformer_substation_data/img_crop/02_03_01_74crop_3.jpg')
    for path in tqdm.tqdm(image_paths):
        frame = cv2.imread(path)
        # Classify each image as a single-element batch.
        index = predictor.class_forward([frame])
        print(index)
    print('time=', time.time() - start)