import argparse
from engine import *
from models import *
from coco import *
from PIL import Image
from util import *
from Smodataset import *
import torchvision.transforms as transforms
import numpy as np
import time

import numpy as np


def sigmoid(x):
    s = 1 / (1 + np.exp(-x))
    return s


class Warps(object):
    """Resize transform that warps an image to a ``size`` x ``size`` square.

    Unlike ``transforms.Resize`` with a single int, this ignores the aspect
    ratio and forces both dimensions to ``size``.
    """

    def __init__(self, size, interpolation=Image.BILINEAR):
        # BUG FIX: the original check was ``isinstance(size, (int, type))``;
        # ``type`` is the builtin metaclass, so any class object passed the
        # check — clearly a typo for a numeric check.  Also raise instead of
        # assert, since asserts are stripped under ``python -O``.
        if not isinstance(size, (int, float)):
            raise TypeError("size must be an int or float, got %r" % (size,))
        self.size = int(size)
        self.interpolation = interpolation

    def __call__(self, img):
        """Return ``img`` resized to (size, size) with the chosen filter."""
        return img.resize((self.size, self.size), self.interpolation)


class Images:
    """Inference helper for the 7-label smoke / seat-belt / phone classifier.

    Wraps checkpoint loading, image preprocessing, and decoding of the
    model's 7 logits into three mutually exclusive label groups.
    """

    # Word-embedding pickle fed to the GCN branch as the "inp" input.
    pkl = "/media/zhaokaiyue/_dde_data1/data/coco/Smoke_Call_SafetyBelt/coco_glove_word2vec.pkl"
    image_size = 448
    images_transform = None    # built lazily by init_transform_image()
    state = [0, 1]
    # ImageNet normalization statistics.
    image_normalization_mean = [0.485, 0.456, 0.406]
    image_normalization_std = [0.229, 0.224, 0.225]

    # Logit index -> label name.  Indices 0-1, 2-3 and 4-6 form three
    # mutually exclusive groups (smoke / seat belt / phone).
    LABELS = {0: "Smoke", 1: "NoSmoke", 2: "NoSafetybelt", 3: "Safetybelt",
              4: "Call", 5: "NoPhone", 6: "UsePhone"}

    def read_pkl(self, pkl_filepath):
        """Load the word-embedding matrix and keep only the first 7 rows.

        :param pkl_filepath: path of the pickle file (trusted local data;
            note ``pickle.load`` must never be used on untrusted input).
        :return: the (7, dim) embedding array, also cached on ``self.inp``.
        """
        with open(pkl_filepath, 'rb') as f:
            self.inp = pickle.load(f)
            self.inp = self.inp[:7, :]
        return self.inp

    def test_image(self, img: str, models):
        """Classify a single image and print/return the decoded labels.

        :param img: path of the image file.
        :param models: model as returned by ``init_model``.
        :return: one label per group, e.g. ['NoSmoke', 'Safetybelt', 'NoPhone'].
        """
        self.init_transform_image()
        image = self.images_transform(Image.open(img))
        # ``torch.autograd.Variable`` is a deprecated no-op since torch 0.4;
        # plain tensors behave identically under ``torch.no_grad()``.
        input_var = image.unsqueeze(0)          # add the batch dimension
        inp_var = torch.from_numpy(self.read_pkl(self.pkl)).unsqueeze(0)

        with torch.no_grad():
            result = models(input_var, inp_var)  # raw logits, shape (1, 7)

        print("result:{}".format(result))
        # result:tensor([[-3.9897, -1.9173, -0.1903, -2.4192, -0.0847,  0.2039, -0.6008]])

        y = sigmoid(result[0]).numpy()
        print("result:{}".format(y))
        # result:[0.01816898 0.12816621 0.45256415 0.0817185  0.47884578 0.5507899 0.3541536 ]

        # Within each group the labels are mutually exclusive, so take the
        # argmax per group instead of thresholding each logit.
        final_types = []
        smoke_idx = int(np.argmax(y[:2], axis=0))
        final_types.append(self.LABELS[smoke_idx])
        belt_idx = int(np.argmax(y[2:4], axis=0))
        final_types.append(self.LABELS[belt_idx + 2])
        phone_idx = int(np.argmax(y[4:], axis=0))
        final_types.append(self.LABELS[phone_idx + 4])

        print("final_types:{}".format(final_types))
        # final_types:['NoSmoke', 'NoSafetybelt', 'NoPhone']
        return final_types

    def Torsor_to_numpy(self, tensors):
        """Decode a (1, 7) logit tensor by thresholding each logit at 0.

        :param tensors: model output tensor of shape (1, 7).
        :return: names of all labels with a positive logit (also printed).
            Previously this returned ``None``; the list is now returned as
            well, which is backward compatible.
        """
        numpydata = tensors.detach().numpy()
        result = [self.LABELS[idx]
                  for idx, logit in enumerate(numpydata[0]) if logit > 0]
        print(result)
        return result

    def test_mult_images(self, image_file, model):
        """Run ``test_image`` on every file in a directory, timing each call.

        :param image_file: directory containing the images.
        :param model: model as returned by ``init_model``.
        :return: None
        """
        # isdir() is False for nonexistent paths too, so one check suffices.
        if not os.path.isdir(image_file):
            return
        sweep_start = time.time()
        for child_file in os.listdir(image_file):
            image_path = os.path.join(image_file, child_file)
            if not os.path.isfile(image_path):
                continue    # skip sub-directories; Image.open would crash
            print("filename:{}".format(image_path))
            single_start = time.time()
            self.test_image(image_path, model)
            print("single image time:{}".format(time.time() - single_start))
        print("sum_time:{}".format(time.time() - sweep_start))

    def init_transform_image(self):
        """Build the square-resize + normalize pipeline used before inference."""
        normalize = transforms.Normalize(mean=self.image_normalization_mean,
                                         std=self.image_normalization_std)
        self.images_transform = transforms.Compose([
            Warps(self.image_size),
            transforms.ToTensor(),
            normalize,
        ])

    def init_model(self):
        """Load the checkpoint and return the model in eval mode.

        :return: the (possibly DataParallel-wrapped) model, ready for inference.
        """
        # map_location="cpu" lets the checkpoint load on CPU-only machines.
        checkpoint = torch.load("/home/zhaokaiyue/PycharmProjects/mult_labels/checkpoint/coco/model_best.pth.tar", map_location="cpu")
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        model = gcn_resnet101(pretrained=False, num_classes=7, t=0.4,
                              adj_file='/media/zhaokaiyue/_dde_data1/data/coco/Smoke_Call_SafetyBelt/voc_adj.pkl').to(device)

        model.load_state_dict(checkpoint['state_dict'])
        if torch.cuda.is_available():
            # NOTE(review): the original comment claimed DataParallel wrapping
            # is required even on CPU, but the guard skips it there — verify
            # against how the checkpoint was saved.
            model = torch.nn.DataParallel(model).cuda()
        model.eval()
        return model


if __name__ == "__main__":
    # Build the model once, then sweep the validation directory with it.
    detector = Images()
    net = detector.init_model()
    # Single test image path, kept for ad-hoc debugging:
    single_image = "/media/zhaokaiyue/_dde_data1/data/coco/Smoke_Call_SafetyBelt/data/val2014/130200207414691726_2346,508,1169,1011_121.jpg"
    # Directory of images to run through the model:
    image_dir = "/media/zhaokaiyue/_dde_data1/data/coco/Smoke_Call_SafetyBelt/data/val2014"
    # detector.test_image(single_image, net)
    detector.test_mult_images(image_dir, net)



