from efficientnet_pytorch import EfficientNet
from torch.autograd import Variable
import torch.nn as nn
import os
import torch
import logging
import torchvision.transforms as transforms
from torch.nn import functional as F

class MultiLabelModel(nn.Module):
    """Wrap a backbone network with one linear classification head per task.

    Args:
        basemodel: feature-extractor module; its output feeds every head.
        basemodel_output: dimensionality of the backbone's output features.
        num_classes: sequence with the class count of each task head.
    """

    def __init__(self, basemodel, basemodel_output, num_classes):
        super(MultiLabelModel, self).__init__()
        self.basemodel = basemodel
        self.num_classes = num_classes
        # One independent linear head per task; registered as attributes so
        # nn.Module tracks their parameters.
        for index, num_class in enumerate(num_classes):
            setattr(self, "FullyConnectedLayer_" + str(index), nn.Linear(basemodel_output, num_class))

    def forward(self, x):
        """Return a list with one logits tensor per task head."""
        x = self.basemodel.forward(x)
        outs = []
        for index in range(len(self.num_classes)):
            # getattr replaces the original eval() lookup: same attribute
            # access without the code-injection hazard. The stray dir(self)
            # debug call was removed.
            head = getattr(self, "FullyConnectedLayer_" + str(index))
            outs.append(head(x))
        return outs

def load_model_inquiry(checkpoint_name, num_classes):
    """Build an EfficientNet-b3 multi-label model, optionally loading weights.

    Args:
        checkpoint_name: path to a saved state dict; skipped when empty or
            when the file does not exist.
        num_classes: sequence of per-task class counts, forwarded to
            MultiLabelModel.

    Returns:
        A MultiLabelModel instance (on CPU).
    """
    model_name = 'efficientnet-b3'
    # NOTE: from_pretrained may download weights on first use.
    templet = EfficientNet.from_pretrained(model_name)
    input_channel = 3
    input_size = 448

    # Probe the backbone's output feature dimension with one dummy forward
    # pass. torch.zeros replaces the deprecated Variable wrapper around an
    # *uninitialized* FloatTensor, and no_grad avoids building a graph.
    with torch.no_grad():
        tmp_input = torch.zeros(1, input_channel, input_size, input_size)
        tmp_output = templet(tmp_input)
    output_dim = int(tmp_output.size()[-1])
    model = MultiLabelModel(templet, output_dim, num_classes)

    # Load existing weights if a checkpoint path was supplied.
    if checkpoint_name != "":
        if os.path.exists(checkpoint_name):
            logging.info("load pretrained model from " + checkpoint_name)
            # map_location='cpu' lets GPU-saved checkpoints load on CPU-only
            # hosts; the caller moves the model to CUDA afterwards.
            model.load_state_dict(torch.load(checkpoint_name, map_location='cpu'))
        else:
            logging.warning("checkpoint not found, using pretrained backbone only: " + checkpoint_name)

    return model

def scale_keep_ar_min_fixed(img, fixed_min):
    """Resize *img* so its shorter side equals ``fixed_min``, keeping aspect ratio.

    Args:
        img: PIL image to resize.
        fixed_min: target length in pixels of the shorter side.

    Returns:
        A new PIL image resized with bicubic interpolation.
    """
    # Bug fix: `Image` was previously only imported inside the __main__
    # guard, so this function raised NameError whenever the module was
    # imported from elsewhere. A function-scope import keeps it self-contained.
    from PIL import Image

    ow, oh = img.size
    if ow < oh:
        # Width is the shorter side: pin it and scale the height.
        nw = fixed_min
        nh = nw * oh // ow
    else:
        # Height is the shorter side (or the image is square).
        nh = fixed_min
        nw = nh * ow // oh
    return img.resize((nw, nh), Image.BICUBIC)
def prepare_image(image, use_gpu=True):
    """Preprocess a PIL image into a normalized batched tensor for inference.

    Args:
        image: input PIL image; converted to RGB when needed.
        use_gpu: move the result to CUDA when a GPU is available. Defaults
            to True to match the previous hard-coded behavior.

    Returns:
        A float tensor of shape (1, 3, 448, 448) with each channel
        normalized from [0, 1] to [-1, 1].
    """
    if image.mode != 'RGB':
        image = image.convert("RGB")

    # Resize so the short side is 448, then center-crop to 448x448.
    image = scale_keep_ar_min_fixed(image, 448)
    image = transforms.CenterCrop((448, 448))(image)
    image = transforms.ToTensor()(image)

    # Normalize each channel: (x - 0.5) / 0.5 maps [0, 1] -> [-1, 1].
    image = transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])(image)

    # Add the batch axis.
    image = image[None]
    # Bug fix: the unconditional .cuda() crashed on CPU-only machines.
    if use_gpu and torch.cuda.is_available():
        image = image.cuda()
    # Variable(..., volatile=True) is deprecated since PyTorch 0.4; tensors
    # are autograd-aware and inference should run under torch.no_grad().
    return image

def inquiry(model, inputs_var, num_classes):
    """Run one forward pass and return the top-1 (score, index) per task head.

    Args:
        model: module whose forward returns a list of per-task logit
            tensors, each of shape (batch, num_class).
        inputs_var: input batch tensor.
        num_classes: sequence with one entry per task head; only its length
            is used here.

    Returns:
        List of [top_value, top_index] pairs, one per task head.
    """
    # Bug fixes: `output` was only assigned inside `if cuda:` (NameError if
    # the flag were False), the GPU path ignored actual CUDA availability,
    # inference ran with autograd enabled, and a leftover debug print(model)
    # spammed stdout.
    model.eval()
    with torch.no_grad():
        if torch.cuda.is_available():
            model.cuda()
            output = nn.parallel.data_parallel(model, inputs_var, device_ids=range(1))
        else:
            output = model(inputs_var)

    label_re = []
    for i in range(len(num_classes)):
        # Top-1 score and class index along the class dimension.
        top_value, index = output[i].data.cpu().topk(1, 1)
        label_re.append([top_value.item(), index.item()])
    return label_re


if __name__ == '__main__':
    from PIL import Image

    # One binary head per question.
    num_classes = [2, 2, 2, 2]
    checkpoint_name = r'/home/ubuntu/data/multi_label/saved_models/question_0_snapshot.pth'
    model = load_model_inquiry(checkpoint_name, num_classes)

    file = r'/home/ubuntu/data/multi_label/data/1.jpg'
    image = Image.open(file)
    # prepare_image already returns a batched tensor ready for inference;
    # the old extra Variable(image, volatile=True) re-wrap was redundant
    # and used a deprecated API.
    inputs_var = prepare_image(image)
    label_re = inquiry(model, inputs_var, num_classes)

    print(label_re)

# import torch.nn.parallel
# import torch.optim
# import torch.utils.data
# import torch.utils.data.distributed
# import argparse
# import struct
# from efficientnet_pytorch import EfficientNet
# from concurrent.futures import ThreadPoolExecutor, as_completed
# import os,json
# import torch.nn as nn
# import torchvision.transforms as transforms
# import torch.backends.cudnn as cudnn
# from PIL import Image,ImageFile
# from torch.nn import functional as F
# from torch.autograd import Variable
# import logging
# import requests
# from http.server import HTTPServer, BaseHTTPRequestHandler
# import cgi
# import io,base64,time
# from json import *
# parser = argparse.ArgumentParser(description='PyTorch  Training')
# parser.add_argument('--use_gpu', default=True,
#                     help='path to dataset')
# parser.add_argument('--filepath', default=r'1.jpg',
#                     type=str, metavar='PATH',
#                     help='path to latest checkpoint (default: none)')
# parser.add_argument('--gpu', default=1, type=int,
#                     help='GPU nums to use.')
#
# model = None
#
# class MultiLabelModel(nn.Module):
#     def __init__(self, basemodel, basemodel_output, num_classes):
#         super(MultiLabelModel, self).__init__()
#         self.basemodel = basemodel
#         self.num_classes = num_classes
#         for index, num_class in enumerate(num_classes):
#             setattr(self, "FullyConnectedLayer_" + str(index), nn.Linear(basemodel_output, num_class))
#
#     def forward(self, x):
#         x = self.basemodel.forward(x)
#         outs = list()
#         dir(self)
#         for index, num_class in enumerate(self.num_classes):
#             fun = eval("self.FullyConnectedLayer_" + str(index))
#             out = fun(x)
#             outs.append(out)
#         return outs
#
# def load_model_inquiry():
#     global model_integration, question_class, model
#     model_name = 'efficientnet-b3'
#     input_channel = 3
#     input_size = 448
#     model_path = r'/home/ubuntu/data/multi_label/saved_models'
#     question_class = {}
#     model_integration = {}
#
#     for file in os.listdir(model_path):
#         resume = os.path.join(model_path,file)
#         class_quetion = file.split('_')
#         n_class = [2] * int(class_quetion[-2])
#         question_class.update({str(class_quetion[1]):n_class})
#
#         templet = EfficientNet.from_pretrained(model_name)
#
#         # build model
#         tmp_input = Variable(torch.FloatTensor(1, input_channel, input_size, input_size))
#         tmp_output = templet(tmp_input)
#         output_dim = int(tmp_output.size()[-1])
#         model = MultiLabelModel(templet, output_dim, n_class)
#         cuda = True
#         if cuda:
#             model = nn.DataParallel(model, device_ids=range(1))
#             model.cuda()
#             cudnn.benchmark = True
#         # load exsiting model
#         if file != "":
#             if os.path.exists(file):
#                 logging.info("load pretrained model from " + file)
#                 model.load_state_dict(torch.load(resume))
#         model.eval()
#         model_integration.update({str(class_quetion[1]): model})
#
#
# def scale_keep_ar_min_fixed(img, fixed_min):
#     ow, oh = img.size
#
#     if ow < oh:
#
#         nw = fixed_min
#
#         nh = nw * oh // ow
#
#     else:
#
#         nh = fixed_min
#
#         nw = nh * ow // oh
#     return img.resize((nw, nh), Image.BICUBIC)
#
# def prepare_image(image):
#     """Do image preprocessing before prediction on any data.
#     """
#     if image.mode != 'RGB':
#         image = image.convert("RGB")
#
#     # Resize the input image nad preprocess it.
#     image = transforms.Lambda(lambda img: scale_keep_ar_min_fixed(img, 448))(image)
#     image = transforms.CenterCrop((448, 448))(image)
#     image = transforms.ToTensor()(image)
#
#     # Convert to Torch.Tensor and normalize.
#     image = transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])(image)
#     use_gpu =True
#     # Add batch_size axis.
#     image = image[None]
#     if use_gpu:
#         image = image.cuda()
#     return torch.autograd.Variable(image, volatile=True)
#
# def inquiry_tongue(file):
#     result = {'success': False}
#
#     def ThreadPool(image, keys, model, result):
#         label_re = []
#         output = model(image)
#         for i in range(len(question_class[keys])):
#             top_value, index = output[i].data.cpu().topk(1, 1)
#             label_re.append([top_value.item(),index.item()])
#         result['predictions'].append((int(keys), label_re))
#         return result
#
#     ImageFile.LOAD_TRUNCATED_IMAGES = True
#     image = Image.open(file)
#     image = prepare_image(image)
#     result['predictions'] = list()
#
#     pool = ThreadPoolExecutor(max_workers=1)
#     futures = {
#         pool.submit(
#             ThreadPool,
#             image,
#             keys,
#             model,
#             result
#         ):
#             model for keys, model in model_integration.items()
#     }
#     for future in as_completed(futures):
#         result = future.result()
#
#     sorted_result = sorted(result['predictions'], key=lambda key: key[0], reverse=False)
#     res = [str(sor) for _, sor in sorted_result]
#
#     return res

#
# if __name__ == '__main__':
#     from PIL import Image
#
#     file = r'/home/ubuntu/data/multi_label/data/1.jpg'
#
#     load_model_inquiry()
#     print(inquiry_tongue(file))