import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import datetime
from colorama import Fore
import os
import torch

def get_model_complexity(model, *input_tensor_size):
    '''
    Compute the computational complexity of a model with thop.

    NOTE: moves the model to CPU as a side effect and does not move it back;
    callers that need it on GPU must move it again afterwards.

    :param model: the network model (a torch.nn.Module).
    :param input_tensor_size: one size tuple per model input, batch dimension
        included. Multi-input networks are supported.
    :return: (gflops, mparams) tuple.
    '''
    model.cpu()
    from thop import profile
    input_tensors = [torch.randn(size) for size in input_tensor_size]
    flops, params = profile(model, input_tensors)
    # 1024-based ("GiB-style") units, kept to preserve the original convention.
    gflops = flops / 1024 ** 3
    mparams = params / 1024 ** 2
    print(Fore.RED)
    print('GFlops:  ' + str(gflops))
    print('MParams: ' + str(mparams))
    # RESET restores the terminal's default color; the original Fore.BLACK
    # forced black text, which is invisible on dark terminals.
    print(Fore.RESET)

    return gflops, mparams

def display(*inputs, delay_offs=None):
    '''
    Show up to 9 images side by side in one matplotlib figure.

    :param inputs: images; each may be a file path (str), a numpy array,
        or a PIL image.
    :param delay_offs: None -> blocking (non-interactive) display;
        a number -> pause delay_offs seconds before showing the next
        figure or exiting.
    :raises NotImplementedError: for an unsupported input type.
    :return: None
    '''
    assert len(inputs) < 10, "number of inputs must be smaller than 10"
    # A delay means we need interactive (non-blocking) mode.
    if delay_offs is not None:
        plt.ion()
    else:
        plt.ioff()
    # Normalize str (file path), ndarray and PIL image all to ndarray.
    image_arrays = []
    for e in inputs:
        if isinstance(e, str):
            image_arrays.append(np.asarray(Image.open(e)))
        elif isinstance(e, np.ndarray):
            image_arrays.append(e)
        elif "PIL" in str(type(e)):
            image_arrays.append(np.asarray(e))
        else:
            print("Unsupported type: " + str(type(e)))
            raise NotImplementedError
    plt.figure("MaUtilities Display")
    num_images = len(image_arrays)

    for i, image in enumerate(image_arrays, start=1):
        # Single-row grid "1 x num_images, cell i"; valid because
        # num_images < 10 is asserted above.
        plt.subplot(100 + num_images * 10 + i)
        plt.imshow(image)

    # Note: If not block in ioff, when the first image shown in ion and the
    # second one shown in ioff, the second image can NOT be shown (maybe the
    # switch of interactive status causes that first image blocks the process).
    plt.pause(1 if delay_offs is None else delay_offs)
    plt.show()

def get_current_time(is_print=True):
    '''
    Return the current local time as a 'YYYY-MM-DD HH:MM:SS' string.

    :param is_print: when True, also print the timestamp to stdout.
    :return: the formatted timestamp string.
    '''
    now_string = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    if is_print:
        print(now_string)
    return now_string

def get_freer_gpu():
    '''
    Return the index of the GPU that currently has the most free memory.

    Shells out to `nvidia-smi -q -d Memory` and parses the "Free" lines
    through a temporary file named 'tmp' in the current directory.

    :return: zero-based GPU index (int).
    '''
    os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
    try:
        # `with` guarantees the handle is closed (the original leaked it).
        with open('tmp', 'r') as f:
            memory_available = [int(line.split()[2]) for line in f]
    finally:
        # Always clean up the temp file, even if parsing fails.
        os.remove('tmp')
    return int(np.argmax(memory_available))

def get_gpu_used_size(comment=None, pprint=True, total_mb=11171):
    '''
    Return the used memory (in MiB) of every GPU on this machine.

    Shells out to `nvidia-smi -q -d Memory`, parses the free-memory values,
    and reports used = total - free for each card.

    :param comment: optional label prepended to the printed output.
    :param pprint: when True, print the used-memory list in magenta.
    :param total_mb: total memory of each card in MiB. Defaults to 11171
        (the original hard-coded value, an ~11 GB card) for backward
        compatibility.
    :return: list of used-memory values, one per GPU.
    '''
    os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
    try:
        # `with` closes the handle; the original leaked it.
        with open('tmp', 'r') as f:
            memory_available = [int(line.split()[2]) for line in f]
    finally:
        # The original left 'tmp' behind; remove it even if parsing fails.
        os.remove('tmp')
    memory_used = [total_mb - free for free in memory_available]
    if pprint:
        # RESET restores the default color (Fore.BLACK forced black text).
        if comment is not None:
            print(Fore.MAGENTA, comment, "GPU USED: ", memory_used, Fore.RESET)
        else:
            print(Fore.MAGENTA, "GPU USED: ", memory_used, Fore.RESET)
    return memory_used

def get_current_py_file_name():
    '''
    Return the name of the currently running .py file without its extension.

    :return: the file stem, e.g. 'train' for '/path/to/train.py'.
    '''
    import sys
    # splitext strips only the final extension, so 'my.model.py' correctly
    # yields 'my.model' (the original split('.')[0] wrongly returned 'my').
    return os.path.splitext(os.path.basename(sys.argv[0]))[0]
