import hashlib
import shutil
import subprocess
import warnings

import torch

import ignite


__all__ = ['get_state_dict', 'cuda_info', 'print_torch_version']


def cuda_info():
    """Print a short summary of the local PyTorch / CUDA setup to stdout."""
    summary = [
        "PyTorch version: {}".format(torch.__version__),
        "Use cuda: {}".format('Yes!' if torch.cuda.is_available() else 'No!'),
        "Cuda list {}".format(list(range(torch.cuda.device_count()))),
    ]
    for line in summary:
        print(line)


def print_torch_version(logger=None):
    """Report PyTorch / Ignite / CUDA version information.

    ``logger`` is any print-like callable taking a single string; when
    omitted (or falsy) the builtin ``print`` is used.
    """
    emit = logger if logger else print
    messages = (
        "- PyTorch version: {}".format(torch.__version__),
        "- Ignite version: {}".format(ignite.__version__),
        "- CUDA version: {}".format(torch.version.cuda),
        "- CUDA device list {}".format(list(range(torch.cuda.device_count()))),
    )
    for message in messages:
        emit(message)


class DeviceWarning(UserWarning):
    """A problem with a device (e.g. CUDA) was detected.

    Must derive from ``Warning`` (here via ``UserWarning``): this module
    passes it as the *category* to ``warnings.warn``, which raises
    ``TypeError`` for any category that is not a ``Warning`` subclass.
    The previous ``BaseException`` base therefore turned every device
    warning in this module into a crash.
    """


def get_state_dict(f, device):
    """Load a checkpoint from *f*, mapping its storages onto *device*.

    Falls back to CPU (with a :class:`DeviceWarning`) when CUDA is
    requested but unavailable; see :func:`get_map_location`.
    """
    map_location = get_map_location(device)
    # _check_device is called only for its warning side effect (it notifies
    # the user when the effective device differs from the requested one);
    # the previously bound-but-unused `_device` local has been dropped.
    _check_device(device, map_location)
    return torch.load(f, map_location=map_location)


def get_map_location(target_device, fallback_device='cpu'):
    """Determine the ``torch.device`` onto which loaded data (e.g. weights)
    should be mapped for the requested *target_device*.

    Returns the requested device unchanged unless CUDA was requested while
    no CUDA device is available, in which case *fallback_device* is
    returned and a :class:`DeviceWarning` is emitted.
    """
    requested = torch.device(target_device)
    # Anything other than an unsatisfiable CUDA request passes through.
    if requested.type != 'cuda' or torch.cuda.is_available():
        return requested
    warnings.warn(
        'Requested to load data to CUDA but no CUDA devices '
        'are available. Loading on device "{}" instead.'.format(
            fallback_device,
        ), DeviceWarning)
    return torch.device(fallback_device)


def _check_device(requested_device, map_device):
    """Compare the requested device with the map device and
    return the map device if it differs from the requested device
    along with a warning.
    """
    type_1 = torch.device(requested_device)
    type_2 = torch.device(map_device)
    if type_1 != type_2:
        warnings.warn(
            'Setting self.device = {} since the requested device ({}) '
            'is not available.'.format(map_device, requested_device),
            DeviceWarning)
        return map_device
    # return requested_device instead of map_device even though we
    # checked for *type* equality as we might have 'cuda:0' vs. 'cuda:1'.
    return requested_device

def process_checkpoint(in_file, out_file):
    """Strip optimizer state from a checkpoint and stamp it with a hash.

    Loads *in_file* on CPU, removes the ``'optimizer'`` entry (smaller
    file), saves the result to *out_file*, then renames it to
    ``<out_file minus .pth>-<first 8 hex chars of sha256>.pth``.

    Returns the final file path.
    """
    checkpoint = torch.load(in_file, map_location='cpu')
    # Remove optimizer for smaller file size.
    checkpoint.pop('optimizer', None)
    # If it is necessary to remove some sensitive data in
    # checkpoint['meta'], add the code here.
    torch.save(checkpoint, out_file)
    # Hash in-process with hashlib instead of shelling out to `sha256sum`
    # (which only exists on platforms shipping coreutils).
    digest = hashlib.sha256()
    with open(out_file, 'rb') as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b''):
            digest.update(chunk)
    sha = digest.hexdigest()
    if out_file.endswith('.pth'):
        out_file_name = out_file[:-4]
    else:
        out_file_name = out_file
    final_file = out_file_name + f'-{sha[:8]}.pth'
    # Synchronous, portable rename: the old fire-and-forget
    # subprocess.Popen(['mv', ...]) returned before the rename finished
    # and required an external `mv` binary.
    shutil.move(out_file, final_file)
    return final_file

if __name__ == '__main__':
    # Ad-hoc driver: strips the optimizer from one checkpoint and renames
    # it with a content-hash suffix via process_checkpoint.
    # NOTE(review): the absolute paths below are machine-specific
    # ('/home/blake/...') — this entry point only works on that machine;
    # consider replacing with argparse-driven paths.
    print(__name__)
    from pathlib import Path
    cp = '/home/blake/data/medical/datasets/vertebral/checkpoint/app/ais_seg_hrnet_w18_landmark_spinenet_res34_v1.0.0/model_keypoint.pth'
    cp2 = '/home/blake/data/medical/datasets/vertebral/checkpoint/seg/ais_spine_seg_hrnet_w18_v1.2.0.3/training_checkpoint_99000.pth'
    # Scratch code kept for reference: re-saves only the model weights of
    # cp2 as 'module_seg.pth' (cp2 is otherwise unused here).
    # model_state = get_state_dict(cp2, 'cpu')
    #
    # save_state = {}
    # save_state['module_dict'] = model_state['model']
    #
    # torch.save(save_state, str(Path(cp).with_name('module_seg.pth')))

    process_checkpoint(str(Path(cp).with_name('module_keypoint.pth')), str(Path(cp).with_name('module_keypoint_sha.pth')))


    print("convert checkpoint")
