import os
import sys
from dataclasses import dataclass, field
from typing import List, Optional, Any

import torch
from omegaconf import MISSING


@dataclass
class ColorJitterConfigs:
    """Color-jitter augmentation strengths.

    NOTE(review): presumably consumed by a torchvision-style ``ColorJitter``
    transform — confirm at the call site.
    """
    # Brightness expressed as a fraction of the 0-255 pixel range.
    brightness: float = 32.0 / 255.0
    contrast: float = 0.5
    saturation: float = 0.5
    hue: float = 0.2

@dataclass
class TrainInput:
    """Input-data section of the training configuration.

    ``patch_dir`` must be an absolute path, and ``train_list`` /
    ``valid_list`` must have equal length (enforced by ``check_config``).
    Each list file's first line names a patch image inside ``patch_dir``.
    """
    image_size: int = 224
    crop_size: int = 224
    patch_dir: str = MISSING         # absolute directory holding list files and patch images
    train_list: List[str] = MISSING  # list-file names, resolved relative to patch_dir
    valid_list: List[str] = MISSING  # paired 1:1 with train_list

@dataclass
class Optimizer:
    """Optimizer selection plus its optional extra arguments."""
    # Name of the optimizer to build; defaults to Adam.
    type: str = 'adam'
    # NOTE(review): presumably forwarded to the optimizer constructor — confirm at call site.
    args: Optional[Any] = None

@dataclass
class Configuration:
    """Top-level training configuration (omegaconf structured config).

    Fields defaulting to ``MISSING`` must be supplied by the user's config
    file before the run starts.
    """
    workdir: str = '.'
    id: str = MISSING
    input: TrainInput = MISSING
    # default_factory instead of a shared instance: a bare ColorJitterConfigs()
    # default is one object shared by every Configuration (mutations leak
    # across instances) and raises ValueError under @dataclass on Python 3.11+,
    # which rejects unhashable default values.
    color_jitter: ColorJitterConfigs = field(default_factory=ColorJitterConfigs)
    normalize: Optional[List[List[float]]] = MISSING
    optimizer: Optimizer = field(default_factory=Optimizer)
    lr: float = 0.01
    model: str = 'resnet18'
    fp16: bool = False               # mixed-precision training toggle
    base_model: Optional[str] = None  # NOTE(review): presumably a checkpoint/weights path — confirm
    hsv: bool = False
    gpu: str = ''                    # CUDA_VISIBLE_DEVICES value; '' means CPU (see use_gpu)
    num_workers: int = 0
    pin_memory: bool = True
    batch_size: int = 10
    epoch: int = 1
    log_every: int = 5               # logging cadence — units (steps vs epochs) not visible here
    save_every: int = 10
    
def check_config(cnn: "Configuration") -> "Configuration":
    """Validate the input section of a configuration and return it unchanged.

    Checks that ``patch_dir`` is an absolute path, that the train/valid list
    files come in equal-length pairs, and that the first patch referenced by
    each list file exists on disk.

    Returns:
        The same ``cnn`` object, for call-chaining.

    Raises:
        AssertionError: if any check fails (note: stripped under ``python -O``).
        FileNotFoundError: if a list file itself is missing.
    """
    inputs = cnn.input
    assert inputs is not None
    assert inputs.patch_dir is not None
    # A relative patch_dir would silently resolve against the CWD; require absolute.
    assert os.path.isabs(inputs.patch_dir)

    assert inputs.train_list is not None
    assert inputs.valid_list is not None
    assert len(inputs.valid_list) > 1
    assert len(inputs.valid_list) == len(inputs.train_list)

    # Spot-check every list file: its first line must name an existing patch image.
    for list_file in inputs.train_list + inputs.valid_list:
        with open(os.path.join(inputs.patch_dir, list_file)) as f:
            patch_file = os.path.join(inputs.patch_dir, f.readline().strip())
            assert os.path.isfile(patch_file), f'patch image not found: {patch_file}'

    return cnn


# Cached result of the GPU-availability probe; None means "not yet probed".
_USE_GPU = None

def use_gpu(device=None):
    """Return True when CUDA should be used; the probe result is cached.

    Passing a truthy *device* string exports it as CUDA_VISIBLE_DEVICES and
    invalidates the cache so the next check re-evaluates availability.
    """
    global _USE_GPU

    if device:
        os.environ['CUDA_VISIBLE_DEVICES'] = device
        _USE_GPU = None

    if _USE_GPU is None:
        visible = os.environ.get("CUDA_VISIBLE_DEVICES")
        # GPU only when devices are explicitly exposed AND torch can see CUDA.
        _USE_GPU = torch.cuda.is_available() if visible else False

    return _USE_GPU


def check_torch_env():
    """Print a summary of the CUDA environment.

    Lists visible devices when a GPU is usable; otherwise prints a warning
    banner and waits for the user to press return (CTRL-C aborts).
    """
    cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES")

    # Echo the raw environment variable, substituting a marker when unset.
    shown = '[NOT SET]' if cuda_visible_devices is None else cuda_visible_devices
    print(f'export CUDA_VISIBLE_DEVICES = {shown}')

    if not use_gpu():
        print(
f"""
*****************************************************************************
*
* WARN: CUDA_VISIBLE_DEVICES not set, or cuda device is not available
* 
* CUDA_VISIBLE_DEVICES = {cuda_visible_devices}
* torch.cuda.is_available() = {torch.cuda.is_available()}
*
*****************************************************************************

Press return to continue or CTRL-C to break
""")
        sys.stdin.readline()
        return

    # One line per visible device: index, name, compute capability.
    for idx in range(torch.cuda.device_count()):
        print(f'{idx}: {torch.cuda.get_device_name(idx)}: {torch.cuda.get_device_capability(idx)}')


# Allow running this module directly to sanity-check the CUDA setup.
if __name__ == '__main__':
    check_torch_env()
