from datetime import datetime
import os
import random
import sys

import wandb
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms

# Lowercase file extensions recognized as images by is_img(); matching is
# case-insensitive because is_img() lowercases the extension before comparing.
img_suffixs = ['jpg', 'png', 'jpeg', 'bmp', 'xbm', 'tif', 'pjp', 'svgz', 'ico', 'tiff', 'gif', 'svg', 'jfif', 'webp',
               'pjpeg', 'avif']

def set_cuda(devices):
    """Restrict the visible CUDA devices and return the usable device ids.

    Args:
        devices: comma-separated GPU id string (e.g. "0,1"), or "all" to
            leave CUDA_VISIBLE_DEVICES untouched.

    Returns:
        List of logical device indices [0, ..., torch.cuda.device_count() - 1]
        (empty on a CPU-only machine).

    NOTE(review): setting CUDA_VISIBLE_DEVICES only takes effect if CUDA has
    not been initialized yet — call this before any CUDA work; verify callers.
    """
    if devices != "all":
        os.environ["CUDA_VISIBLE_DEVICES"] = devices
    # list(range(n)) instead of the redundant [i for i in range(n)].
    return list(range(torch.cuda.device_count()))


def listAllImagePath(rootDir):
    """Recursively collect the paths of all image files under ``rootDir``.

    Directories are visited in ``os.listdir`` order; non-image files are
    skipped (image detection is delegated to ``is_img``).
    """
    found = []
    for entry in os.listdir(rootDir):
        full_path = os.path.join(rootDir, entry)
        if os.path.isdir(full_path):
            found.extend(listAllImagePath(full_path))
        elif is_img(full_path):
            found.append(full_path)
    return found


def get_parent_dirname(path):
    """Return the name of the directory that directly contains ``path``."""
    parent = os.path.dirname(path)
    return os.path.basename(parent)

def is_img(path):
    """Return True if ``path`` names a file whose extension is a known image type.

    Fixes two issues with the original:
    - a file with no extension whose whole name matched a suffix (e.g. a file
      literally named "png") was wrongly classified as an image;
    - the manual loop over suffixes is replaced by a membership test.
    """
    if os.path.isdir(path):
        return False
    filename = os.path.basename(path)
    if "." not in filename:
        # No extension at all -> not an image.
        return False
    file_suffix = filename.rsplit(".", 1)[-1].lower()
    return file_suffix in img_suffixs


def cv_imread(file_path, flag=cv2.IMREAD_COLOR, cache=False):
    """Read an image via numpy + imdecode so paths with non-ASCII characters work.

    Note: ``cache`` is currently unused; kept for interface compatibility.
    """
    raw_bytes = np.fromfile(file_path, dtype=np.uint8)
    return cv2.imdecode(raw_bytes, flag)


def cv_imwrite(file_path, cv_img, flag=cv2.IMREAD_COLOR, cache=False):
    """Write ``cv_img`` to ``file_path``, creating parent directories as needed.

    Note: ``flag`` and ``cache`` are unused; kept for interface compatibility
    (``flag`` is even a *read* flag, which makes no sense here).

    Fixes: the original called os.makedirs("") — which raises — whenever
    ``file_path`` was a bare filename with no directory component, and had an
    isdir/makedirs race; ``exist_ok=True`` handles both safely.
    """
    parent_dir = os.path.dirname(file_path)
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)
    cv2.imwrite(file_path, cv_img)


def to_gray(img):
    """Convert a 3-channel BGR image to grayscale; pass 2-D images through unchanged."""
    if img.ndim != 3:
        return img
    return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)


def save_model(model, epoch, save_dir, latest_flag=True, save_tag=None):
    """Save model weights and the epoch number as a checkpoint dict.

    Args:
        model: the module to save; nn.DataParallel wrappers are unwrapped so
            the checkpoint loads without the wrapper.
        epoch: epoch number stored alongside the weights.
        save_dir: directory for the checkpoint files (created if missing).
        latest_flag: also write a "model_latest.pth" copy when True.
        save_tag: filename tag; defaults to ``epoch``.

    Fixes: isinstance() instead of ``type(...) ==`` (handles subclasses),
    ``is None`` instead of ``== None``, and exist_ok=True removes the
    isdir/makedirs race.
    """
    os.makedirs(save_dir, exist_ok=True)
    if isinstance(model, nn.DataParallel):
        state_dict = model.module.state_dict()
    else:
        state_dict = model.state_dict()
    save_dict = {"model": state_dict, "epoch": epoch}
    if save_tag is None:
        save_tag = epoch
    torch.save(save_dict, os.path.join(save_dir, "model_{}.pth".format(save_tag)))
    if latest_flag:
        torch.save(save_dict, os.path.join(save_dir, "model_latest.pth"))

def load_model_core(model, checkpoint_path):
    """Load weights and epoch from ``checkpoint_path`` into ``model``.

    Returns:
        (model, epoch) — the same model object with weights loaded, and the
        epoch number stored in the checkpoint.

    Raises:
        FileNotFoundError: if the checkpoint does not exist. (The original
        called exit(-1) from a library function — killing the whole process —
        and left an unreachable ``return`` behind it.)
    """
    if not os.path.exists(checkpoint_path):
        raise FileNotFoundError("{} is not exists".format(checkpoint_path))
    save_dict = torch.load(checkpoint_path)
    weights = save_dict["model"]
    # isinstance handles DataParallel subclasses too, unlike type() ==.
    if isinstance(model, nn.DataParallel):
        model.module.load_state_dict(weights)
    else:
        model.load_state_dict(weights)
    return model, save_dict["epoch"]


def load_model(model, epoch, save_dir):
    """Load the checkpoint saved for ``epoch`` from ``save_dir``."""
    checkpoint_path = os.path.join(save_dir, "model_{}.pth".format(epoch))
    model, loaded_epoch = load_model_core(model, checkpoint_path)
    return model, loaded_epoch

def float_01_threshold(img):
    """Clamp a float image into [0, 1] using OpenCV thresholding.

    First truncate values above 1.0 down to 1.0, then zero out values <= 0.
    """
    _, clipped = cv2.threshold(img, 1.0, 1.0, cv2.THRESH_TRUNC)
    _, clipped = cv2.threshold(clipped, 0, 0, cv2.THRESH_TOZERO)
    return clipped


def closeWandb():
    """Disable wandb by replacing every callable attribute with a no-op.

    After neutralizing all callables, ``wandb.log`` is rebound to ``print`` so
    logging calls still show up on stdout.

    Fixes: the original built strings and ran them through eval()/exec() —
    plain getattr/callable/setattr does the same thing safely — and carried an
    unused ``wandb_dict`` plus a pointless globals() indirection.
    """
    noop = lambda *args, **kwargs: None
    for attr_name in dir(wandb):
        if callable(getattr(wandb, attr_name)):
            setattr(wandb, attr_name, noop)
    wandb.log = print




def isWindows():
    """Return True when running on a Windows platform (win32/cygwin-style ids)."""
    return sys.platform[:3] == "win"

def isLinux():
    """Return True when running on a Linux platform."""
    return sys.platform[:5] == "linux"

def rgb_sobel_conv(model, x):
    """Reduce a color batch to one grayscale channel, then run the sobel filters."""
    gray = transforms.Grayscale(1)(x)
    return gray_sobel_conv(model, gray)

def gray_sobel_conv(model, x):
    """Apply four directional 3x3 sobel edge filters to a single-channel batch.

    Args:
        model: any module; only used to discover the device the computation
            should run on (via its first parameter).
        x: input batch, assumed shape (B, 1, H, W) — TODO confirm with callers.

    Returns:
        Tensor of shape (B, 4, H, W): horizontal, two diagonal, and vertical
        edge responses (zero-padded, stride 1).

    Fix: the old ``"cuda" in str(device)`` check + ``.cuda()`` always moved the
    kernel to the *default* GPU, breaking on e.g. "cuda:1", and ignored other
    device types. ``.to(device)`` generalizes to any device.
    """
    sobel_kernel = torch.tensor([
        [[
            [1, 2, 1],
            [0, 0, 0],
            [-1, -2, -1]
        ]],
        [[
            [0, 1, 2],
            [-1, 0, 1],
            [-2, -1, 0]
        ]],
        [[
            [-1, 0, 1],
            [-2, 0, 2],
            [-1, 0, 1]
        ]],
        [[
            [-2, -1, 0],
            [-1, 0, 1],
            [0, 1, 2]
        ]],
    ], dtype=torch.float32)
    sobel_kernel = sobel_kernel.to(next(model.parameters()).device)
    return F.conv2d(x, sobel_kernel, bias=None, stride=1, padding=1)

def input_modal_split(x):
    """Split a 4-D input batch into modalities by channel count.

    Args:
        x: tensor of shape (B, C, H, W).

    Returns:
        - C == 3: ``x`` unchanged (BGR only).
        - C == 4: tuple ``(bgr, dft)`` with shapes (B, 3, H, W) and (B, 1, H, W).

    Raises:
        ValueError: for any other channel count (the original silently
        returned None, deferring the failure to an unrelated call site).
    """
    _, channels, _, _ = x.size()
    if channels == 3:
        return x
    if channels == 4:
        # Slicing replaces the old split/cat round-trip; values are identical.
        return x[:, :3], x[:, 3:]
    raise ValueError("unsupported channel count: {}".format(channels))


def sobel_conv(model, x):
    """Dispatch to the grayscale or RGB sobel variant based on channel count."""
    b, c, h, w = x.size()
    if c == 1:
        return gray_sobel_conv(model, x)
    if c == 3:
        return rgb_sobel_conv(model, x)

def float_img_to_uint8_img(img):
    """Clamp a float image to [0, 1] and convert it to uint8 in [0, 255].

    Fix: the clamping step duplicated float_01_threshold line-for-line; reuse
    the existing helper instead. Note the conversion truncates rather than
    rounds (matches the original behavior).
    """
    img = float_01_threshold(img)
    return (img * 255).astype(np.uint8)

def is_bgr_img(img):
    """Return True for a 3-dimensional array with exactly 3 channels."""
    return len(img.shape) == 3 and img.shape[2] == 3

def is_gray_img(img):
    """Return True for any image that is not a 3-channel (BGR) array."""
    # Inline of the is_bgr_img check, negated.
    return not (len(img.shape) == 3 and img.shape[2] == 3)

def gray_modal(x):
    """Reduce ``x`` to a single grayscale channel."""
    to_single_channel = transforms.Grayscale(1)
    return to_single_channel(x)

def random_cut(img, target_width=224, target_height=224):
    """Crop a random ``target_height`` x ``target_width`` window from ``img``.

    Fix/generalization: the original trailing ``, :`` in the slice required a
    3-D (H, W, C) array and crashed on 2-D grayscale images; dropping it
    supports both shapes with identical results for 3-D input.

    Raises ValueError (from random.randint) when the image is smaller than
    the target window.
    """
    height, width = img.shape[:2]
    start_height = random.randint(0, height - target_height)
    start_width = random.randint(0, width - target_width)
    return img[start_height:start_height + target_height,
               start_width:start_width + target_width]

def center_cut(img, target_width=224, target_height=224):
    """Crop the central ``target_height`` x ``target_width`` window from ``img``.

    Fix/generalization: like random_cut, the original trailing ``, :`` slice
    required a 3-D array and crashed on 2-D grayscale images; dropping it
    supports both shapes with identical results for 3-D input.

    NOTE(review): if the image is smaller than the target the start offsets go
    negative and the crop silently shrinks — unchanged from the original;
    confirm callers always pass large-enough images.
    """
    height, width = img.shape[:2]
    start_height = (height - target_height) // 2
    start_width = (width - target_width) // 2
    return img[start_height:start_height + target_height,
               start_width:start_width + target_width]

def random_flip(img):
    """Flip ``img`` vertically and horizontally, each independently with p=0.5.

    Consumes exactly two random draws per call; returns a (possibly reversed)
    view of the input rather than a copy.
    """
    if random.random() < 0.5:
        img = img[::-1]          # vertical flip (axis 0)
    if random.random() < 0.5:
        img = img[:, ::-1]       # horizontal flip (axis 1)
    return img

def get_now_time_str_ms():
    """Current local time as 'YYYY-MM-DD-HH-MM-SS-mmm' (millisecond precision)."""
    stamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S-%f')
    # strftime's %f is microseconds; drop the last three digits to keep ms.
    return stamp[:-3]

def same_transform_with_imgs(cvimg_list, transform):
    """Apply one transform identically to several images by merging channels.

    All images' channels are stacked into a single multi-channel array so a
    (possibly random) transform acts on them consistently; the result is then
    split back into one float32 tensor per input image.

    Args:
        cvimg_list: list of HxW (grayscale) or HxWxC OpenCV images, all with
            the same spatial size.
        transform: callable taking the merged HxWxN array and returning a
            channels-first tensor (e.g. torchvision ToTensor-style) —
            presumably N stays dim 0 after the transform; verify with callers.

    Returns:
        List of float32 tensors, one per input image, each with its original
        channel count on dim 0.

    Fixes: removed the unused ``index`` local, and replaced the
    chunk-into-singles + iterator + cat reassembly with a single
    torch.split over the per-image channel counts.
    """
    channel_list = [1 if len(img.shape) == 2 else img.shape[-1] for img in cvimg_list]
    merged = np.stack([ch for img in cvimg_list for ch in cv2.split(img)], axis=-1)
    merged = transform(merged)
    merged = merged.to(torch.float32)
    # Split along dim 0 by each image's channel count.
    return list(torch.split(merged, channel_list, dim=0))

def label_nm_to_class(label, abs_flag, class_nm_threhold, nm_per_class):
    """Quantize a numeric label into a 1-element class-index tensor.

    abs_flag=True: classes cover [0, class_nm_threhold] in steps of
    nm_per_class. abs_flag=False: the label is shifted by +class_nm_threhold
    first so classes cover [-class_nm_threhold, +class_nm_threhold].
    In both cases the index is clamped to the last class.
    """
    if abs_flag:
        num_classes = class_nm_threhold // nm_per_class + 1
        idx = int(label // nm_per_class)
    else:
        shifted = max(label + class_nm_threhold, 0)
        num_classes = 2 * (class_nm_threhold // nm_per_class) + 1
        idx = int(shifted // nm_per_class)
    idx = min(idx, num_classes - 1)
    return torch.tensor([idx])

def labels_nm_to_class(labels, abs_flag, class_nm_threhold, nm_per_class):
    """Map each scalar in ``labels`` through label_nm_to_class.

    Returns a (N, 1) tensor of class indices.
    """
    class_labels = [
        label_nm_to_class(chunk.cpu().item(), abs_flag, class_nm_threhold, nm_per_class)
        for chunk in torch.split(labels, 1)
    ]
    return torch.cat(class_labels, dim=0).reshape(-1, 1)

# Entry-point placeholder: this module is a utility library meant to be imported.
if __name__ == '__main__':
    pass
