import logging
import os.path
import shutil
import time

import torch
from torch.utils.tensorboard import SummaryWriter


def get_device():
    """Return the preferred torch device: CUDA when available, CPU otherwise."""
    if torch.cuda.is_available():
        return torch.device("cuda")
    return torch.device("cpu")


def get_time():
    """Return the current local time formatted as 'MMDD-HHMMSS'."""
    return time.strftime("%m%d-%H%M%S", time.localtime())


# 分类问题的onehot编码
# One-hot encoding for classification targets.
def one_hot_embedding(labels, num_classes=10):
    """Convert integer class labels to one-hot float vectors.

    Rows of an identity matrix are gathered by index, so `labels` may be an
    int, a list, or a tensor of indices in [0, num_classes). The result lives
    on CUDA when available, otherwise CPU (same policy as get_device).
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    identity = torch.eye(num_classes).to(device)
    return identity[labels]


def get_log(sss: str, path='./logs', name='1'):
    """Log the message `sss` at INFO level to the console and to `<path>/<name>`.

    Fixes a handler leak in the original: every call attached a fresh
    FileHandler and StreamHandler to the same named logger, so the N-th call
    emitted each line N times. Handlers are now attached only on first use.
    Also creates `path` if it does not exist instead of raising.

    Args:
        sss: the message to log.
        path: directory holding the log file (created if missing).
        name: both the logger name and the log file name.
    """
    os.makedirs(path, exist_ok=True)
    _logger = logging.getLogger(name)
    _logger.setLevel(logging.INFO)
    if not _logger.handlers:
        # One handler writes to the log file, the other mirrors to the console.
        fh = logging.FileHandler(f'{path}/{name}', encoding="utf-8", mode="a")
        ch = logging.StreamHandler()
        formatter = logging.Formatter('%(asctime)s %(message)s', datefmt='%m%d-%H%M%S')
        fh.setFormatter(formatter)
        ch.setFormatter(formatter)
        _logger.addHandler(fh)
        _logger.addHandler(ch)
    _logger.log(logging.INFO, sss)


def get_writer(path, name):
    """Create a TensorBoard SummaryWriter whose log dir is `<path>/<name>`."""
    log_dir = f'{path}/{name}'
    return SummaryWriter(log_dir)


class WriterSingleton:
    """Process-wide single SummaryWriter.

    The first construction creates a SummaryWriter rooted at `path`; every
    later call returns that same writer, ignoring its `path`/`name`
    arguments. Note the quirk: `__new__` returns the SummaryWriter itself,
    not a WriterSingleton instance, so `WriterSingleton(p)` yields a writer.
    """

    _instance = None  # the lone wrapper instance, created lazily

    def __new__(cls, path='', name=''):
        if cls._instance is None:
            wrapper = super().__new__(cls)
            wrapper.writer = SummaryWriter(path)
            cls._instance = wrapper
        return cls._instance.writer


class LoggerSingleton:
    """Process-wide singleton logger writing to `os.path.join(path, name)` and the console.

    Only the first call's `path`/`name` take effect; every later call returns
    the already-configured logger unchanged. Note the quirk: `__new__` returns
    the logging.Logger itself, not a LoggerSingleton instance.
    """

    _instance = None  # the lone wrapper instance, created lazily

    def __new__(cls, path='', name=''):
        if cls._instance is None:
            wrapper = super().__new__(cls)
            configured = logging.getLogger(name)
            configured.setLevel(logging.INFO)
            # One handler writes to the log file, the other mirrors to the console.
            file_handler = logging.FileHandler(os.path.join(path, name), encoding="utf-8", mode="a")
            stream_handler = logging.StreamHandler()
            fmt = logging.Formatter('%(asctime)s %(message)s', datefmt='%m%d-%H%M%S')
            file_handler.setFormatter(fmt)
            stream_handler.setFormatter(fmt)
            configured.addHandler(file_handler)
            configured.addHandler(stream_handler)
            wrapper.logger = configured
            cls._instance = wrapper
        return cls._instance.logger


def save_checkpoint(state, is_best, prefix='', filename=''):
    """Persist `state` to `<prefix>/<filename>`, retrying on flaky I/O.

    Up to 15 attempts are made (to cope with unstable filesystems). On a
    successful save, when `is_best` is set, the checkpoint is also copied to
    `<prefix>/best.pth.tar`. If every attempt fails, the last IOError is
    re-raised.
    """
    remaining = 15
    last_error = None

    # deal with unstable I/O. Usually not necessary.
    target = os.path.join(prefix, filename)
    while remaining:
        try:
            torch.save(state, target)
            if is_best:
                best_path = os.path.join(prefix, "best.pth.tar")
                shutil.copyfile(target, best_path)
                print(f"best model saved to {best_path}")
        except IOError as err:
            last_error = err
            remaining -= 1
        else:
            # Save (and optional copy) succeeded; stop retrying.
            break
        print(f"model save (unknown) failed, remaining {remaining} trials")
        if not remaining:
            raise last_error


def adjust_learning_rate(opt, optimizer, epoch):
    """Apply step decay to the optimizer's learning rate.

    Sets every param group's lr to `opt.lr` decayed by a factor of 10 for
    each completed `opt.lr_update` epochs (integer division).
    """
    decayed_lr = opt.lr * (0.1 ** (epoch // opt.lr_update))
    for group in optimizer.param_groups:
        group["lr"] = decayed_lr
