import os, datetime, sys
import torch
import numpy as np
import matplotlib.pyplot as plt 

class Logger(object):
    def __init__(self, filename, stream=sys.stdout):
        self.terminal = stream
        self.log = open(filename, 'w',encoding='utf-8')
        self.previousMsg = None
        sys.stdout = self
 
    def write(self, message):
        if self.previousMsg == None or "\n" in self.previousMsg:
            topMsg = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + " : "
            self.terminal.write(topMsg)
            self.log.write(topMsg)
 
        if isinstance(message, str):
            self.previousMsg = message
        if self.previousMsg == None:
            self.previousMsg = ""
 
        self.terminal.write(message)
        self.log.write(message)
        self.log.flush()
 
    def flush(self):
        pass

def set_seed(seed=42):
    """Seed NumPy and PyTorch RNGs and force deterministic cuDNN behavior.

    Args:
        seed (int): value used to seed all random number generators.
    """
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # seeds every visible GPU (no-op without CUDA)
    torch.backends.cudnn.deterministic = True
    # benchmark=True lets cuDNN auto-tune convolution algorithms, which is
    # non-deterministic and defeats the flag above; it must stay False for
    # reproducible runs.
    torch.backends.cudnn.benchmark = False

def count_parameters(model):
    """Return the total number of trainable (requires_grad) parameters in *model*."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total

def plot_loss(trainLoss, valLoss, path):
    """Plot train/val loss curves on a log-y scale and dump the raw values.

    Writes ``loss.png`` (semilog plot with a legend) and ``loss.txt``
    (two columns: train, val) into the directory *path*.
    """
    epochs = np.arange(1, len(trainLoss) + 1)
    for series, label in ((trainLoss, 'train'), (valLoss, 'val')):
        plt.semilogy(epochs, series, label=label)
    plt.legend()
    plt.savefig(os.path.join(path, 'loss.png'))
    plt.close()
    np.savetxt(os.path.join(path, 'loss.txt'),
               np.column_stack([trainLoss, valLoss]))

def print_gpu_memory():
    """Print current GPU memory usage in GB, or a notice if CUDA is absent."""
    if torch.cuda.is_available():
        gib = 1024 ** 3  # bytes per GiB
        # memory_allocated() is a subset of memory_reserved(), so the two are
        # reported separately — summing them (as before) double-counts.
        allocated = torch.cuda.memory_allocated() / gib
        reserved = torch.cuda.memory_reserved() / gib
        # Total memory of device 0 only; multi-GPU totals are not reported here.
        total = torch.cuda.get_device_properties(0).total_memory / gib

        print(f"Allocated/Reserved/Total Memory: "
              f"{allocated:.2f}/{reserved:.2f}/{total:.2f} GB")
    else:
        print("CUDA is not available")