import os
import numpy as np
import torch
import matplotlib.pyplot as plt
import shutil
import glob
import pandas as pd

def del_files(path):
    """
    Delete every file and subdirectory directly under `path` (best effort).

    Args:
        path: directory to empty; the directory itself is kept.
    """
    # Original wrapped the whole loop in a bare `except: pass`, which both
    # swallowed non-error signals (KeyboardInterrupt, SystemExit) and aborted
    # the entire sweep on the first failed deletion. Handle each entry
    # individually and only ignore OS-level removal errors.
    for entry in glob.glob(os.path.join(path, '*')):
        try:
            if os.path.isfile(entry):
                os.remove(entry)
            else:
                shutil.rmtree(entry)
        except OSError:
            # Best-effort cleanup: skip entries that cannot be removed
            # (permissions, races) and keep going.
            continue


class EarlyStopping:
    """
    Early-stopping helper for training loops.

    Tracks a monitored value across epochs, checkpoints the model on every
    improvement, and raises the `early_stop` flag after `patience` consecutive
    non-improving checks.
    """

    def __init__(self, accelerator=None, patience=7, verbose=False, delta=0, monitor_mode='min'):
        """
        Args:
            accelerator: optional accelerator object for distributed training
            patience (int): number of non-improving epochs before stopping
            verbose (bool): whether to print a message on each checkpoint save
            delta (float): minimum change that counts as an improvement
            monitor_mode (str): 'min' if lower is better, 'max' if higher is better
        """
        self.accelerator = accelerator
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        # np.Inf was removed in NumPy 2.0; np.inf is the supported spelling.
        self.val_loss_min = np.inf
        self.delta = delta
        self.monitor_mode = monitor_mode

    def __call__(self, val_loss, model, path):
        """
        Check the current monitored value and update stopping state.

        Args:
            val_loss (float): current monitored value (loss or other metric)
            model: model to checkpoint on improvement
            path (str): directory to save the checkpoint into
        """
        # Normalize so that a larger score is always better.
        if self.monitor_mode == 'min':
            score = -val_loss
        else:
            score = val_loss

        if self.best_score is None:
            # First observation always counts as the best so far.
            self.best_score = score
            self.save_checkpoint(val_loss, model, path)
        elif score < self.best_score + self.delta:
            # No (sufficient) improvement: advance the patience counter.
            self.counter += 1
            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            # Improvement: record it, save, and reset the counter.
            self.best_score = score
            self.save_checkpoint(val_loss, model, path)
            self.counter = 0

    def save_checkpoint(self, val_loss, model, path):
        """
        Save the model's state dict to `<path>/checkpoint`.

        Args:
            val_loss (float): current monitored value (recorded as the new best)
            model: model whose state dict is saved
            path (str): checkpoint directory (created if missing)
        """
        if self.verbose:
            if self.monitor_mode == 'min':
                print(f'监控指标decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
            else:
                print(f'监控指标increased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')

        # exist_ok avoids the check-then-create race when multiple
        # distributed processes reach this point simultaneously.
        os.makedirs(path, exist_ok=True)
        checkpoint_path = os.path.join(path, 'checkpoint')

        if self.accelerator:
            # Distributed training: unwrap the model and let the
            # accelerator handle rank-aware saving.
            model_to_save = self.accelerator.unwrap_model(model)
            self.accelerator.wait_for_everyone()
            self.accelerator.save(model_to_save.state_dict(), checkpoint_path)
        else:
            # Single-GPU or CPU environment.
            torch.save(model.state_dict(), checkpoint_path)

        self.val_loss_min = val_loss


def adjust_learning_rate(optimizer, epoch, args):
    """
    Adjust the optimizer's learning rate based on the current epoch.

    Halves the learning rate of every parameter group whenever the epoch
    number is a multiple of 10.

    Args:
        optimizer: optimizer whose learning rate is adjusted in place
        epoch (int): current epoch
        args: command-line arguments (currently unused; kept for interface
              compatibility with callers)
    """
    # Guard clause: only act on every 10th epoch.
    if epoch % 10 != 0:
        return
    for group in optimizer.param_groups:
        group['lr'] *= 0.5


def compute_metrics(pred, true):
    """
    Compute evaluation metrics between predictions and ground truth.

    Args:
        pred: predicted values (any array-like; converted to float ndarray)
        true: ground-truth values (array-like, same shape as pred)

    Returns:
        dict with keys:
            'mae'    : mean absolute error
            'mse'    : mean squared error
            'rel_err': mean relative error (epsilon-guarded denominator)
            'kl_div' : mean KL divergence KL(true || pred) over the last axis
    """
    # Generalization: accept plain lists/tuples as well as ndarrays.
    pred = np.asarray(pred, dtype=np.float64)
    true = np.asarray(true, dtype=np.float64)

    mae = np.mean(np.abs(pred - true))
    mse = np.mean((pred - true) ** 2)
    # Epsilon guards against division by zero for near-zero targets.
    rel_err = np.mean(np.abs(pred - true) / (np.abs(true) + 1e-5))

    # KL divergence: normalize both along the last axis so each row
    # is (approximately) a probability distribution.
    pred_normalized = pred / (np.sum(pred, axis=-1, keepdims=True) + 1e-10)
    true_normalized = true / (np.sum(true, axis=-1, keepdims=True) + 1e-10)

    # Clip to avoid log(0) and division by zero.
    pred_normalized = np.clip(pred_normalized, 1e-10, 1.0)
    true_normalized = np.clip(true_normalized, 1e-10, 1.0)

    # KL(true || pred) = sum(true * log(true / pred)) over the last axis.
    kl_div = np.mean(np.sum(true_normalized * np.log(true_normalized / pred_normalized), axis=-1))

    return {
        'mae': mae,
        'mse': mse,
        'rel_err': rel_err,
        'kl_div': kl_div
    }


def load_content(args):
    """
    Load task-related text content from the prompt bank.

    If the prompt file exists under `<args.root_path>/../dataset/prompt_bank/`,
    its contents are returned. Otherwise a default prompt is returned and
    also written to that location so it can be edited later. Any error falls
    back to the default prompt.

    Args:
        args: argument object providing `root_path`

    Returns:
        content: the loaded (or default) prompt text
    """
    default_content = '信道预测任务是通过分析信道簇特征预测探头权重的关键技术，可提高无线通信系统性能。'
    file = 'CHANNEL'  # default prompt file name for this task

    try:
        prompt_path = os.path.join(args.root_path, '../dataset/prompt_bank/{0}.txt'.format(file))
        if os.path.exists(prompt_path):
            # Prompt file found: use its contents verbatim.
            with open(prompt_path, 'r', encoding='utf-8') as f:
                content = f.read()
        else:
            # No prompt file: fall back to the default and persist it
            # so users can customize it afterwards.
            content = default_content
            prompt_dir = os.path.dirname(prompt_path)
            if not os.path.exists(prompt_dir):
                os.makedirs(prompt_dir)
            with open(prompt_path, 'w', encoding='utf-8') as f:
                f.write(content)
            print(f"已创建默认提示文件: {prompt_path}")
    except Exception as e:
        # Best-effort: report the problem and return the default prompt.
        print(f"加载提示内容时出错: {e}")
        content = default_content

    return content


def visualize_weights(pred_weights, true_weights, n_probes=32, save_path=None):
    """
    Visualize probe-weight distributions as a pair of stacked heatmaps
    (true on top, predicted below).

    Args:
        pred_weights: predicted probe weights, shape [T, n_probes]
        true_weights: ground-truth probe weights, same shape
        n_probes (int): number of probes (unused; kept for interface
                        compatibility with callers)
        save_path (str, optional): file path to save the figure to;
                                   shown interactively when None
    """
    fig, (ax_true, ax_pred) = plt.subplots(2, 1, figsize=(12, 10))

    # Transpose so probes run along the y-axis and time along the x-axis.
    panels = (
        (ax_true, true_weights, 'True Probe Weights'),
        (ax_pred, pred_weights, 'Predicted Probe Weights'),
    )
    for ax, weights, title in panels:
        image = ax.imshow(weights.T, aspect='auto', cmap='viridis')
        ax.set_title(title)
        ax.set_xlabel('Time Steps')
        ax.set_ylabel('Probe Index')
        plt.colorbar(image, ax=ax)

    plt.tight_layout()

    if save_path:
        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        plt.savefig(save_path)
    else:
        plt.show()

    plt.close(fig)


def visualize_training_metrics(train_losses, val_losses, val_mse_metrics=None, save_path=None):
    """
    Plot training/validation loss curves over epochs, with an optional
    second panel for validation MSE.

    Args:
        train_losses (list): per-epoch training losses
        val_losses (list): per-epoch validation losses
        val_mse_metrics (list, optional): per-epoch validation MSE values
        save_path (str, optional): file path to save the figure to;
                                   shown interactively when None
    """
    if not train_losses:
        print("没有训练数据可视化")
        return

    # One panel for losses, plus one for MSE when provided.
    has_mse = val_mse_metrics is not None
    n_plots = 2 if has_mse else 1
    fig, axes = plt.subplots(1, n_plots, figsize=(n_plots * 7, 6))

    epochs = list(range(1, len(train_losses) + 1))

    # With a single subplot, `axes` is the Axes itself rather than an array.
    loss_ax = axes[0] if has_mse else axes
    loss_ax.plot(epochs, train_losses, 'b-', marker='o', label='Train Loss')
    loss_ax.plot(epochs, val_losses, 'r-', marker='s', label='Validation Loss')
    loss_ax.set_title('Loss over Epochs')
    loss_ax.set_xlabel('Epoch')
    loss_ax.set_ylabel('Loss')
    loss_ax.legend()
    loss_ax.grid(True)

    if has_mse:
        mse_ax = axes[1]
        mse_ax.plot(epochs, val_mse_metrics, 'g-', marker='d', label='Validation MSE')
        mse_ax.set_title('MSE Metrics over Epochs')
        mse_ax.set_xlabel('Epoch')
        mse_ax.set_ylabel('MSE')
        mse_ax.legend()
        mse_ax.grid(True)

    plt.tight_layout()

    if save_path:
        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        plt.savefig(save_path)
        print(f"训练指标可视化已保存至: {save_path}")
    else:
        plt.show()

    plt.close(fig)