import pandas as pd
import torch
from typing import Callable, Optional, Literal
import os

from MyUtil import MyLoggerV1, get_logger, get_best_file, get_checkpoint_file, get_test_csv_path
from sklearn.metrics import confusion_matrix


class WindIcingTrainerV1():
    """Trainer for wind-turbine icing classification.

    The model is assumed to map (batch, seq_len, window, feature) inputs to
    (batch, seq_len, class_num) logits; the LAST time step's output is used
    as the per-sample prediction.  Supports optional custom loss / optimizer
    step / metrics hooks, warmup, periodic checkpointing, early stopping and
    resuming from a checkpoint.
    """

    def __init__(self, model, train_loader, valid_loader, optimizer, criterion, config,
                 device=torch.device('cpu'),
                 loss_function: Optional[Callable] = None,
                 step_function: Optional[Callable] = None,
                 metrics_function: Optional[Callable] = None
                 ):
        self.model = model
        self.train_loader = train_loader
        self.valid_loader = valid_loader
        self.optimizer = optimizer
        self.criterion = criterion
        self.config = config

        self.device = device
        # Optional hooks:
        #   loss_function(preds, labels, criterion) -> loss tensor
        #   step_function(epoch, loss, optimizer)   -> performs the step
        #   metrics_function(pred_classes, label_classes) -> object with .print()
        self.loss_function = loss_function
        self.step_function = step_function
        self.metrics_function = metrics_function

        self.epochs = config['epochs']
        self.start_epoch = 1

        self.warmup_epochs = config['warmup_epochs']
        self.warmup_loss = config['warmup_loss']
        self.checkpoint_step = config['checkpoint_step']
        self.early_stopping = config['early_stopping']
        self.patience = config['patience']

        self.train_size = config["data_dict"]['train_size']
        self.valid_size = config["data_dict"]['valid_size']
        self.test_size = config["data_dict"]['test_size']

        self.best_loss = None
        self.best_metrics = None

        self.logger = get_logger()

        if config['resume']:
            self._resume_checkpoint(config['resume_path'])

    def train(self):
        """Run the full train/validate loop with checkpointing and optional
        early stopping; logs the best validation result at the end."""
        self.model.to(self.device)
        self.criterion.to(self.device)

        self.logger.info(f'begin training for {self.epochs} epochs, starting from epoch {self.start_epoch}')

        early_stopping_counter = 0

        for epoch in range(self.start_epoch, self.epochs + 1):
            train_loss = self._train_epoch(epoch)
            self.logger.info(f'Epoch {epoch} | Train Loss: {train_loss:.10f}')

            valid_loss, metrics = self._valid_epoch(epoch)
            self.logger.info(f'Epoch {epoch} | Valid Loss: {valid_loss:.10f}')
            # metrics is None when no metrics_function was supplied
            if metrics is not None:
                metrics.print()

            # Once validation loss drops below the warmup threshold,
            # checkpointing / early stopping become active immediately.
            if valid_loss < self.warmup_loss:
                self.warmup_epochs = 0

            if epoch > self.warmup_epochs:

                if self.best_loss is None or valid_loss < self.best_loss:
                    self.best_loss = valid_loss
                    self.best_metrics = metrics
                    early_stopping_counter = 0
                    self._save_checkpoint(epoch, save_best=True)

                elif epoch % self.checkpoint_step == 0:
                    self._save_checkpoint(epoch)

                if self.early_stopping:
                    if valid_loss > self.best_loss:
                        early_stopping_counter += 1
                        self.logger.info(f'Early stopping counter: {early_stopping_counter} out of {self.patience}')

                        if early_stopping_counter >= self.patience:
                            self.logger.info(f'Training early stops at epoch {epoch}')
                            break

        # best_loss / best_metrics stay None when no epoch passed warmup
        if self.best_loss is not None:
            self.logger.info(f"Best Valid Loss: {self.best_loss:.10f}")
        if self.best_metrics is not None:
            self.logger.info("Best Valid Result: ")
            self.best_metrics.print()

    def _input_output(self, data_loader):
        # Placeholder hook kept for subclass overrides.
        pass

    def _train_epoch(self, epoch):
        """Train one epoch.

        Returns the summed batch losses divided by ``train_size`` (assumes
        the criterion uses sum reduction, so this is the mean sample loss).
        """
        self.model.train()
        total_loss = 0
        for idx, ((inputs, _), labels) in enumerate(self.train_loader):
            # inputs: (batch_size, seq_len, window_size, feature_dim)
            inputs = inputs.to(self.device)
            # labels: (batch_size)
            labels = labels.to(self.device)

            self.optimizer.zero_grad()
            # outputs: (batch_size, seq_len, class_num)
            outputs = self.model(inputs)

            # Use the last time step's output as the prediction
            preds = outputs[:, -1, :]

            # Backward pass through either the custom or the default loss
            if self.loss_function is not None:
                loss = self.loss_function(preds, labels, self.criterion)
            else:
                loss = self.criterion(preds, labels)
            loss.backward()

            # Optimizer step (possibly via a custom schedule hook)
            if self.step_function is not None:
                self.step_function(epoch, loss, self.optimizer)
            else:
                self.optimizer.step()

            total_loss += loss.item()

        return total_loss / self.train_size

    def _valid_epoch(self, epoch):
        """Evaluate on the validation set.

        Returns:
            (mean_loss, metrics): ``metrics`` is None when no
            ``metrics_function`` was supplied.
        """
        self.model.eval()
        total_loss = 0

        pred_classes = []
        label_classes = []

        with torch.no_grad():
            for idx, ((inputs, _), labels) in enumerate(self.valid_loader):
                # inputs: (batch_size, seq_len, window_size, feature_dim)
                inputs = inputs.to(self.device)
                # labels: (batch_size)
                labels = labels.to(self.device)

                # outputs: (batch_size, seq_len, class_num)
                outputs = self.model(inputs)

                # Prediction comes from the last time step
                preds = outputs[:, -1, :]

                # preds are logits; argmax gives the predicted class
                predicted_classes = preds.argmax(dim=1)

                pred_classes.extend(predicted_classes.cpu().numpy())
                label_classes.extend(labels.cpu().numpy())

                if self.loss_function is not None:
                    loss = self.loss_function(preds, labels, self.criterion)
                else:
                    loss = self.criterion(preds, labels)

                total_loss += loss.item()

        metrics = None
        if self.metrics_function is not None:
            metrics = self.metrics_function(pred_classes, label_classes)

        return total_loss / self.valid_size, metrics

    def _save_checkpoint(self, epoch, save_best=False):
        """Persist model/optimizer/config state; ``save_best`` writes to the
        dedicated best-model slot instead of the rolling checkpoint."""
        state = {
            'epoch': epoch,
            'state_dict': self.model.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'config': self.config,
            'best_score': self.best_loss,
        }

        if save_best:
            filename = get_best_file(self.config['no'])
            torch.save(state, filename)
            # Log the actual destination (the f-string had no placeholder)
            self.logger.info(f'Saving current best model to {filename}......')
        else:
            filename = get_checkpoint_file(self.config['no'])
            torch.save(state, filename)
            self.logger.info(f'Saving checkpoint to {filename}......')

    def _resume_checkpoint(self, path):
        """Restore training state (epoch, best score, model, optimizer)
        from a checkpoint produced by ``_save_checkpoint``."""
        self.logger.info(f'Loading checkpoint from {path}......')
        # map_location so checkpoints saved on another device still load
        checkpoint = torch.load(path, map_location=self.device)

        self.start_epoch = checkpoint['epoch'] + 1
        self.best_loss = checkpoint['best_score']

        self.model.load_state_dict(checkpoint['state_dict'])

        # Rebuild the optimizer so it is bound to the freshly-loaded
        # parameters, then restore its saved state.
        clazz = self.optimizer.__class__
        self.optimizer = clazz(self.model.parameters(), lr=self.config['lr'])

        self.optimizer.load_state_dict(checkpoint['optimizer'])

        # Move any optimizer state tensors onto the training device.
        for state in self.optimizer.state.values():
            for k, v in state.items():
                if isinstance(v, torch.Tensor):
                    state[k] = v.to(self.device)

        self.logger.info(f'Resume training from epoch {self.start_epoch}')

    def test(self, test_loader):
        """Evaluate on a held-out test set, logging loss (when a loss is
        configured) and metrics (when a metrics_function is configured)."""
        self.logger.info('begin testing')
        self.model.to(self.device)
        self.model.eval()
        with torch.no_grad():
            total_loss = 0
            pred_classes = []
            label_classes = []
            for idx, ((inputs, _), labels) in enumerate(test_loader):
                # inputs: (batch_size, seq_len, window_size, feature_dim)
                inputs = inputs.to(self.device)
                # labels: (batch_size)
                labels = labels.to(self.device)

                # outputs: (batch_size, seq_len, class_num)
                outputs = self.model(inputs)
                # Prediction comes from the last time step
                preds = outputs[:, -1, :]
                # preds are logits; argmax gives the predicted class
                predicted_classes = preds.argmax(dim=1)

                pred_classes.extend(predicted_classes.cpu().numpy())
                label_classes.extend(labels.cpu().numpy())

                if self.loss_function is not None:
                    loss = self.loss_function(preds, labels, self.criterion)
                    total_loss += loss.item()
                elif self.criterion is not None:
                    loss = self.criterion(preds, labels)
                    total_loss += loss.item()

                # Progress log roughly every 5% of the loader
                if idx % (len(test_loader) // 20 + 1) == 0:
                    self.logger.info(f'idx: {idx} out of {len(test_loader)}')

        if self.loss_function is not None or self.criterion is not None:
            self.logger.info(f'Test loss: {total_loss / self.test_size}')

        if self.metrics_function is not None:
            metrics = self.metrics_function(pred_classes, label_classes)
            self.logger.info('test result: ')
            metrics.print()

class WindIcingTrainerV2(WindIcingTrainerV1):
    """Trainer for models that prepend a CLS token.

    Identical to V1 except the prediction is taken from the FIRST time step
    (the CLS position) of a (batch, seq_len + 1, class_num) output.
    """

    def __init__(self, model, train_loader, valid_loader, optimizer, criterion, config,
                 device=torch.device('cpu'),
                 loss_function: Optional[Callable] = None,
                 step_function: Optional[Callable] = None,
                 metrics_function: Optional[Callable] = None
                 ):
        super().__init__(model, train_loader, valid_loader, optimizer, criterion, config,
                         device, loss_function, step_function, metrics_function)

    def _get_src_key_padding_mask(self, mask_indexes):
        """Build a (batch_size, seq_len + 1) boolean padding mask.

        Index 0 is the CLS position.  For each sample, time steps after
        ``mask_indexes[i]`` (shifted by 1 for the CLS token) are masked out,
        but only when the mask start position is positive.
        """
        src_key_padding_mask = torch.zeros(mask_indexes.shape[0], self.config['seq_len'] + 1).bool()
        for i in range(mask_indexes.shape[0]):
            start = mask_indexes[i]
            if start > 0:  # only mask when a start position was recorded
                src_key_padding_mask[i, start + 1:] = True
        return src_key_padding_mask

    def _train_epoch(self, epoch):
        """Train one epoch using the CLS (first) time step's output."""
        self.model.train()
        total_loss = 0
        for idx, ((inputs, _), labels) in enumerate(self.train_loader):
            # inputs: (batch_size, seq_len, window_size, feature_dim)
            inputs = inputs.to(self.device)
            # labels: (batch_size)
            labels = labels.to(self.device)

            self.optimizer.zero_grad()
            # outputs: (batch_size, seq_len + 1, class_num)
            outputs = self.model(inputs)

            # CLS vector: first time step -> (batch_size, class_num)
            preds = outputs[:, 0, :]

            # Backward pass through either the custom or the default loss
            if self.loss_function is not None:
                loss = self.loss_function(preds, labels, self.criterion)
            else:
                loss = self.criterion(preds, labels)
            loss.backward()

            # Optimizer step (possibly via a custom schedule hook)
            if self.step_function is not None:
                self.step_function(epoch, loss, self.optimizer)
            else:
                self.optimizer.step()

            total_loss += loss.item()

        return total_loss / self.train_size

    def _valid_epoch(self, epoch):
        """Validate one epoch; returns (mean_loss, metrics-or-None)."""
        self.model.eval()
        total_loss = 0

        pred_classes = []
        label_classes = []

        with torch.no_grad():
            for idx, ((inputs, _), labels) in enumerate(self.valid_loader):
                # inputs: (batch_size, seq_len, window_size, feature_dim)
                inputs = inputs.to(self.device)
                # labels: (batch_size)
                labels = labels.to(self.device)

                # outputs: (batch_size, seq_len + 1, class_num)
                outputs = self.model(inputs)

                # CLS vector: first time step -> (batch_size, class_num)
                preds = outputs[:, 0, :]

                # preds are logits; argmax gives the predicted class
                predicted_classes = preds.argmax(dim=1)

                pred_classes.extend(predicted_classes.cpu().numpy())
                label_classes.extend(labels.cpu().numpy())

                if self.loss_function is not None:
                    loss = self.loss_function(preds, labels, self.criterion)
                else:
                    loss = self.criterion(preds, labels)

                total_loss += loss.item()

        metrics = None
        if self.metrics_function is not None:
            metrics = self.metrics_function(pred_classes, label_classes)

        return total_loss / self.valid_size, metrics

    def test(self, test_loader):
        """Evaluate on a held-out test set using the CLS output.

        Matches V1.test: the loss is only computed/accumulated when a
        loss_function or criterion is configured (previously this crashed
        when both were None, despite the post-loop guard expecting it).
        """
        self.logger.info('begin testing')
        self.model.to(self.device)
        self.model.eval()

        total_loss = 0

        pred_classes = []
        label_classes = []

        with torch.no_grad():
            for idx, ((inputs, _), labels) in enumerate(test_loader):
                # inputs: (batch_size, seq_len, window_size, feature_dim)
                inputs = inputs.to(self.device)
                # labels: (batch_size)
                labels = labels.to(self.device)

                # outputs: (batch_size, seq_len + 1, class_num)
                outputs = self.model(inputs)

                # CLS vector: first time step -> (batch_size, class_num)
                preds = outputs[:, 0, :]

                # preds are logits; argmax gives the predicted class
                predicted_classes = preds.argmax(dim=1)

                pred_classes.extend(predicted_classes.cpu().numpy())
                label_classes.extend(labels.cpu().numpy())

                if self.loss_function is not None:
                    loss = self.loss_function(preds, labels, self.criterion)
                    total_loss += loss.item()
                elif self.criterion is not None:
                    loss = self.criterion(preds, labels)
                    total_loss += loss.item()

                # Progress log roughly every 5% of the loader
                if idx % (len(test_loader) // 20 + 1) == 0:
                    self.logger.info(f'idx: {idx} out of {len(test_loader)}')

        if self.loss_function is not None or self.criterion is not None:
            self.logger.info(f'Test loss: {total_loss / self.test_size}')

        if self.metrics_function is not None:
            metrics = self.metrics_function(pred_classes, label_classes)
            self.logger.info('test result: ')
            metrics.print()
    
class WindIcingTrainerV3(WindIcingTrainerV2):
    """Multi-task trainer: jointly predicts power (MSE on normal samples
    only) and the icing label (cross-entropy), combined with a
    ``power_lambda`` weighting.  The model takes ``(inputs, powers)`` and
    returns ``(power_preds, label_preds)``.
    """

    def __init__(self, model, train_loader, valid_loader, optimizer, criterion, config,
                 device=torch.device('cpu'),
                 loss_function: Optional[Callable] = None,
                 step_function: Optional[Callable] = None,
                 metrics_function: Optional[Callable] = None
                 ):
        super().__init__(model, train_loader, valid_loader, optimizer, criterion, config,
                         device, loss_function, step_function, metrics_function)
        self.criterion_power = torch.nn.MSELoss(reduction='sum')
        self.criterion_label = torch.nn.CrossEntropyLoss(reduction='sum')
        # Lambda weight for the power-prediction term of the joint loss
        self.power_lambda = config['power_lambda']

    def _to_device(self):
        """Move the model and both criteria onto the configured device."""
        self.model.to(self.device)
        self.criterion_power.to(self.device)
        self.criterion_label.to(self.device)
        self.logger.info(f"Trainer to device: {self.device}")

    def train(self):
        """Full train/validate loop; best model is selected by the LABEL
        loss (not the combined loss)."""
        self._to_device()

        self.logger.info(f'Begin training for {self.epochs} epochs, starting from epoch {self.start_epoch}')

        early_stopping_counter = 0

        for epoch in range(self.start_epoch, self.epochs + 1):
            self.logger.info(f'Begin Training for Epoch {epoch}')
            train_res = self._run_epoch(self.train_loader, mode="train")
            train_loss = train_res["mean_loss"]
            power_loss = train_res["mean_loss_power_pred"]
            label_loss = train_res["mean_loss_label_pred"]

            info = f'Train Epoch {epoch} | Train Loss: {train_loss:.10f} | Power Loss: {power_loss:.10f} | Label Loss: {label_loss:.10f}'
            self.logger.info(info)

            self.logger.info(f"Begin Validing for Epoch {epoch}")
            valid_res = self._run_epoch(self.valid_loader, mode="valid")
            valid_loss = valid_res["mean_loss"]
            power_loss = valid_res["mean_loss_power_pred"]
            label_loss = valid_res["mean_loss_label_pred"]

            info = f'Valid Epoch {epoch} | Valid Loss: {valid_loss:.10f} | Power Loss: {power_loss:.10f} | Label Loss: {label_loss:.10f}'
            self.logger.info(info)
            metrics = valid_res["metrics"]
            # metrics is None when no metrics_function was supplied
            if metrics is not None:
                metrics.print()

            # Once label loss drops below the warmup threshold,
            # checkpointing / early stopping become active immediately.
            if label_loss < self.warmup_loss:
                self.warmup_epochs = 0

            if epoch > self.warmup_epochs:

                if self.best_loss is None or label_loss < self.best_loss:
                    self.best_loss = label_loss
                    self.best_metrics = metrics
                    early_stopping_counter = 0
                    self._save_checkpoint(epoch, save_best=True)

                elif epoch % self.checkpoint_step == 0:
                    self._save_checkpoint(epoch)

                if self.early_stopping:
                    if label_loss > self.best_loss:
                        early_stopping_counter += 1
                        self.logger.info(f'Early stopping counter: {early_stopping_counter} out of {self.patience}')

                        if early_stopping_counter >= self.patience:
                            self.logger.info(f'Training early stops at epoch {epoch}')
                            break

        # best_loss / best_metrics stay None when no epoch passed warmup
        if self.best_loss is not None:
            self.logger.info(f"Best Valid Loss: {self.best_loss:.10f}")
        if self.best_metrics is not None:
            self.logger.info("Best Valid Result: ")
            self.best_metrics.print()

    def test(self, test_loader):
        """Evaluate on the test set and dump per-sample predictions
        (classes and power curves) to the run's test CSV."""
        self._to_device()
        self.logger.info('Begin Testing')
        test_res = self._run_epoch(test_loader, mode="test")
        test_loss = test_res["mean_loss"]
        power_loss = test_res["mean_loss_power_pred"]
        label_loss = test_res["mean_loss_label_pred"]

        info = f'Test Loss: {test_loss:.10f} | Power Loss: {power_loss:.10f} | Label Loss: {label_loss:.10f}'
        self.logger.info(info)

        metrics = test_res["metrics"]
        # metrics is None when no metrics_function was supplied
        if metrics is not None:
            self.logger.info('Test Metrics Result: ')
            metrics.print()

        # Record the test-set power-prediction curves to CSV
        pred_classes = test_res["pred_classes"]
        label_classes = test_res["label_classes"]
        pred_powers = test_res['pred_powers']
        true_powers = test_res['true_powers']

        df = pd.DataFrame(dict(
            pred_classes=pred_classes,
            label_classes=label_classes,
            pred_powers=pred_powers,
            true_powers=true_powers
        ))
        df.to_csv(get_test_csv_path(self.config["no"]), index=False, encoding='utf-8')

    def _run_epoch(self, data_loader, mode: Literal['train', 'valid', 'test']):
        """Run one pass over ``data_loader``.

        mode='train' enables gradients and optimizer steps; 'valid' and
        'test' also collect predicted/true classes, and 'test' additionally
        collects power curves.  Returns a dict of mean losses (normalized by
        the number of samples seen), metrics (or None) and the collected
        predictions.
        """
        if mode == 'train':
            self.model.train()
        else:
            self.model.eval()

        length = 0
        total_loss = 0
        total_loss_power_pred = 0
        total_loss_label_pred = 0
        total_loss_power_icing_pred = 0
        pred_classes = []
        label_classes = []

        pred_powers = []
        true_powers = []

        no_grad = mode != 'train'
        with torch.no_grad() if no_grad else torch.enable_grad():

            for idx, ((inputs, powers), labels) in enumerate(data_loader):

                length += inputs.shape[0]

                inputs = inputs.to(self.device)
                powers = powers.to(self.device)
                labels = labels.to(self.device)

                if mode == 'train':
                    self.optimizer.zero_grad()

                power_preds, label_preds = self.model(inputs, powers)
                # Select positions where labels are 0 (0 = normal, 1 = fault);
                # the power loss is only computed on normal samples.
                mask = (labels == 0)
                if mask.sum() == 0:
                    # Float tensor (was an int tensor) so dtype matches the
                    # weighted sum below.
                    power_loss = torch.tensor(0.0, device=self.device)
                else:
                    power_loss = self.criterion_power(power_preds[mask], powers[mask])

                label_loss = self.criterion_label(label_preds, labels)

                loss = self.power_lambda * power_loss + (1 - self.power_lambda) * label_loss

                if mode == 'train':
                    loss.backward()
                    self.optimizer.step()

                total_loss += loss.item()
                total_loss_power_pred += power_loss.item()
                total_loss_label_pred += label_loss.item()
                total_loss_power_icing_pred += power_loss.item()

                if mode != "train":
                    # label_preds are logits; argmax gives the predicted class
                    predicted_classes = label_preds.argmax(dim=1)
                    pred_classes.extend(predicted_classes.cpu().numpy())
                    label_classes.extend(labels.cpu().numpy())

                    if mode == "test":
                        for power_pred in power_preds.cpu().numpy():
                            pred_powers.append(power_pred.tolist())
                        for power in powers.cpu().numpy():
                            true_powers.append(power.tolist())

        metrics = None
        if mode != "train":
            if self.metrics_function is not None:
                metrics = self.metrics_function(pred_classes, label_classes)

        res_dict = dict(
            mean_loss=total_loss / length,
            mean_loss_power_pred=total_loss_power_pred / length,
            mean_loss_label_pred=total_loss_label_pred / length,
            mean_loss_power_icing_pred=total_loss_power_icing_pred / length,
            metrics=metrics,

            pred_classes=pred_classes,
            label_classes=label_classes,
            pred_powers=pred_powers,
            true_powers=true_powers
        )

        return res_dict


class WindIcingTrainerV4(WindIcingTrainerV3):
    """Variant of V3 whose loaders yield ``((inputs, (powers,
    power_features)), labels)`` and whose model takes ``(inputs, (powers,
    power_features))``.  Uses separate _train_epoch/_valid_epoch/test
    methods instead of V3's _run_epoch.
    """

    def __init__(self, model, train_loader, valid_loader, optimizer, criterion, config,
                 device=torch.device('cpu'),
                 loss_function: Optional[Callable] = None,
                 step_function: Optional[Callable] = None,
                 metrics_function: Optional[Callable] = None
                 ):
        super().__init__(model, train_loader, valid_loader, optimizer, criterion, config,
                         device, loss_function, step_function, metrics_function)

    def _train_epoch(self, epoch):
        """Train one epoch; returns (mean_loss, mean_power_loss,
        mean_label_loss), each normalized by train_size."""
        self.model.train()
        total_loss = 0
        total_loss_power_pred = 0
        total_loss_label_pred = 0

        for idx, ((inputs, (powers, power_features)), labels) in enumerate(self.train_loader):
            inputs = inputs.to(self.device)
            powers = powers.to(self.device)
            power_features = power_features.to(self.device)
            labels = labels.to(self.device)

            self.optimizer.zero_grad()
            power_preds, label_preds = self.model(inputs, (powers, power_features))
            # Select positions where labels are 0 (0 = normal, 1 = fault);
            # power loss is only computed on normal samples.
            mask = (labels == 0)
            if mask.sum() == 0:
                # Must be a tensor: plain 0 crashed on power_loss.item() below.
                power_loss = torch.tensor(0.0, device=self.device)
            else:
                power_loss = self.criterion_power(power_preds[mask], powers[mask])

            label_loss = self.criterion_label(label_preds, labels)

            loss = self.power_lambda * power_loss + (1 - self.power_lambda) * label_loss

            loss.backward()
            self.optimizer.step()

            total_loss += loss.item()
            total_loss_power_pred += power_loss.item()
            total_loss_label_pred += label_loss.item()

        return total_loss / self.train_size, total_loss_power_pred / self.train_size, total_loss_label_pred / self.train_size

    def _valid_epoch(self, epoch):
        """Validate one epoch; returns (mean_loss, mean_power_loss,
        mean_label_loss, metrics-or-None)."""
        self.model.eval()
        total_loss = 0
        total_loss_power_pred = 0
        total_loss_label_pred = 0

        pred_classes = []
        label_classes = []

        with torch.no_grad():
            for idx, ((inputs, (powers, power_features)), labels) in enumerate(self.valid_loader):
                inputs = inputs.to(self.device)
                powers = powers.to(self.device)
                power_features = power_features.to(self.device)
                labels = labels.to(self.device)

                # (no optimizer.zero_grad() here: this is evaluation)
                power_preds, label_preds = self.model(inputs, (powers, power_features))
                # Select positions where labels are 0 (0 = normal, 1 = fault)
                mask = (labels == 0)
                if mask.sum() == 0:
                    # Must be a tensor: plain 0 crashed on power_loss.item() below.
                    power_loss = torch.tensor(0.0, device=self.device)
                else:
                    power_loss = self.criterion_power(power_preds[mask], powers[mask])

                label_loss = self.criterion_label(label_preds, labels)

                loss = self.power_lambda * power_loss + (1 - self.power_lambda) * label_loss

                total_loss += loss.item()
                total_loss_power_pred += power_loss.item()
                total_loss_label_pred += label_loss.item()

                # label_preds are logits; argmax gives the predicted class
                predicted_classes = label_preds.argmax(dim=1)

                pred_classes.extend(predicted_classes.cpu().numpy())
                label_classes.extend(labels.cpu().numpy())

        metrics = None
        if self.metrics_function is not None:
            metrics = self.metrics_function(pred_classes, label_classes)

        return total_loss / self.valid_size, total_loss_power_pred / self.valid_size, total_loss_label_pred / self.valid_size, metrics

    def test(self, test_loader):
        """Evaluate on the test set, logging the three mean losses and
        (when configured) the metrics."""
        self.logger.info('begin testing')
        self.model.to(self.device)
        self.model.eval()

        total_loss = 0
        total_loss_power_pred = 0
        total_loss_label_pred = 0

        pred_classes = []
        label_classes = []

        with torch.no_grad():
            for idx, ((inputs, (powers, power_features)), labels) in enumerate(test_loader):
                inputs = inputs.to(self.device)
                powers = powers.to(self.device)
                power_features = power_features.to(self.device)
                labels = labels.to(self.device)

                # (no optimizer.zero_grad() here: this is evaluation)
                power_preds, label_preds = self.model(inputs, (powers, power_features))
                # Select positions where labels are 0 (0 = normal, 1 = fault)
                mask = (labels == 0)
                if mask.sum() == 0:
                    # Must be a tensor: plain 0 crashed on power_loss.item() below.
                    power_loss = torch.tensor(0.0, device=self.device)
                else:
                    power_loss = self.criterion_power(power_preds[mask], powers[mask])

                label_loss = self.criterion_label(label_preds, labels)

                loss = self.power_lambda * power_loss + (1 - self.power_lambda) * label_loss

                total_loss += loss.item()
                total_loss_power_pred += power_loss.item()
                total_loss_label_pred += label_loss.item()

                # label_preds are logits; argmax gives the predicted class
                predicted_classes = label_preds.argmax(dim=1)

                pred_classes.extend(predicted_classes.cpu().numpy())
                label_classes.extend(labels.cpu().numpy())

                # Progress log roughly every 5% of the loader
                if idx % (len(test_loader) // 20 + 1) == 0:
                    self.logger.info(f'idx: {idx} out of {len(test_loader)}')

        info = f'Test Loss: {total_loss / self.test_size:.10f} | Power Loss: {total_loss_power_pred / self.test_size:.10f} | Label Loss: {total_loss_label_pred / self.test_size:.10f}'
        self.logger.info(info)

        metrics = None
        if self.metrics_function is not None:
            metrics = self.metrics_function(pred_classes, label_classes)
            self.logger.info('Test Metrics Result: ')
            metrics.print()
       
class WindIcingTrainerV5(WindIcingTrainerV3):
    """Trainer that extends V3's joint power-regression / icing-classification
    training with an extra penalty on power predictions during icing periods.

    Relies on attributes initialized by ``WindIcingTrainerV3`` / the base
    trainer: ``criterion_power``, ``criterion_label``, ``power_lambda``,
    ``logger``, warmup/early-stopping config, checkpoint helpers, etc.
    """

    def __init__(self, model, train_loader, valid_loader, optimizer, criterion, config,
                 device=torch.device('cpu'),
                 loss_function: Optional[Callable] = None,
                 step_function: Optional[Callable] = None,
                 metrics_function: Optional[Callable] = None
                 ):
        super().__init__(model, train_loader, valid_loader, optimizer, criterion, config,
                         device, loss_function, step_function, metrics_function)

        # Weight for the icing power-prediction penalty (currently unused;
        # the penalty is added unweighted in _run_epoch).
        # self.icing_alpha = config["icing_alpha"]

    def _power_icing_pred_loss_function(self, pred, true):
        """Penalty for icing samples.

        When icing occurs, the measured power should be *below* the model's
        (ice-free) power prediction, i.e. we want ``pred > true``. Returns the
        summed per-element penalty ``exp(true - pred)``: ~0 when
        ``pred >> true`` (desired), 1 at ``pred == true``, growing when
        ``pred < true`` (violation).

        Fix: the previous form ``where(diff > 0, exp(-diff), exp(diff))`` is
        ``exp(-|diff|)`` — symmetric, so it *decreased* as ``pred`` dropped
        further below ``true``, rewarding exactly the violation this loss is
        meant to punish. The fixed form agrees with the original on the
        ``pred > true`` half.
        """
        diff = pred - true
        # NOTE(review): unbounded for large under-prediction; presumably
        # powers are normalized to a modest range — confirm before long runs.
        loss = torch.exp(-diff)
        return loss.sum()

    def _to_device(self):
        """Move the model and both per-task criteria onto self.device."""
        self.model.to(self.device)
        # self.criterion.to(self.device)  # combined criterion is not used by V5
        self.criterion_power.to(self.device)
        self.criterion_label.to(self.device)
        self.logger.info(f"Trainer to device: {self.device}")

    def train(self):
        """Run the train/validate loop with warmup, checkpointing, and
        optional early stopping keyed on the validation label loss."""
        self._to_device()

        self.logger.info(f'Begin training for {self.epochs} epochs, starting from epoch {self.start_epoch}')

        early_stopping_counter = 0

        for epoch in range(self.start_epoch, self.epochs + 1):
            self.logger.info(f'Begin Training for Epoch {epoch}')
            train_res = self._run_epoch(self.train_loader, mode="train")
            train_loss = train_res["mean_loss"]
            power_loss = train_res["mean_loss_power_pred"]
            label_loss = train_res["mean_loss_label_pred"]
            icing_power_loss = train_res["mean_loss_power_icing_pred"]
            info = f'Train Epoch {epoch} | Train Loss: {train_loss:.10f} | Power Loss: {power_loss:.10f} | Label Loss: {label_loss:.10f} | Icing Power Loss: {icing_power_loss:.10f}'
            self.logger.info(info)

            self.logger.info(f"Begin Validing for Epoch {epoch}")
            valid_res = self._run_epoch(self.valid_loader, mode="valid")
            valid_loss = valid_res["mean_loss"]
            power_loss = valid_res["mean_loss_power_pred"]
            label_loss = valid_res["mean_loss_label_pred"]
            icing_power_loss = valid_res["mean_loss_power_icing_pred"]
            info = f'Valid Epoch {epoch} | Valid Loss: {valid_loss:.10f} | Power Loss: {power_loss:.10f} | Label Loss: {label_loss:.10f} | Icing Power Loss: {icing_power_loss:.10f}'
            self.logger.info(info)
            metrics = valid_res["metrics"]
            if metrics is not None:  # None when no metrics_function is configured
                metrics.print()

            # Once validation label loss drops below the warmup threshold,
            # end the warmup phase immediately.
            if label_loss < self.warmup_loss:
                self.warmup_epochs = 0

            if epoch > self.warmup_epochs:

                # Model selection is driven by the classification (label) loss.
                if self.best_loss is None or label_loss < self.best_loss:
                    self.best_loss = label_loss
                    self.best_metrics = metrics
                    early_stopping_counter = 0
                    self._save_checkpoint(epoch, save_best=True)

                elif epoch % self.checkpoint_step == 0:
                    self._save_checkpoint(epoch)

                if self.early_stopping:
                    if label_loss > self.best_loss:
                        early_stopping_counter += 1
                        self.logger.info(f'Early stopping counter: {early_stopping_counter} out of {self.patience}')

                        if early_stopping_counter >= self.patience:
                            self.logger.info(f'Training early stops at epoch {epoch}')
                            break

        # best_loss / best_metrics stay None if training never left warmup
        # (or metrics_function is None) — guard to avoid crashing at the end.
        if self.best_loss is not None:
            self.logger.info(f"Best Valid Loss: {self.best_loss:.10f}")
        if self.best_metrics is not None:
            self.logger.info("Best Valid Result: ")
            self.best_metrics.print()

    def test(self, test_loader):
        """Evaluate on the test set, log losses/metrics, and dump per-sample
        predictions (classes and power curves) to a CSV keyed by config['no']."""
        self._to_device()
        self.logger.info('Begin Testing')
        test_res = self._run_epoch(test_loader, mode="test")
        test_loss = test_res["mean_loss"]
        power_loss = test_res["mean_loss_power_pred"]
        label_loss = test_res["mean_loss_label_pred"]
        icing_power_loss = test_res["mean_loss_power_icing_pred"]

        info = f'Test Loss: {test_loss:.10f} | Power Loss: {power_loss:.10f} | Label Loss: {label_loss:.10f} | Icing Power Loss: {icing_power_loss:.10f}'
        self.logger.info(info)

        metrics = test_res["metrics"]
        if metrics is not None:  # None when no metrics_function is configured
            self.logger.info('Test Metrics Result: ')
            metrics.print()

        # Record the test-set power prediction curves (plus classes) to CSV.
        df = pd.DataFrame(dict(
            pred_classes=test_res["pred_classes"],
            label_classes=test_res["label_classes"],
            pred_powers=test_res['pred_powers'],
            true_powers=test_res['true_powers']
        ))
        df.to_csv(get_test_csv_path(self.config["no"]), index=False, encoding='utf-8')

    def _run_epoch(self, data_loader, mode: Literal['train', 'valid', 'test']):
        """One pass over data_loader.

        In 'train' mode: gradients on, optimizer steps per batch.
        In 'valid'/'test': no_grad, predicted/true classes are collected;
        'test' additionally collects per-sample power curves.

        Returns a dict with per-sample mean losses, the metrics object
        (None when metrics_function is unset or mode == 'train'), and the
        collected prediction lists.
        """
        if mode == 'train':
            self.model.train()
        else:
            self.model.eval()

        length = 0
        total_loss = 0
        total_loss_power_pred = 0
        total_loss_label_pred = 0
        total_loss_power_icing_pred = 0
        pred_classes = []
        label_classes = []

        pred_powers = []
        true_powers = []

        grad_ctx = torch.enable_grad() if mode == 'train' else torch.no_grad()
        with grad_ctx:

            for idx, ((inputs, powers), labels) in enumerate(data_loader):

                length += inputs.shape[0]

                inputs = inputs.to(self.device)
                powers = powers.to(self.device)
                labels = labels.to(self.device)

                if mode == 'train':
                    self.optimizer.zero_grad()

                power_preds, label_preds = self.model(inputs, powers)

                # Normal samples (label 0): standard power-regression loss.
                # Labels: 0 = normal, 1 = fault/icing.
                mask = (labels == 0)
                if mask.sum() == 0:
                    # Float tensor so .item() and the mixed arithmetic below stay uniform.
                    power_loss = torch.tensor(0.0, device=self.device)
                else:
                    power_loss = self.criterion_power(power_preds[mask], powers[mask])

                # Icing samples (label 1): asymmetric penalty pushing pred above true.
                mask_icing = (labels == 1)
                if mask_icing.sum() == 0:
                    power_icing_loss = torch.tensor(0.0, device=self.device)
                else:
                    power_icing_loss = self._power_icing_pred_loss_function(power_preds[mask_icing], powers[mask_icing])

                label_loss = self.criterion_label(label_preds, labels)

                # NOTE(review): the icing penalty is added unweighted — the
                # commented-out icing_alpha in __init__ suggests a weight was
                # planned; confirm the intended scaling.
                loss = self.power_lambda * power_loss + (1 - self.power_lambda) * label_loss + power_icing_loss

                if mode == 'train':
                    loss.backward()
                    self.optimizer.step()

                total_loss += loss.item()
                total_loss_power_pred += power_loss.item()
                total_loss_label_pred += label_loss.item()
                total_loss_power_icing_pred += power_icing_loss.item()

                if mode != "train":
                    # Argmax over logits -> predicted class indices.
                    predicted_classes = label_preds.argmax(dim=1)
                    # Move to CPU and convert to numpy before accumulating.
                    pred_classes.extend(predicted_classes.cpu().numpy())
                    label_classes.extend(labels.cpu().numpy())

                    if mode == "test":
                        # Keep full per-sample power curves for the CSV dump.
                        pred_powers.extend(power_preds.cpu().numpy().tolist())
                        true_powers.extend(powers.cpu().numpy().tolist())

        metrics = None
        if mode != "train" and self.metrics_function is not None:
            metrics = self.metrics_function(pred_classes, label_classes)

        # Guard against an empty loader (avoids ZeroDivisionError).
        denom = length if length > 0 else 1
        res_dict = dict(
            mean_loss=total_loss / denom,
            mean_loss_power_pred=total_loss_power_pred / denom,
            mean_loss_label_pred=total_loss_label_pred / denom,
            mean_loss_power_icing_pred=total_loss_power_icing_pred / denom,
            metrics=metrics,

            pred_classes=pred_classes,
            label_classes=label_classes,
            pred_powers=pred_powers,
            true_powers=true_powers
        )
        return res_dict