# 工程相关包
from contextlib import nullcontext

from model import base_model
from data_processing import data_labels_generate
# 学习框架
import torch
import torch.optim as optim # 优化器
from torch.optim import lr_scheduler # 调度器
import torch.nn.functional as F
# 数据处理相应库
import glob
import random
import time # 随机打乱用
from sklearn.model_selection import KFold
import os
import h5py
import math
import numpy as np
from sklearn.metrics import accuracy_score, f1_score, cohen_kappa_score
# 其他工具库
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import f1_score, cohen_kappa_score
from tqdm import tqdm  # 导入tqdm模块，用于显示进度条
import datetime  # 用于获取当前时间，以便在日志中加上时间戳

def log_message(message):
    """Append *message* to the training log file, prefixed with a timestamp.

    The log lives at a fixed path; each call opens the file in append
    mode, writes one ``"<timestamp> - <message>"`` line, and closes it.
    """
    log_file_path = os.path.join("G:\\Research\\EEG_Project\\Template\\CodeDir", '模型训练日志.txt')
    now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    # FIX: open with an explicit UTF-8 encoding. Without it, Windows falls
    # back to the locale code page, and non-ASCII log text (the messages are
    # written next to a Chinese-named log file) can raise UnicodeEncodeError.
    with open(log_file_path, 'a', encoding='utf-8') as log_file:
        log_file.write(f"{now} - {message}\n")

class Trainer:
    """K-fold cross-validated trainer for the EEG sleep-staging model.

    Splits the ``.h5`` recording files into ``k_folds`` folds, trains
    ``base_model.BaseModel`` on each fold with cross-entropy loss,
    ``ReduceLROnPlateau`` scheduling, and validation-loss early stopping,
    and logs the best metrics seen.
    """

    def __init__(self):
        print("Trainer __init__")

        # --- training hyper-parameters ---
        self.lr = 0.001
        self.num_epochs = 1000
        self.k_folds = 10
        self.batches = 256            # batch size
        self.patience = 20            # early-stopping patience (epochs without val-loss improvement)
        self.best_val_loss = float('inf')
        self.best_metrics = {}
        self.no_improve_epochs = 0

        # --- bookkeeping state ---
        self.k_fold_splitter = None
        self.epoch = 0
        self.train_files = []
        self.val_files = []
        self.train_steps = 0          # batches per training epoch
        self.val_steps = 0            # batches per validation epoch
        self.best_train_metrics = {'accuracy': -float('inf'), 'f1': -float('inf'), 'kappa': -float('inf')}
        self.best_val_metrics = {'accuracy': -float('inf'), 'f1': -float('inf'), 'kappa': -float('inf')}

        # --- data locations ---
        self.data_files_path = "F:\\EEGDataset\\MASS\\data_anno_savefiles"
        self.data_files = glob.glob(os.path.join(self.data_files_path, "*.h5"))
        # NOTE(review): a single checkpoint path is shared by all folds, so
        # each fold's best model overwrites the previous one -- confirm intended.
        self.model_path = "G:\\Research\\EEG_Project\\Template\\CodeDir\\save_model\\model"

        # --- model and training tools ---
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = base_model.BaseModel().to(self.device)
        self.criterion = torch.nn.CrossEntropyLoss()
        self.optimizer = optim.Adam(self.model.parameters(), self.lr)
        # NOTE(review): min_lr=0.0008 is very close to the initial lr (0.001);
        # a single factor-0.7 step already clamps to min_lr -- confirm intended.
        self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min', factor=0.7, patience=5, min_lr=0.0008)

        # Batch generators (rebuilt per fold in model_train_epochs).
        self.data_labels_generator = data_labels_generate.DataLabelsGenerate()
        self.train_data_generator = None
        self.val_data_generator = None

    def data_init(self):
        """Shuffle the file list and build the k-fold splitter.

        NOTE(review): random_state is seeded from wall-clock time, so folds
        are not reproducible between runs -- confirm that is intended.
        """
        random.shuffle(self.data_files)
        self.k_fold_splitter = KFold(n_splits=self.k_folds, shuffle=True, random_state=int(time.time()))

    def calculate_steps_per_epoch(self, file_names):
        """Return ceil(total samples / batch size) over the given .h5 files.

        Sums the first-dimension length of every dataset under the 'data'
        group of each file.
        """
        total_samples = 0
        for file in file_names:
            with h5py.File(file, 'r') as f:
                total_samples += sum(f['data'][channel].shape[0] for channel in f['data'])
        return math.ceil(total_samples / self.batches)

    def configure_train_tools(self):
        """(Re)create optimizer, loss, and LR scheduler for a training run."""
        self.optimizer = optim.Adam(self.model.parameters(), self.lr)
        # Cross-entropy with mean reduction; applies softmax internally.
        self.criterion = torch.nn.CrossEntropyLoss(reduction='mean')
        # Shrink the LR when validation loss plateaus.
        self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min', factor=0.7, patience=5, min_lr=0.0008)

    def model_init(self):
        """Move the model to the best available device and rebuild the tools."""
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model.to(self.device)
        self.model.train()
        self.configure_train_tools()

    def train_per_epoch(self):
        """Train for one epoch.

        Returns:
            (mean loss over the epoch, list of predictions, list of labels).
        """
        self.model.train()
        epoch_loss = 0.0
        all_preds = []
        all_labels = []

        # Progress bar over the fixed number of steps for this epoch.
        progress_bar = tqdm(range(self.train_steps), desc=f"Epoch {self.epoch + 1}", ncols=100)

        for step in progress_bar:
            # Pull one batch from the generator and move it to the device.
            batch_data_labels = next(self.train_data_generator)
            inputs = batch_data_labels['batch_data'].to(self.device)
            labels = batch_data_labels['labels'].to(self.device)

            self.optimizer.zero_grad()            # clear gradients from the previous step
            outputs = self.model(inputs)          # forward pass -> (batch_size, num_classes)

            loss = self.criterion(outputs, labels)
            loss.backward()                       # back-propagate
            self.optimizer.step()                 # update weights

            epoch_loss += loss.item()
            _, preds = torch.max(outputs, 1)      # predicted class per sample

            all_preds.extend(preds.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())

            progress_bar.set_postfix({'Loss': loss.item()})

        # FIX: removed the former `correct_predictions_total += (preds != labels)...`
        # accumulator -- the comparison was inverted (it counted *wrong*
        # predictions) and the total was never used; accuracy is computed
        # from all_preds/all_labels in calculate_metrics().
        return epoch_loss / self.train_steps, all_preds, all_labels

    def validate_per_epoch(self):
        """Run one validation pass.

        Returns:
            (mean validation loss, list of predictions, list of labels).
        """
        self.model.eval()
        val_loss = 0.0
        all_preds = []
        all_labels = []

        with torch.no_grad():
            progress_bar = tqdm(range(self.val_steps), desc=f"Validation Epoch {self.epoch + 1}", ncols=100)

            for _ in progress_bar:
                batch_data_labels = next(self.val_data_generator)

                inputs = batch_data_labels['batch_data'].to(self.device)
                labels = batch_data_labels['labels'].to(self.device)

                outputs = self.model(inputs)
                loss = self.criterion(outputs, labels)

                val_loss += loss.item()
                preds = torch.argmax(outputs, dim=1)

                all_preds.extend(preds.cpu().numpy())
                all_labels.extend(labels.cpu().numpy())

                progress_bar.set_postfix({'Val Loss': loss.item()})
        return val_loss / self.val_steps, all_preds, all_labels

    def calculate_metrics(self, preds, labels):
        """Return accuracy, weighted F1, and Cohen's kappa for preds vs labels."""
        return {
            'accuracy': accuracy_score(labels, preds),
            'f1': f1_score(labels, preds, average='weighted'),
            'kappa': cohen_kappa_score(labels, preds)
        }

    def early_stopping(self, val_loss):
        """Early-stopping check.

        Saves the model when ``val_loss`` improves on the best seen so far;
        otherwise increments the stale-epoch counter.

        Returns:
            True when training should stop (patience exhausted), else False.
        """
        if val_loss < self.best_val_loss:
            self.best_val_loss = val_loss
            self.no_improve_epochs = 0
            # Checkpoint the improved model.
            torch.save(self.model.state_dict(), self.model_path)
            print(f"Model improved, saved to {self.model_path}")
            return False
        else:
            self.no_improve_epochs += 1
            return self.no_improve_epochs >= self.patience

    def model_train_epochs(self):
        """Run the full k-fold cross-validated training loop."""
        log_message("\n Training starts!")
        self.data_init()
        self.model_init()

        for fold, (train_index, val_index) in enumerate(self.k_fold_splitter.split(self.data_files)):
            print(f"Training fold {fold + 1}/{self.k_folds}...")

            # FIX: start every fold from a fresh model and fresh early-stopping
            # state. Previously the weights, best_val_loss, and the
            # no_improve_epochs counter all carried over from the previous
            # fold, which (a) leaks each fold's validation data into later
            # folds and (b) could trigger early stopping immediately.
            self.model = base_model.BaseModel().to(self.device)
            self.model_init()
            self.best_val_loss = float('inf')
            self.no_improve_epochs = 0

            self.train_files = [self.data_files[i] for i in train_index]
            self.val_files = [self.data_files[i] for i in val_index]
            self.train_steps = self.calculate_steps_per_epoch(self.train_files)
            self.val_steps = self.calculate_steps_per_epoch(self.val_files)
            print(f"训练文件 {len(self.train_files) , self.train_files}")
            print(f"train_steps: {self.train_steps}")
            print(f"验证文件 {len(self.val_files) , self.val_files}")
            print(f"val_steps: {self.val_steps}")

            self.train_data_generator = self.data_labels_generator.generate_data_labels(self.train_files, self.batches)
            self.val_data_generator = self.data_labels_generator.generate_data_labels(self.val_files, self.batches)

            for self.epoch in range(self.num_epochs):

                # Training phase.
                train_loss, train_preds, train_labels = self.train_per_epoch()
                train_metrics = self.calculate_metrics(train_preds, train_labels)

                # Validation phase.
                val_loss, val_preds, val_labels = self.validate_per_epoch()
                val_metrics = self.calculate_metrics(val_preds, val_labels)

                # Let the scheduler react to the validation loss.
                self.scheduler.step(val_loss)

                # Report this epoch.
                print(f"\nFold {fold + 1} Epoch {self.epoch + 1}/{self.num_epochs}")
                print(
                      f"Train Loss: {train_loss:.4f} | "
                      f"Acc: {train_metrics['accuracy']:.4f} | "
                      f"F1: {train_metrics['f1']:.4f} | "
                      f"Kappa: {train_metrics['kappa']:.4f}")
                print(
                    f"Val Loss: {val_loss:.4f} | "
                    f"Acc: {val_metrics['accuracy']:.4f} | "
                    f"F1: {val_metrics['f1']:.4f} | "
                    f"Kappa: {val_metrics['kappa']:.4f}")

                # Track the best metrics seen over the whole run (keyed on accuracy).
                if train_metrics['accuracy'] > self.best_train_metrics['accuracy']:
                    self.best_train_metrics = train_metrics
                if val_metrics['accuracy'] > self.best_val_metrics['accuracy']:
                    self.best_val_metrics = val_metrics

                # Early-stopping check (also checkpoints on improvement).
                if self.early_stopping(val_loss):
                    log_message(f"Early stopping triggered at epoch {self.epoch + 1}")
                    print(f"Early stopping triggered at epoch {self.epoch + 1}")
                    break

        log_message(
            f"Best Train Metrics - "
            f"Acc: {self.best_train_metrics['accuracy']:.4f} | "
            f"F1: {self.best_train_metrics['f1']:.4f} | "
            f"Kappa: {self.best_train_metrics['kappa']:.4f}")
        log_message(
            f"Best Validation Metrics - "
            f"Acc: {self.best_val_metrics['accuracy']:.4f} | "
            f"F1: {self.best_val_metrics['f1']:.4f} | "
            f"Kappa: {self.best_val_metrics['kappa']:.4f}")

        print("\n Training completed!")
        log_message("\n Training completed!")
        print(
            f"Best Validation Metrics - "
            f"Acc: {self.best_val_metrics['accuracy']:.4f} | "
            f"F1: {self.best_val_metrics['f1']:.4f} | "
            f"Kappa: {self.best_val_metrics['kappa']:.4f}")
