#%%
#from transformer import MultiTaskTransformer
from SingleModel import SingleTaskTransformer
import torch
import torch.optim as optim
import torch.nn.functional as F
from tqdm import tqdm
import matplotlib.pyplot as plt
from dataloadernew import make_loaders
import torch.nn as nn
import numpy as np
import seaborn as sns
from sklearn.metrics import confusion_matrix
import copy


#%%

# Load the train/test splits for the single-task experiments.
# X arrays hold the sensor sequences; the y arrays hold one label column
# per task (column order below: cooler, valve, pump, accumulator).
TrainData = np.load('X_train.npy')
TrainLabel = np.load('y_train.npy')
TestData = np.load('X_test.npy')
TestLabel = np.load('y_test.npy')

# Slice one label vector per component/task out of the label matrix.
CoolerTrainLabel = TrainLabel[:,0]
ValveTrainLabel = TrainLabel[:,1]
PumpTrainLable = TrainLabel[:,2]  # NOTE(review): "Lable" typo kept — this name is referenced below
AccumulatorTrainLabel = TrainLabel[:,3]

CoolerTestLabel = TestLabel[:,0]
ValveTestLabel = TestLabel[:,1]
PumpTestLable = TestLabel[:,2]  # NOTE(review): "Lable" typo kept — this name is referenced below
AccumulatorTestLabel = TestLabel[:,3]


#%%
# Build one (train, test) DataLoader pair per task from the shared feature
# arrays and each task's own label column.
batch_size = 64

Cooler_train_loader, Cooler_test_loader = make_loaders(TrainData, CoolerTrainLabel, TestData, CoolerTestLabel, batch_size=batch_size)

Valve_train_loader, Valve_test_loader = make_loaders(TrainData, ValveTrainLabel, TestData, ValveTestLabel, batch_size=batch_size)

Pump_train_loader, Pump_test_loader = make_loaders(TrainData, PumpTrainLable, TestData, PumpTestLable, batch_size=batch_size)

Accumulator_train_loader, Accumulator_test_loader = make_loaders(TrainData, AccumulatorTrainLabel, TestData, AccumulatorTestLabel, batch_size=batch_size)


# Release any cached GPU memory before building the model.
torch.cuda.empty_cache()

#%%
# Select the compute device: prefer GPU, fall back to CPU.
# FIX: the original hard-coded torch.device("cuda"), which crashes every
# later .to(device) call on machines without CUDA; restore the guarded form.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
torch.cuda.empty_cache()  # no-op when CUDA is unavailable/uninitialized

# Hyperparameters / model configuration.
input_dim = 17  # number of sensor features per time step

# Output dimension (class count) for each task head.
Cooler_task_dims = 3
Valve_task_dims = 4
Pump_task_dims = 3
Accumulator_task_dims = 4

num_heads = 8     # attention heads per transformer layer
num_layers = 6    # stacked encoder layers
hidden_dim = 256  # feed-forward hidden width
dropout = 0.1

lr = 1e-4  # base learning rate (used by the optimizer below)

#%%
# Loss: plain cross-entropy (one task head at a time in this script).
criterions = nn.CrossEntropyLoss()

# Instantiate the model for the task under study; uncomment exactly one line.
model = SingleTaskTransformer(input_dim, Cooler_task_dims, num_heads, num_layers, hidden_dim, dropout)
#model = SingleTaskTransformer(input_dim, Valve_task_dims, num_heads, num_layers, hidden_dim, dropout)
#model = SingleTaskTransformer(input_dim, Pump_task_dims, num_heads, num_layers, hidden_dim, dropout)
#model = SingleTaskTransformer(input_dim, Accumulator_task_dims, num_heads, num_layers, hidden_dim, dropout)

# Move the model to GPU when available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)

# Optimizer.
# FIX: the original passed a hard-coded 1e-4 here, silently ignoring the
# `lr` hyperparameter defined above; tie them together (same value today).
optimizer = optim.Adam(model.parameters(), lr=lr)

# Halve the learning rate after 3 epochs without validation-loss improvement.
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=3, factor=0.5)

# Training configuration.
epochs = 5  # number of training epochs

# Per-epoch histories (populated by the training loops further down).
train_losses = []
val_losses = []
train_accuracies = []
val_accuracies = []

checkpoint_pth = "best_model.pth"  # path where the best-by-val-accuracy weights are saved


#%%

def calculate_accuracy(y_pred, y_true):
    """Return the fraction of rows whose argmax prediction matches the label.

    y_pred: (batch, num_classes) scores/logits; y_true: (batch,) class ids.
    Returns a 0-dim float tensor.
    """
    predictions = y_pred.argmax(dim=1)
    return (predictions == y_true).float().mean()

# Per-epoch history of losses/accuracies for this training run.
train_epoch_losses = []
train_epoch_accuracyes = []
valid_epoch_losses = []
valid_epoch_accuracyes = []

best_acc = 0.0
for epoch in range(epochs):
    # ---- training phase ----
    model.train()
    train_epoch_loss = []
    train_epoch_accuracy = []
    pbar = tqdm(Cooler_train_loader, total=len(Cooler_train_loader))
    for index, sample in enumerate(pbar, start=1):
        # Move the batch to the training device.
        inputs, labels = sample['sequence'].to(device), sample['label'].to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterions(outputs, labels)
        loss.backward()
        optimizer.step()

        train_epoch_loss.append(loss.item())
        accuracy = calculate_accuracy(outputs, labels)
        train_epoch_accuracy.append(accuracy.item())

        pbar.set_description(f'Epoch [{epoch + 1}/{epochs}]')
        pbar.set_postfix(**{'loss': loss.item(),
                            'accuracy': accuracy.item(),
                            })

    # ---- validation phase ----
    model.eval()
    valid_epoch_loss = []
    valid_epoch_accuracy = []
    pbar = tqdm(Cooler_test_loader, total=len(Cooler_test_loader))
    # FIX: run validation under no_grad — the original built autograd graphs
    # for every validation batch, wasting memory and time.
    with torch.no_grad():
        for index, sample in enumerate(pbar, start=1):
            inputs, labels = sample['sequence'].to(device), sample['label'].to(device)
            outputs = model(inputs)
            loss = criterions(outputs, labels)

            valid_epoch_loss.append(loss.item())
            accuracy = calculate_accuracy(outputs, labels)
            valid_epoch_accuracy.append(accuracy.item())
            pbar.set_description('valid')
            pbar.set_postfix(**{'total_loss': loss.item(),
                                'accuracy': accuracy.item(),
                                })

    # ---- epoch summary: average the per-batch metrics ----
    print("--------------------------------------------")
    train_epoch_loss = np.average(train_epoch_loss)
    train_epoch_accuracy = np.average(train_epoch_accuracy)
    print(f'Epoch {epoch + 1}, train Accuracy: {train_epoch_accuracy:.4f}')
    valid_epoch_loss = np.average(valid_epoch_loss)
    valid_epoch_accuracy = np.average(valid_epoch_accuracy)
    print(f'Epoch {epoch + 1}, Validation Accuracy: {valid_epoch_accuracy:.4f}')
    print("--------------------------------------------")

    # Checkpoint the best model (by validation accuracy) with its optimizer state.
    if valid_epoch_accuracy > best_acc:
        best_acc = valid_epoch_accuracy
        best_model_wts = copy.deepcopy(model.state_dict())
        state = {
            'state_dict': model.state_dict(),
            'best_acc': best_acc,
            'optimizer': optimizer.state_dict(),
        }
        torch.save(state, checkpoint_pth)

    train_epoch_losses.append(train_epoch_loss.item())
    train_epoch_accuracyes.append(train_epoch_accuracy.item())
    valid_epoch_losses.append(valid_epoch_loss.item())
    valid_epoch_accuracyes.append(valid_epoch_accuracy.item())
    # Reduce the LR when validation loss plateaus.
    scheduler.step(valid_epoch_loss)

print('Finished Training')
print('Best val Acc: {:4f}'.format(best_acc))


#%%
# Per-task titles for the confusion-matrix figures.
CM_titles = [
    "Confusion Matrix for Cooler ",
    "Confusion Matrix for Valve ",
    "Confusion Matrix for Main pump ",
    "Confusion Matrix for Accumulator ",
    "Confusion Matrix for Flag Training "
]
task_idx = 0  # index into CM_titles for the task evaluated below (cooler)
# After training: collect test-set predictions to build a confusion matrix.
model.eval()  # switch the model to evaluation mode

all_preds = []
all_labels = []

with torch.no_grad():  # no gradients needed for test-set inference
    for sample in Cooler_test_loader:
        data, target = sample['sequence'].to(device), sample['label'].to(device)

        # Forward pass
        task_outputs = model(data)

        # Predicted class = argmax over the class dimension.
        
        _, predicted = torch.max(task_outputs, 1)

        # Accumulate predictions and ground-truth labels on the CPU.
        all_preds.extend(predicted.cpu().numpy())
        all_labels.extend(target.cpu().numpy())
#%%
# Compute the confusion matrix from the accumulated labels/predictions.
cm = confusion_matrix(all_labels, all_preds)

# Plot it as an annotated heatmap.
plt.figure(figsize=(6, 5))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=np.arange(cm.shape[1]), yticklabels=np.arange(cm.shape[0]))
plt.title(CM_titles[task_idx])
plt.xlabel('Predicted Labels')
plt.ylabel('True Labels')
plt.show()

#%% Second training loop (cooler task): records epoch-level histories in the
# module-level `train_losses` / `val_losses` lists.

for epoch in range(epochs):
    model.train()  # switch the model to training mode
    running_loss = 0.0
    # FIX: accumulate correct predictions over the whole epoch — the original
    # derived "train accuracy" from the last batch only.
    train_correct = 0
    train_total = 0

    with tqdm(Cooler_train_loader, desc=f"Epoch {epoch + 1}/{epochs}", unit="batch") as pbar:
        for batch_idx, sample in enumerate(pbar):
            # Move the batch to the training device.
            data, target = sample['sequence'].to(device), sample['label'].to(device)

            optimizer.zero_grad()

            # Forward pass and single-task cross-entropy loss.
            task_outputs = model(data)
            loss = criterions(task_outputs, target)

            # Backward pass and parameter update.
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

            # Count batch hits for the epoch-level accuracy.
            _, predicted = torch.max(task_outputs, 1)
            train_correct += (predicted == target).sum().item()
            train_total += target.size(0)

            # Show the running mean loss on the progress bar.
            pbar.set_postfix(loss=running_loss / (batch_idx + 1))

    # Epoch-level training metrics.
    avg_train_loss = running_loss / len(Cooler_train_loader)
    train_losses.append(avg_train_loss)
    Train_task_accuracy = train_correct / train_total

    # ---- validation ----
    model.eval()  # switch the model to evaluation mode
    val_loss = 0.0
    val_correct = 0
    val_total = 0

    with torch.no_grad():  # no gradients needed on the validation set
        for sample in Cooler_test_loader:
            data, target = sample['sequence'].to(device), sample['label'].to(device)

            task_outputs = model(data)
            loss = criterions(task_outputs, target)

            # FIX: accumulate the Python float, not the tensor — the original
            # made avg_val_loss (and the scheduler input) a 0-dim tensor.
            val_loss += loss.item()

            _, predicted = torch.max(task_outputs, 1)
            # FIX: the original computed validation accuracy from the stale
            # `correct` counter left over from the training loop.
            val_correct += (predicted == target).sum().item()
            val_total += target.size(0)

    avg_val_loss = val_loss / len(Cooler_test_loader)
    val_losses.append(avg_val_loss)
    val_task_accuracy = val_correct / val_total

    # Report epoch metrics.
    # FIX: the original f-string concatenation had no separator between
    # "Validation Loss" and "Train ACC", producing a fused line.
    print(f"Epoch {epoch + 1}/{epochs}, Train Loss: {avg_train_loss:.6f}, "
          f"Validation Loss: {avg_val_loss:.6f}, "
          f"Train ACC: {Train_task_accuracy:.6f}, "
          f"Validation ACC: {val_task_accuracy:.6f}")

    # Reduce the LR when validation loss plateaus.
    scheduler.step(avg_val_loss)

#%%

