from config import *
from dataloader import load_data
from modeler import HAR_CNN
import torch
import torch.nn as nn
from torch import optim


def run():
    """
    Train the HAR_CNN model on the training set.

    Builds the DataLoader, model, loss, and optimizer from module-level
    configuration (``F_X_train``, ``F_y_train``, ``INPUT_DIM``,
    ``OUTPUT_DIM``, ``LEARNING_RATE``, ``EPOCHS`` from ``config``),
    then runs a standard training loop for ``EPOCHS`` epochs, printing
    the average training loss and accuracy after each epoch.

    Side effects: prints per-epoch metrics to stdout.

    NOTE(review): validation + early stopping were previously present
    only as commented-out dead code (along with unused
    ``best_val_loss``/``patience`` counters) and have been removed;
    reintroduce them deliberately with a real ``val_loader`` if needed.
    """
    train_loader = load_data(F_X_train, F_y_train)
    model = HAR_CNN(INPUT_DIM, OUTPUT_DIM)
    criterion = nn.CrossEntropyLoss()
    # weight_decay adds L2 regularization to curb overfitting
    optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=1e-4)

    for epoch in range(EPOCHS):
        model.train()  # training mode (enables dropout/batch-norm updates)
        train_loss, correct = 0, 0
        for X_batch, y_batch in train_loader:
            # Add a channel dimension: (batch_size, 561) -> (batch_size, 1, 561)
            X_batch = X_batch.unsqueeze(1)

            optimizer.zero_grad()             # clear accumulated gradients
            outputs = model(X_batch)          # forward pass
            loss = criterion(outputs, y_batch)
            loss.backward()                   # backpropagation
            optimizer.step()                  # parameter update

            train_loss += loss.item()
            correct += (outputs.argmax(1) == y_batch).sum().item()

        train_acc = correct / len(train_loader.dataset)
        train_loss /= len(train_loader)  # average loss over batches
        print(f"Epoch {epoch + 1}/{EPOCHS}: Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.4f}")

# Script entry point: start training only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    run()