import os

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader

from model import ResLstmNet
from DataSet import GunDateSet, data_transform, train_transform

# Use the GPU when available; model and batches are moved to this device below.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# NOTE(review): absolute Windows paths — adjust per machine / move to config.
train_path = r'H:\shooting\data\gun_fire\THREE\train'
val_path = r'H:\shooting\data\gun_fire\THREE\test'
sequence_len = 15  # frames (time steps) per input sequence fed to the LSTM
batch_size = 32
lr = 0.0001

# print('loading data............')
train_dataset = GunDateSet(root_dir=train_path, sequence_len=sequence_len, transform=data_transform)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=2)

Val_dataset = GunDateSet(root_dir=val_path, sequence_len=sequence_len, transform=data_transform)
val_loader = DataLoader(Val_dataset, batch_size=batch_size, shuffle=False, num_workers=2)

# print(f'data loaded --- train set size: {len(train_dataset)}')
# print(f'device: {device}')

model = ResLstmNet().to(device)
# criterion = nn.BCELoss()
# criterion = nn.BCEWithLogitsLoss()  # binary classification
criterion = nn.CrossEntropyLoss()  # multi-class classification (expects raw logits)
# optimizer = optim.Adam(model.parameters(), lr=lr)
optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=1e-5)


def train(epoch):
    """Run one training epoch over ``train_loader``.

    Updates ``model`` in place, prints the epoch's sample-weighted average
    loss and accuracy, and saves a checkpoint to ``./runs/gunshot{epoch+1}.pth``.

    Args:
        epoch: zero-based epoch index, used for logging and the checkpoint name.
    """
    model.train()
    running_loss = 0.0
    total_samples = 0
    correct = 0

    for sequence, label in train_loader:
        sequence, label = sequence.to(device), label.to(device)

        batch = sequence.size(0)
        total_samples += batch

        optimizer.zero_grad()
        output = model(sequence)  # presumably shape [batch, num_classes] — logits
        loss = criterion(output, label)
        loss.backward()
        # nn.utils.clip_grad_norm_(model.parameters(), max_norm=5)  # gradient clipping
        optimizer.step()

        # Weight by batch size so the epoch average is exact even when the
        # final batch is smaller than batch_size.
        running_loss += loss.item() * batch

        # Multi-class accuracy: predicted class is the argmax over logits.
        _, predicted = torch.max(output, 1)  # shape [batch]
        correct += (predicted == label).sum().item()

    # Fix: divide by the number of samples actually seen this epoch.
    # The old code divided by len(train_dataset), which silently disagrees
    # with the accumulated totals if a sampler or drop_last skips samples.
    epoch_avg_loss = running_loss / total_samples
    epoch_accuracy = 100 * correct / total_samples
    print(f"Epoch {epoch + 1} completed --- train_Average loss: {epoch_avg_loss:.7f} --- Accuracy: {epoch_accuracy:.2f}%")
    # Fix: torch.save raises FileNotFoundError if ./runs does not exist yet.
    os.makedirs('./runs', exist_ok=True)
    torch.save(model.state_dict(), f'./runs/gunshot{epoch + 1}.pth')


def test(epoch, epochs):
    """Evaluate ``model`` on ``val_loader`` and print the validation accuracy.

    Args:
        epoch: zero-based index of the epoch that just finished (logging only).
        epochs: total number of epochs, shown in the progress string.
    """
    model.eval()  # disable dropout / use BN running statistics
    correct = 0
    total = 0
    with torch.no_grad():  # inference only — no gradient bookkeeping
        for sequence, label in val_loader:
            sequence = sequence.to(device)
            label = label.to(device)
            logits = model(sequence)
            # Multi-class prediction: index of the largest logit per sample.
            predicted = logits.argmax(dim=1)
            total += label.size(0)
            correct += (predicted == label).sum().item()
    accuracy = 100*correct/total
    print(f'Epoch [{epoch+1}/{epochs}] - Validation Accuracy: {accuracy:.2f}%')
    print("*"*30)


if __name__ == '__main__':
    # Fix: the loop previously ran 100 epochs while test() was told
    # epochs=80, so validation logs read e.g. "[81/80]". Drive both from
    # a single constant so the displayed progress matches reality.
    num_epochs = 100
    for epoch in range(num_epochs):
        train(epoch=epoch)
        test(epoch=epoch, epochs=num_epochs)
