import torch
from utils import set_seed, one_cycle, ModelEMA, EarlyStopping
from dataset import VibrationDataset
from model import SAFDNN
import numpy as np
import random
import os
# Seed the RNGs once at import time for reproducible runs (set_seed comes from
# utils; presumably it seeds python/numpy/torch — confirm against utils.py).
set_seed()


def train_per_epoch(loader, model, optimizer, criterion, device):
    """Run one training epoch over ``loader``.

    Args:
        loader: DataLoader yielding ``(data, target)`` batches; ``data`` gets a
            channel dimension added before the forward pass.
        model: network to train (switched to train mode here).
        optimizer: optimizer stepping ``model``'s parameters.
        criterion: loss taking ``(logits, class-index targets)``.
        device: torch device batches are moved to.

    Returns:
        Tuple of (mean per-batch loss, overall accuracy over the dataset).
    """
    model.train()
    total_loss = 0.0
    correct = 0
    # iterate batches directly; the previous enumerate() index was unused
    for data, target in loader:
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        # add a channel dimension: (B, L) -> (B, 1, L)
        output = model(data.unsqueeze(1))
        loss = criterion(output, target.long())
        total_loss += loss.item()
        loss.backward()
        # clip gradients to stabilize training
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0)
        optimizer.step()
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()
    return total_loss / len(loader), correct / len(loader.dataset)


@torch.inference_mode()
def valid(loader, model, criterion, device):
    """Evaluate the model and return mean loss plus per-class accuracy.

    Args:
        loader: DataLoader whose ``dataset`` exposes ``nc`` (class count).
        model: network to evaluate (switched to eval mode here).
        criterion: loss taking ``(logits, class-index targets)``.
        device: torch device batches are moved to.

    Returns:
        Tuple of (mean per-batch loss, numpy array of length ``nc`` holding
        the per-class accuracy).
    """
    model.eval()
    total_loss = 0.0
    nc = loader.dataset.nc
    correct_per_class = np.zeros(nc)
    # small epsilon avoids division by zero for classes absent from the loader
    count_per_class = np.zeros(nc) + 1e-6
    for data, target in loader:
        data, target = data.to(device), target.to(device)
        # add a channel dimension: (B, L) -> (B, 1, L)
        output = model(data.unsqueeze(1))
        loss = criterion(output, target.long())
        total_loss += loss.item()
        pred = output.argmax(dim=1)
        hit = target == pred
        for c in range(nc):
            in_class = target == c
            # .item() so CUDA tensors convert cleanly into numpy accumulators
            # (numpy cannot ingest cuda tensors directly)
            correct_per_class[c] += hit[in_class].sum().item()
            count_per_class[c] += in_class.sum().item()

    return total_loss / len(loader), correct_per_class / count_per_class



if __name__ == "__main__":
    # Pre-extracted 512-sample vibration windows; VibrationDataset is defined
    # in dataset.py — presumably exposes .label and honors .indices (see below).
    test_dataset = VibrationDataset('SAFDNN/test_data_512.npy')
    train_dataset = VibrationDataset('SAFDNN/train_data_512.npy')

    batch_size = 16
    epochs = 100

    # # Compute the weight of each class
    # train_class_counts = np.bincount(train_dataset.label.astype(int))
    # train_class_weights = 1. / train_class_counts
    # train_sample_weights = train_class_weights[train_dataset.label.astype(int)]
    # test_class_counts = np.bincount(test_dataset.label.astype(int))
    # test_class_weights = 1. / test_class_counts
    # test_sample_weights = test_class_weights[test_dataset.label.astype(int)]
    # # Create WeightedRandomSampler
    # train_sampler = torch.utils.data.WeightedRandomSampler(weights=train_sample_weights, num_samples=len(train_sample_weights), replacement=False)
    # test_sampler = torch.utils.data.WeightedRandomSampler(weights=test_sample_weights, num_samples=len(test_sample_weights), replacement=False)
    
    # test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, sampler=test_sampler)
    # train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, sampler=train_sampler)

    # NOTE(review): shuffle flags look swapped (conventionally train shuffles,
    # test does not). This may be deliberate since the train sample order is
    # re-drawn each epoch via train_dataset.indices below — confirm intent.
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=True)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=False)

    # SAFDNN(1, 4): presumably 1 input channel, 4 output classes — confirm
    # against model.py.
    model = SAFDNN(1, 4)
    # if os.path.exists("best.pth"):
    #     model.load_state_dict(torch.load("best.pth"))
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
    criterion = torch.nn.CrossEntropyLoss()
    # cos_lr: one_cycle() (from utils) supplies the lr multiplier schedule
    lf = one_cycle()
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer=optimizer, lr_lambda=lf)
    # EMA shadow of the model is what gets evaluated and checkpointed
    ema = ModelEMA(model)
    early_stopping, stop = EarlyStopping(5), False
    best_acc = -np.inf
    for epoch in range(epochs):

        train_loss, train_acc = train_per_epoch(model=model, loader=train_loader, optimizer=optimizer, criterion=criterion, device=device)
        print("train:", train_loss, train_acc)
        # NOTE(review): EMA is updated once per epoch here; many setups update
        # per optimizer step — confirm this matches ModelEMA's decay settings.
        ema.update(model)
        scheduler.step()
        # evaluate the EMA weights, not the raw model
        test_loss, test_acc = valid(model=ema.ema, loader=test_loader, criterion=criterion, device=device)
        print("test:", test_loss, test_acc)
        # checkpoint the EMA weights whenever mean per-class test accuracy improves
        if best_acc < test_acc.mean():
            best_acc = test_acc.mean()
            torch.save(ema.ema.state_dict(), "best.pth")
        # early stopping monitors mean per-class test accuracy
        stop = early_stopping(test_acc.mean(), epoch)
        if stop:
            break
        # Re-weight sampling toward poorly-classified classes: weight is
        # 1 - (class accuracy / total accuracy), so low-accuracy classes get
        # drawn more often next epoch.
        class_weights = 1. - test_acc / (test_acc.sum() + 1e-6)
        class_dic = {i: class_weights[i] for i in range(len(class_weights))}
        sample_weights = np.vectorize(lambda x: class_dic[x])(train_dataset.label)
        # NOTE(review): assumes VibrationDataset indexes through .indices when
        # iterated (sampling with replacement here) — confirm in dataset.py.
        train_dataset.indices = random.choices(range(train_dataset.label.shape[0]), weights=sample_weights, k=train_dataset.label.shape[0])

