import numpy as np
import os
import torch
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset, TensorDataset
import torch.nn as nn

from models.transformer import TransformerMelSpectrogramRegressor, EEGCNN
import pandas as pd
from torch.utils.tensorboard import SummaryWriter
from config import ModelArgs
from models.model import MainBackbone

def load_split(split_path, label_start=20):
    """Load every .npy feature file in *split_path* and parse its class label.

    Each file is expected to hold a 2-D array of shape (length, channel);
    the integer class id is encoded in the filename characters
    ``[label_start:-4]`` (the trailing 4 characters are the '.npy' suffix)
    — TODO confirm the 20-character prefix convention against the data dump.

    Args:
        split_path: directory containing the .npy files for one split.
        label_start: index in the stem where the class id digits begin.

    Returns:
        feats: float ndarray of shape (n_files, channel, length).
        labels: int ndarray of shape (n_files,).
    """
    feats, labels = [], []
    # sorted() so sample order is deterministic across runs/filesystems
    for fname in sorted(os.listdir(split_path)):
        feats.append(np.load(os.path.join(split_path, fname)))
        labels.append(int(fname[:-4][label_start:]))
    # (n, length, channel) -> (n, channel, length) as the model expects
    feats = np.array(feats).transpose((0, 2, 1))
    return feats, np.array(labels)


def run_epoch(model, loader, criterion, device, optimizer=None):
    """Run one pass over *loader*; train when *optimizer* is given, else evaluate.

    Evaluation runs under ``torch.set_grad_enabled(False)`` so no autograd
    graph is built (the original validation loop leaked graph memory by
    omitting no_grad).

    Returns:
        (mean batch loss, accuracy) as plain Python floats.
    """
    training = optimizer is not None
    model.train(training)  # train() / eval() mode in one call
    total_loss = 0.0
    correct = 0
    total = 0
    with torch.set_grad_enabled(training):
        for data, target in loader:
            data = data.to(device)
            target = target.to(device)
            output = model(data)
            loss = criterion(output, target)
            if training:
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            total_loss += loss.item()
            correct += (output.argmax(1) == target).sum().item()
            total += len(target)
    return total_loss / len(loader), correct / total


if __name__ == '__main__':
    root_data_path = '/root/data/video_decoding'
    train_path = os.path.join(root_data_path, 'npy_data_class_30_zscore', 'train')
    val_path = os.path.join(root_data_path, 'npy_data_class_30_zscore', 'val')
    result_path = os.path.join(root_data_path, 'results', 'cls_30')
    args = ModelArgs

    # Load both splits with the shared helper (was copy-pasted twice).
    feat_train, spec_train = load_split(train_path)
    print(feat_train.shape)
    print(spec_train.shape)
    feat_val, spec_val = load_split(val_path)
    print(feat_val.shape)
    print(spec_val.shape)

    train_dataset = TensorDataset(torch.tensor(feat_train).to(torch.float),
                                  torch.tensor(spec_train).to(torch.int64))
    val_dataset = TensorDataset(torch.tensor(feat_val).to(torch.float),
                                torch.tensor(spec_val).to(torch.int64))

    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
    # Evaluation order is irrelevant to the metrics; no shuffle needed.
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False)

    model = TransformerMelSpectrogramRegressor(args)

    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs)
    args.device = torch.device(f"cuda:{args.gpu_id}" if torch.cuda.is_available() else "cpu")
    criterion = nn.CrossEntropyLoss()
    # Module.to() moves parameters in place, so the optimizer created above
    # still references the same (now device-resident) tensors.
    model.to(args.device)

    loss_val_min = np.inf
    writer = SummaryWriter('runs/cls_30/zscore')

    for epoch in range(args.epochs):
        # Training epoch.
        train_loss, train_acc = run_epoch(model, train_loader, criterion,
                                          args.device, optimizer)
        scheduler.step()
        writer.add_scalar('Loss/Train', train_loss, epoch)
        writer.add_scalar('accuracy/Train', train_acc, epoch)
        print(f'Epoch {epoch+1} / {args.epochs}, train loss: {train_loss:.4f}, acc: {train_acc:.4f}')

        # Validation epoch (no optimizer -> eval mode, gradients disabled).
        val_loss, val_acc = run_epoch(model, val_loader, criterion, args.device)
        writer.add_scalar('Loss/Valid', val_loss, epoch)
        writer.add_scalar('accuracy/Valid', val_acc, epoch)
        print(f'val loss: {val_loss:.4f}, val acc: {val_acc:.4f}')

        # Checkpoint on best validation loss seen so far.
        if val_loss < loss_val_min:
            torch.save(model.state_dict(), os.path.join(root_data_path, 'model.pth'))
            loss_val_min = val_loss
            print(epoch+1)
    writer.close()
