import os
import torch
import torch.optim as optim
import torch.nn as nn
import numpy as np
import datetime
from torch.utils.tensorboard import SummaryWriter

from models.model import TransformerMelSpectrogramRegressor
from config import ModelArgs
from utils.util import getEEGValidDataloader
from utils.filter import extract_quick_FFT

# Timestamp used to make each TensorBoard run directory unique.
current_time = datetime.datetime.now().strftime('%Y%m%d%H%M')
# Special note / experiment tag appended to the run name.
spec = 'nosegment_90_200Hz'

if __name__ == '__main__':
    # Train one classifier per EEG frequency band; log losses/accuracies to
    # TensorBoard and keep the checkpoint with the lowest validation loss.
    bands = ['theta', 'alpha', 'beta', 'low_gamma', 'high_gamma']
    args = ModelArgs
    cls_num = 2
    method = 'sfft'
    root_data_path = f'/root/data/video_decoding/results/cls_{cls_num}'
    # BUG FIX: build the run tag ONCE. The original appended f'_{method}' to
    # `spec` inside every batch iteration, so the string grew unboundedly and
    # corrupted the TensorBoard run path for every band after the first.
    run_spec = f'{spec}_{method}' if method == 'sfft' else spec
    for band in bands:
        # BUG FIX: use the loop variable `band` — the original hard-coded
        # 'high_gamma', so all five iterations trained on the same band.
        train_loader = getEEGValidDataloader(args.batch_size, cls_num, type_='train', spec_path=1, band=band)
        val_loader = getEEGValidDataloader(args.batch_size, cls_num, type_='val', spec_path=1, band=band)

        args.input_dim = 57
        args.output_dim = cls_num
        model = TransformerMelSpectrogramRegressor(args)  # input dim is the time dimension
        optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs)
        device = torch.device(f"cuda:{args.gpu_id}" if torch.cuda.is_available() else "cpu")
        criterion = nn.CrossEntropyLoss()
        model.to(device)

        writer = SummaryWriter(f'runs/cls_{cls_num}/{band}/epoch{args.epochs}_{run_spec}_{current_time}')
        loss_val_min = np.inf
        loss_all = []   # per-epoch mean training loss
        acc_val = []    # per-epoch validation accuracy (on CPU)
        for epoch in range(args.epochs):
            # ---- training ----
            model.train()
            train_loss = 0.0
            correct = 0
            total = 0
            for data, target in train_loader:
                if method == 'sfft':
                    data = torch.tensor(extract_quick_FFT(data))
                data = data.to(device)
                target = target.to(device)
                optimizer.zero_grad()
                output = model(data)
                result = torch.argmax(output, 1, keepdim=False)
                correct += (result == target).sum().float()
                total += len(target)
                loss = criterion(output, target)
                train_loss += loss.item()
                loss.backward()
                optimizer.step()
            # scheduler.step()
            epoch_loss = train_loss / len(train_loader)
            epoch_acc = correct / total
            loss_all.append(epoch_loss)
            writer.add_scalar('Loss/Train', epoch_loss, epoch)
            writer.add_scalar('accuracy/Train', epoch_acc, epoch)
            print(f'Epoch {epoch+1} / {args.epochs}, train loss: {epoch_loss:.4f}, acc: {epoch_acc:.4f}')
            # ---- validation ----
            model.eval()
            val_loss = 0.0
            correct_val = 0
            total_val = 0
            # BUG FIX: evaluate under no_grad — the original built autograd
            # graphs during validation, wasting memory for no benefit.
            with torch.no_grad():
                for data, target in val_loader:
                    if method == 'sfft':
                        data = torch.tensor(extract_quick_FFT(data))
                    data = data.to(device)
                    target = target.to(device)
                    output = model(data)
                    result = torch.argmax(output, 1, keepdim=False)
                    correct_val += (result == target).sum().float()
                    total_val += len(target)
                    loss = criterion(output, target)
                    val_loss += loss.item()
            epoch_loss = val_loss / len(val_loader)
            epoch_acc = correct_val / total_val
            acc_val.append(epoch_acc.cpu())
            writer.add_scalar('Loss/Valid', epoch_loss, epoch)
            writer.add_scalar('accuracy/Valid', epoch_acc, epoch)
            print(f'val loss: {epoch_loss:.4f}  val acc: {epoch_acc:.4f}')
            if epoch_loss < loss_val_min:
                # BUG FIX: include the band in the checkpoint name so each
                # band keeps its own best model — the original wrote every
                # band's checkpoint to one shared 'model.pth'.
                # NOTE(review): downstream loaders must use the new name.
                torch.save(model.state_dict(), os.path.join(root_data_path, f'model_{band}.pth'))
                loss_val_min = epoch_loss
                print(epoch+1, ' stored')
        # BUG FIX: close the writer after ALL epochs for this band; the
        # original closed it inside the epoch loop, after the first epoch.
        writer.close()