import numpy as np
import os
import torch
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset, TensorDataset
import torch.nn as nn

from config import ModelArgs
from models.model import MainBackbone
from utils.util import MyDataset


def label_from_name(filename):
    """Extract the integer class label encoded in a sample file name.

    File names are expected to look like '<20-char prefix><label>.npy';
    the label is everything after the first 20 characters of the stem.
    NOTE(review): the fixed 20-character prefix is assumed from the original
    slicing (`name[20:]`) — confirm against the data-export naming scheme.
    """
    stem = filename[:-4]  # drop the '.npy' extension
    return int(stem[20:])


def binarize_labels(target):
    """Collapse the 30-class labels into a binary task: class >= 15 -> 1."""
    return torch.where(target >= 15, 1, 0).long()


def load_split(dir1, dir2):
    """Load one data split from two parallel directories of .npy files.

    Both directories must contain identically named files (the raw and the
    z-scored version of each sample). The class label is parsed from each
    file name via ``label_from_name``.

    Returns:
        (features1, features2, labels) as stacked numpy arrays.

    Raises:
        ValueError: if the two directories do not hold the same file names.
    """
    # Sort explicitly: os.listdir returns entries in arbitrary order, and the
    # pairing below relies on both listings lining up index-by-index.
    names1 = sorted(os.listdir(dir1))
    names2 = sorted(os.listdir(dir2))
    if names1 != names2:
        raise ValueError(f'file name mismatch between {dir1} and {dir2}')

    feats1, feats2, labels = [], [], []
    for name in names1:
        feats1.append(np.load(os.path.join(dir1, name)))
        feats2.append(np.load(os.path.join(dir2, name)))
        labels.append(label_from_name(name))
    return np.array(feats1), np.array(feats2), np.array(labels)


if __name__ == '__main__':
    root_data_path = '/root/data/video_decoding'
    train_path1 = os.path.join(root_data_path, 'npy_data_class_30', 'train')
    val_path1 = os.path.join(root_data_path, 'npy_data_class_30', 'val')
    train_path2 = os.path.join(root_data_path, 'npy_data_class_30_zscore', 'train')
    val_path2 = os.path.join(root_data_path, 'npy_data_class_30_zscore', 'val')
    result_path = os.path.join(root_data_path, 'results', 'cls_30')
    # ModelArgs is used as a plain attribute namespace (the class itself, not
    # an instance) — args.* is read and written directly below.
    args = ModelArgs

    # Load the train and validation splits (raw + z-scored feature pairs).
    feat_train1, feat_train2, spec_train = load_split(train_path1, train_path2)
    print(feat_train1.shape)
    print(feat_train2.shape)
    print(spec_train.shape)

    feat_val1, feat_val2, spec_val = load_split(val_path1, val_path2)
    print(feat_val1.shape)
    print(feat_val2.shape)
    print(spec_val.shape)

    train_dataset = MyDataset(
        torch.tensor(feat_train1).to(torch.float),
        torch.tensor(feat_train2).to(torch.float),
        torch.tensor(spec_train).to(torch.int64),
    )
    val_dataset = MyDataset(
        torch.tensor(feat_val1).to(torch.float),
        torch.tensor(feat_val2).to(torch.float),
        torch.tensor(spec_val).to(torch.int64),
    )

    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
    # Validation needs no shuffling — metrics are order-independent.
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False)

    model = MainBackbone(args)
    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs)
    args.device = torch.device(f"cuda:{args.gpu_id}" if torch.cuda.is_available() else "cpu")
    criterion = nn.CrossEntropyLoss()
    model.to(args.device)

    loss_val_min = np.inf
    loss_all = []
    acc_val = []
    for epoch in range(args.epochs):
        # ---- train ----
        model.train()
        train_loss = 0.0
        correct_train = 0
        total_train = 0
        for data1, data2, target in train_loader:
            data1 = data1.to(args.device)
            data2 = data2.to(args.device)
            # Convert the 30-class labels to the binary task.
            target = binarize_labels(target).to(args.device)

            optimizer.zero_grad()
            output = model(data1, data2)
            result = torch.argmax(output, 1, keepdim=False)
            correct_train += (result == target).sum().float()
            total_train += len(target)
            loss = criterion(output, target)
            train_loss += loss.item()
            loss.backward()
            optimizer.step()
        scheduler.step()

        epoch_loss = train_loss / len(train_loader)
        epoch_acc = correct_train / total_train
        loss_all.append(epoch_loss)
        print(f'Epoch {epoch+1} / {args.epochs}, train loss: {epoch_loss:.4f}, acc: {epoch_acc:.4f}')

        # ---- validation ----
        model.eval()
        val_loss = 0.0
        correct_val = 0
        total_val = 0
        # no_grad: no autograd graph is needed for evaluation (saves memory).
        with torch.no_grad():
            for data1, data2, target in val_loader:
                data1 = data1.to(args.device)
                data2 = data2.to(args.device)
                # Convert the 30-class labels to the binary task.
                target = binarize_labels(target).to(args.device)

                output = model(data1, data2)
                result = torch.argmax(output, 1, keepdim=False)
                correct_val += (result == target).sum().float()
                total_val += len(target)
                val_loss += criterion(output, target).item()

        epoch_loss = val_loss / len(val_loader)
        epoch_acc = correct_val / total_val
        acc_val.append(epoch_acc.cpu())
        print(f'val loss: {epoch_loss:.4f}, val acc: {epoch_acc:.4f}')

        # Checkpoint whenever the validation loss improves.
        if epoch_loss < loss_val_min:
            torch.save(model.state_dict(), os.path.join(root_data_path, 'model.pth'))
            loss_val_min = epoch_loss
            print(epoch + 1)
