import models
import time
import torch
import math
from tqdm import tqdm
import numpy as np
import pandas as pd
from sklearn.metrics import f1_score, accuracy_score, recall_score, precision_score, average_precision_score, roc_auc_score


def eval_model(cfg, model, val_loader, loss_ce, val_batch_size):
    """Run one validation pass and return accuracy plus per-sample predictions."""
    model.eval()
    outpred_list = []
    gt_label_list = []
    video_list = []
    valLoss = 0
    lossTrainNorm = 0
    print("******** Start Testing. ********")
    with torch.no_grad():  # no need to track gradients during validation
        for i, (_, input, target, binary_label, video_id) in enumerate(
            tqdm(val_loader, desc="Validation", total=len(val_loader))
        ):
            if i == 0:
                ss_time = time.time()
            input = input[:, 0]  # keep only the first crop/clip per sample
            # torch.autograd.Variable is deprecated; plain tensors carry autograd state
            varInput = input.float().cuda()
            varTarget = target.contiguous().cuda()
            var_Binary_Target = binary_label.contiguous().cuda()

            logit = model(varInput)
            lossvalue = loss_ce(logit, var_Binary_Target)
            valLoss += lossvalue.item()
            lossTrainNorm += 1

            # collect sigmoid scores, ground-truth labels, and video ids for metrics
            outpred_list.append(logit[:, 0].sigmoid().cpu().numpy())
            gt_label_list.append(varTarget.cpu().numpy())
            video_list.append(video_id)

    valLoss = valLoss / lossTrainNorm  # mean validation loss (kept for logging, not returned)
    outpred = np.concatenate(outpred_list, 0)
    gt_label = np.concatenate(gt_label_list, 0)
    video_list = np.concatenate(video_list, 0)

    # threshold the positive-class score at 0.5 and compare with the one-hot ground truth
    pred_labels = [1 if item > 0.5 else 0 for item in outpred]
    true_labels = np.argmax(gt_label, axis=1)
    pred_accuracy = accuracy_score(true_labels, pred_labels)

    return pred_accuracy, video_list, pred_labels, true_labels, outpred
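

# Minimal smoke-test sketch (not part of the original pipeline): builds a dummy
# classifier and a synthetic loader matching the 5-tuple batches eval_model expects.
# The clip shape, the BCE-with-logits loss, and the _Dummy* names are assumptions
# chosen for illustration only; the real cfg, model, and dataset live elsewhere in
# this repo. Requires a CUDA device because eval_model moves tensors to the GPU.
if __name__ == "__main__":
    import torch.nn as nn
    from torch.utils.data import DataLoader, Dataset

    class _DummyVideoDataset(Dataset):
        """Yields (index, clip, one-hot target, binary label, video id) like val_loader."""

        def __len__(self):
            return 8

        def __getitem__(self, idx):
            clip = torch.randn(1, 3, 224, 224)     # one crop per sample; eval_model takes input[:, 0]
            target = torch.eye(2)[idx % 2]         # one-hot ground truth, argmax'd in eval_model
            binary_label = torch.tensor([float(idx % 2)])
            return idx, clip, target, binary_label, f"video_{idx}"

    class _DummyModel(nn.Module):
        """Single-logit classifier so logit[:, 0].sigmoid() is the positive-class score."""

        def __init__(self):
            super().__init__()
            self.net = nn.Sequential(nn.Flatten(), nn.Linear(3 * 224 * 224, 1))

        def forward(self, x):
            return self.net(x)

    if torch.cuda.is_available():
        loader = DataLoader(_DummyVideoDataset(), batch_size=4)
        acc, vids, preds, gts, scores = eval_model(
            cfg=None,
            model=_DummyModel().cuda(),
            val_loader=loader,
            loss_ce=nn.BCEWithLogitsLoss(),
            val_batch_size=4,
        )
        print("dummy accuracy:", acc)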