import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import DataLoader,Dataset

def test(net_glob, dataset_test, args):
    """Evaluate the global model on the test set and print its accuracy.

    Returns:
        (accuracy, loss) as produced by ``test_img_avg``.
    """
    results = test_img_avg(net_glob, dataset_test, args)
    print("Testing accuracy: {:.2f}".format(results[0]))
    return results

def test_img(net_list,dataset,args):
    for i in range(args.num_groups):
        net_list[i].eval()
    test_loss = 0
    correct = 0
    data_loader = DataLoader(dataset,batch_size=args.bs)
    
    l = len(data_loader)
    with torch.no_grad():
        for id in range(args.num_groups):
            for idx,(image,label) in enumerate(data_loader):
                image,label = image.to(args.device),label.to(args.device)
                if args.dataset == 'widar':
                    target = target.long()
                log_probs = net_list[id](image)['output']
                test_loss += F.cross_entropy(log_probs,label,reduction='sum').item()
                y_pred = log_probs.data.max(1,keepdim=True)[1]
                correct += y_pred.eq(label.data.view_as(y_pred)).long().cpu().sum()

    test_loss /= len(data_loader.dataset)*args.num_groups
    accuracy = 100.00 * correct / (len(data_loader.dataset)*args.num_groups)
    return accuracy,test_loss

def test_img_avg(net_g, datatest, args):
    net_g.eval()
    # testing
    test_loss = 0
    correct = 0
    data_loader = DataLoader(datatest, batch_size=args.bs)
    l = len(data_loader)
    with torch.no_grad():
        for idx, (data, target) in enumerate(data_loader):
            if args.device != -1:
                data, target = data.to(args.device), target.to(args.device)
            if args.dataset == 'widar':
                target = target.long()
            log_probs = net_g(data)['output']
            # sum up batch loss
            test_loss += F.cross_entropy(log_probs, target, reduction='sum').item()
            # get the index of the max log-probability
            y_pred = log_probs.data.max(1, keepdim=True)[1]
            correct += y_pred.eq(target.data.view_as(y_pred)).long().cpu().sum()

    test_loss /= len(data_loader.dataset)
    accuracy = 100.00 * correct / len(data_loader.dataset)
    # if args.verbose:
    #     print('\nTest set: Average loss: {:.4f} \nAccuracy: {}/{} ({:.2f}%)\n'.format(
    #         test_loss, correct, len(data_loader.dataset), accuracy))
    return accuracy, test_loss


def test_img_resem(net_zoo, datatest, args):
    for net in net_zoo:
        net.eval()
    # testing
    test_loss = 0
    correct = 0
    data_loader = DataLoader(datatest, batch_size=args.bs)
    l = len(data_loader)
    with torch.no_grad():
        for idx, (data, target) in enumerate(data_loader):
            if args.device != -1:
                data, target = data.to(args.device), target.to(args.device)
            if args.dataset == 'widar':
                target = target.long()
            log_probs = net_zoo[0](net_zoo[1](data)['representation4'],start_layer_idx = 4)['output']
            # sum up batch loss
            test_loss += F.cross_entropy(log_probs, target, reduction='sum').item()
            # get the index of the max log-probability
            y_pred = log_probs.data.max(1, keepdim=True)[1]
            correct += y_pred.eq(target.data.view_as(y_pred)).long().cpu().sum()

    test_loss /= len(data_loader.dataset)
    accuracy = 100.00 * correct / len(data_loader.dataset)
    # if args.verbose:
    #     print('\nTest set: Average loss: {:.4f} \nAccuracy: {}/{} ({:.2f}%)\n'.format(
    #         test_loss, correct, len(data_loader.dataset), accuracy))
    return accuracy, test_loss

def test_img_depthfl(net_g, datatest, args):
    net_g.eval()
    # testing
    test_loss = 0
    correct = 0
    data_loader = DataLoader(datatest, batch_size=args.bs)
    l = len(data_loader)
    with torch.no_grad():
        for idx, (data, target) in enumerate(data_loader):
            if args.device != -1:
                data, target = data.to(args.device), target.to(args.device)
            if args.dataset == 'widar':
                target = target.long()
            output_list = net_g(data)
            # ensemble_output = torch.stack(output_list, dim=2)
            # ensemble_output = torch.sum(ensemble_output, dim=2) / len(output_list)
            ensemble_output = output_list[-1]
            # sum up batch loss
            test_loss += F.cross_entropy(ensemble_output, target, reduction='sum').item()
            # get the index of the max log-probability
            y_pred = ensemble_output.data.max(1, keepdim=True)[1]
            correct += y_pred.eq(target.data.view_as(y_pred)).long().cpu().sum()

    test_loss /= len(data_loader.dataset)
    accuracy = 100.00 * correct / len(data_loader.dataset)
    # if args.verbose:
    #     print('\nTest set: Average loss: {:.4f} \nAccuracy: {}/{} ({:.2f}%)\n'.format(
    #         test_loss, correct, len(data_loader.dataset), accuracy))
    return accuracy, test_loss
