# -*- coding:utf-8 -*-
import os
import torch.nn as nn
from torch.utils.data import DataLoader
from model.model import encoder
from dataset.datasets import load_dataset
import torch.nn.functional as F
from tqdm import tqdm
import torch
import numpy as np
from func import WeightedKNNClassifier, linear, rbf_softmax_linear, cluster_acc
import multiprocessing
import argparse
############
## Import ##
############

######################
## Testing Accuracy ##
######################
# Architectures this project knows about. NOTE(review): this list is not
# referenced anywhere in the visible code, and some spellings here
# ('resnet-34', 'resnet-50') differ from the arch names checked in the
# __main__ block ('resnet18', 'resnet50') — verify intended usage.
models_list = ['vgg16', 'vgg16_bn', 'resnet18', 'resnet-34', 'resnet-50',
               'resnet101', 'resnet152','mobilenet_v3',
               'efficient-resnet18-capsule', 'resizer_efficient-resnet18-capsule',
               'crate_small' ,'crate_base' ,'crate_large', 'crate_tiny', 'crate_tiny_small',
               'vit-b-50', 'vit-b-125', 'vit-l-50', 'vit-l-125','glom', 'classical_features',
               'faster_vit_4_21k_224']
# Capsule-family encoders return an extra third output (`probs`) from their
# forward pass, so `inference` unpacks three values for these architectures.
capsule_network_family_list = ['efficient-capsule-orgin' ,'efficient-capsule' ,'efficient-res-capsule',
                               'res-capsule', 'efficient-resnet18-capsule','simple_efficient_res_capsule',
                               'resizer_efficient-resnet18-capsule']
# CRATE-family encoders; in the __main__ block all of these get hidden_dim=128
# except 'crate_base', which is matched earlier with hidden_dim=768.
crate_network_family_list = ['crate_small','crate_base','crate_large', 'crate_tiny', 'crate_tiny_small']

def compute_accuracy(y_pred, y_true):
    """Return the fraction of positions where `y_pred` equals `y_true`.

    Both arguments must be NumPy arrays of identical shape.
    """
    assert y_pred.shape == y_true.shape
    mismatches = np.count_nonzero(y_pred != y_true)
    return 1 - mismatches / y_true.size


def chunk_avg(x, n_chunks=2, normalize=False):
    """Average `x` over `n_chunks` equal slices along the batch dimension.

    The tensor is split along dim 0, the slices are stacked and averaged
    element-wise. With ``normalize=True`` the mean is additionally
    L2-normalised along dim 1.
    """
    stacked = torch.stack(x.chunk(n_chunks, dim=0), dim=0)
    mean = stacked.mean(dim=0)
    return F.normalize(mean, dim=1) if normalize else mean


def inference(net, train_loader, test_loader):
    """Extract features with `net` and evaluate them with the classifiers
    selected on the command line (linear probe, RBF-softmax probe, kNN).

    Relies on module-level globals: ``args`` (parsed CLI options) and
    ``test_patches`` (number of patches averaged per image).

    Args:
        net: feature-extraction network; called as ``net(x, is_test=True)``.
        train_loader: DataLoader yielding (list-of-patch-tensors, labels);
            used to fit the evaluation classifiers.
        test_loader: DataLoader with the same structure, used for scoring.

    Side effects: saves fitted classifier(s) next to ``args.model_path``
    and prints accuracy figures.
    """
    knn_classifier = WeightedKNNClassifier()
    train_z_full_list, train_y_list, test_z_full_list, test_y_list = [], [], [], []
    with torch.no_grad():
        for x, y in tqdm(train_loader):
            # Each sample arrives as a list of patch tensors; concatenate
            # them along dim 0 so the network sees one big batch.
            x = torch.cat(x, dim=0)
            # Capsule-family encoders additionally return routing probabilities.
            if args.arch in capsule_network_family_list:
                z_proj, z_pre, probs = net(x, is_test=True)
            else:
                z_proj, z_pre = net(x, is_test=True)

            # Average the per-patch features back into one vector per image.
            z_pre = chunk_avg(z_pre, test_patches)
            z_pre = z_pre.detach().cpu()

            train_z_full_list.append(z_pre)
            knn_classifier.update(train_features=z_pre, train_targets=y)
            train_y_list.append(y)

        for x_test, y_test in tqdm(test_loader):
            x_test = torch.cat(x_test, dim=0)
            if args.arch in capsule_network_family_list:
                z_proj_test, z_pre_test, probs_test = net(x_test, is_test=True)
            else:
                z_proj_test, z_pre_test = net(x_test, is_test=True)

            z_pre_test = chunk_avg(z_pre_test, test_patches)
            z_pre_test = z_pre_test.detach().cpu()

            test_z_full_list.append(z_pre_test)
            knn_classifier.update(test_features=z_pre_test, test_targets=y_test)
            test_y_list.append(y_test)

    train_features_full = torch.cat(train_z_full_list, dim=0)
    train_labels = torch.cat(train_y_list, dim=0)
    test_features_full = torch.cat(test_z_full_list, dim=0)
    test_labels = torch.cat(test_y_list, dim=0)

    # Number of classes per dataset; unknown dataset names fall back to 6,
    # matching the original if/elif chain's default.
    num_classes_by_dataset = {
        "cifar10": 10,
        "cifar100": 100,
        "tinyimagenet200": 200,
        "imagenet100": 100,
        "imagenet": 1000,
        "smallloil": 6,
        "carbonateimagesdynafull": 21,
        "carbonateimagesstatfull": 21,
        "outlierimagesstat": 7,
        "small_data_oil_for_classification": 6,
    }
    dataset_num_classed = num_classes_by_dataset.get(args.data, 6)

    # Save evaluation artifacts next to the checkpoint being evaluated.
    class_dir_name = os.path.dirname(os.path.abspath(args.model_path))

    if args.linear:
        print("Using Linear Eval to evaluate accuracy")
        linear_epoch = 10
        linear_model = linear(train_features_full, linear_epoch, train_labels,
                              test_features_full, test_labels, lr=args.lr,
                              num_classes=dataset_num_classed)
        torch.save(linear_model, os.path.join(class_dir_name, 'linear.pt'))

    if args.rbflogit:
        # Fixed copy-paste bug: this branch previously printed
        # "Using Linear Eval to evaluate accuracy".
        print("Using RBF-softmax Eval to evaluate accuracy")
        rbflogit_epoch = 10
        rbflogit_model = rbf_softmax_linear(train_features_full, rbflogit_epoch,
                                            train_labels, test_features_full,
                                            test_labels, lr=args.lr,
                                            num_classes=dataset_num_classed)
        torch.save(rbflogit_model, os.path.join(class_dir_name, 'rbflogit_model.pt'))

    if args.knn:
        print("Using KNN to evaluate accuracy")
        top1, top5 = knn_classifier.compute()
        print("KNN (top1/top5):", top1, top5)
        torch.save(knn_classifier, os.path.join(class_dir_name, 'knn.pt'))




if __name__ == '__main__':
    ######################
    ## Parsing Argument ##
    ######################
    def str2bool(value):
        """Parse a textual boolean for argparse.

        Fixes the original ``type=bool`` pitfall: any non-empty string is
        truthy in Python, so ``--linear False`` used to parse as True.
        """
        return str(value).strip().lower() in ('1', 'true', 'yes', 'y', 't')

    parser = argparse.ArgumentParser(description='Evaluation')

    parser.add_argument('--test_patches', type=int, default=128,
                        help='number of patches used in testing (default: 128)')
    parser.add_argument('--data', type=str, default="cifar10",
                        help='dataset (default: cifar10)')
    parser.add_argument('--arch', type=str, default="resnet18-cifar",
                        help='network architecture (default: resnet18-cifar)')
    parser.add_argument('--lr', type=float, default=0.03,
                        help='learning rate for linear eval (default: 0.03)')
    parser.add_argument('--linear', type=str2bool, default=True,
                        help='use linear eval or not')
    parser.add_argument('--rbflogit', type=str2bool, default=False,
                        help='use rbflogit eval or not')
    parser.add_argument('--knn', help='evaluate using kNN measuring cosine similarity',
                        action='store_true')
    parser.add_argument('--model_path', type=str, default="",
                        help='model directory for eval')

    args = parser.parse_args()
    multiprocessing.freeze_support()
    # Avoid "too many open files" when many DataLoader workers share tensors.
    torch.multiprocessing.set_sharing_strategy('file_system')
    test_patches = args.test_patches

    # Get Dataset. ImageNet variants use a larger batch with fewer workers;
    # everything else the reverse (values preserved from the original).
    if args.data in ("imagenet100", "imagenet"):
        batch_size, num_workers = 8, 4
    else:
        batch_size, num_workers = 4, 8

    memory_dataset = load_dataset(args.data, train=True, num_patch=test_patches)
    memory_loader = DataLoader(memory_dataset, batch_size=batch_size, shuffle=True,
                               drop_last=True, num_workers=num_workers)
    test_data = load_dataset(args.data, train=False, num_patch=test_patches)
    test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True,
                             num_workers=num_workers)

    # Load Model and Checkpoint
    use_cuda = torch.cuda.is_available()
    device = "cuda" if use_cuda else "cpu"

    # Feature dimensionality of each encoder head. Architectures not listed
    # fall back to 2048, exactly as the original if/elif chain did.
    hidden_dims = {
        'efficient-res-capsule': 256,
        'conv-encoder': 256,
        'vgg16': 4096,
        'vgg16_bn': 4096,
        'mobilenet_v3': 1024,
        'efficient-capsule-orgin': 16,
        'efficient-capsule': 512,
        'efficient-resnet18-capsule': 64,
        'resizer_efficient-resnet18-capsule': 256,
        'simple_efficient_res_capsule': 288,
        'bilateralfsunet': 128,
        'crate_base': 768,  # checked before the crate family fallback below
        'glom': 1024,
        'resnet18': 512,
        'resnet50': 2048,
        'resnet152': 2048,
    }
    if args.arch in hidden_dims:
        hidden_dim = hidden_dims[args.arch]
    elif args.arch in crate_network_family_list:
        # All crate variants except 'crate_base' (handled above) use 128.
        hidden_dim = 128
    else:
        hidden_dim = 2048
    net = encoder(user_device=device, arch=args.arch, hidden_dim=hidden_dim)

    net = nn.DataParallel(net)
    # map_location lets a GPU-saved checkpoint load on a CPU-only machine.
    save_dict = torch.load(args.model_path, map_location=device)
    net.load_state_dict(save_dict, strict=False)
    net.to(device)
    net.eval()

    inference(net, memory_loader, test_loader)



