import argparse
import torch
from pathlib import Path
import torchvision.transforms as transforms
from torch.utils.data import DataLoader

from sklearn.metrics import average_precision_score, accuracy_score
import numpy as np

from ImageDataset import UCMercedMLDataset


import binom_multilabel_knn
import feature_extractor


from utils import *
from main_cssl import get_backbone, get_ssl_model




def get_dataset(dataset, data_dir):
    """Build the train/val/test splits for an evaluation dataset.

    Args:
        dataset: name of the evaluation dataset; only 'UCMerced' is supported.
        data_dir: root directory containing the image data.

    Returns:
        dict mapping 'train', 'val' and 'test' to the corresponding
        ``UCMercedMLDataset`` instances.

    Raises:
        ValueError: if ``dataset`` is not supported. (Previously the code fell
        through to ``return ds`` with ``ds`` unbound and crashed with an
        opaque UnboundLocalError.)
    """
    # Fail fast on unknown dataset names instead of crashing later on `ds`.
    if dataset != 'UCMerced':
        raise ValueError("Unsupported dataset: {!r}".format(dataset))

    # Channel-wise normalization statistics (precomputed for UCMerced).
    mean = [0.4056, 0.4292, 0.4032]
    std = [0.2028, 0.1897, 0.1908]

    # Evaluation-only pipeline: deterministic preprocessing, identical for all
    # splits (no train-time augmentation), so build it once and share it.
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.CenterCrop((224, 224)),
        transforms.Normalize(mean, std),
    ])

    split_paths = {
        'train': file_to_list(map_dataset_to_file(dataset, 'train')),
        'test': file_to_list(map_dataset_to_file(dataset, 'test')),
        'val': file_to_list(map_dataset_to_file(dataset, 'val')),
    }
    return {
        split: UCMercedMLDataset(paths, preprocess, data_dir)
        for split, paths in split_paths.items()
    }


    
def main() -> None:
    """Run KNN evaluation of continually self-supervised pretrained models.

    For every evaluation dataset and trial id, this loads the CoSMAE model
    weights saved after each continual-learning task, extracts features from
    the frozen backbone (hooked at 'encoder.ln'), fits a binomial multi-label
    KNN on the train split, and reports mAP (UCMerced) or accuracy on the
    test (or validation) split. Metrics are appended to text files under
    ``args.results``.
    """

    

    parser = argparse.ArgumentParser(description='Continual Self-Supervised learning')
    parser.add_argument('--eval_name', default='knn_eval', type=str)
    parser.add_argument('--exp_name', default='CoSMAE', type=str, help='name of the experiment that should be evaluated')
    parser.add_argument('--tasks', nargs='+', default=['US3D','US3D_UAVID','US3D_UAVID_Potsdam','US3D_UAVID_Potsdam_TreeSatAI'], help='list of model sufixes of completed tasks that should be evaluated')
    
    parser.add_argument('--trial_ids', nargs='+', default=[0], type=int, help='which trials should be evaluated')
    parser.add_argument('--batch_size', default=128, type=int)
    parser.add_argument('--data_dir', default='/data/lamoe', help='path to the data directory')
    parser.add_argument('--k', default=10, type=int)
    parser.add_argument('--num_workers', default=8, type=int)
    parser.add_argument('--patch_size', default=16, type=int)
    
    # NOTE(review): `type=bool` is an argparse pitfall — any non-empty string
    # (including "False") parses as True. Consider `action='store_true'`;
    # kept as-is here because it changes the CLI invocation.
    parser.add_argument('--best_val', default=False, type=bool, help='use model with best validation performance')
    parser.add_argument('--eval_val', default=False, type=bool, help='use validation set to evaluate')
    
    #parser.add_argument('--tasks', nargs='+', default=['US3D','UAVID', 'Potsdam', 'TreeSatAI'], help='list of CL tasks')
    parser.add_argument('--results', default='results/', help='path for saving the results')
    
    args = parser.parse_args()


    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    torch.set_num_threads(8)

    # One fixed seed per trial index so repeated runs are reproducible.
    seeds = [42,43,44,45,46,47,48,49,50,51]
    trials = len(args.trial_ids)#TRIALS)

    # (channels, height, width) of the input images fed to the backbone.
    img_size = (3,224,224)
    #batch_size = 128
    EVALUATION = ['UCMerced']


    for eval_dataset in EVALUATION:
        ds = get_dataset(eval_dataset, args.data_dir)#, lr=label_ratio)

        for trial in args.trial_ids:#TRIALS:
            
            # NOTE(review): the three DataLoaders do not depend on `trial`
            # and could be hoisted out of this loop; rebuilt here each time.
            
            train_dl = DataLoader(
                ds['train'],
                batch_size=args.batch_size,
                shuffle=True,
                num_workers=args.num_workers,
                pin_memory=True
            )

            val_dl = DataLoader(
                ds['val'],
                batch_size=args.batch_size,
                shuffle=False,
                num_workers=args.num_workers,
                pin_memory=True
            )

            test_dl = DataLoader(
                ds['test'],
                batch_size=args.batch_size,
                shuffle=False,
                num_workers=args.num_workers,
                pin_memory=True
            )
            
            
        
            torch.manual_seed(seeds[trial])
            torch.set_float32_matmul_precision("medium")

            # Build the backbone and wrap it into the CoSMAE SSL model
            # (mr=0.75 — presumably the mask ratio; confirm in get_ssl_model).
            model = get_backbone(img_size[1], img_size[0], args.patch_size)

            ssl_model = get_ssl_model(model, 'CoSMAE', mr=0.75)
            ssl_model.to(device)

            tasks = args.tasks#TASKS

            # Evaluate the checkpoint saved after each continual-learning task.
            for task in range(len(tasks)):
                        
                #first_check_sum = get_model_checksum(ssl_model)

            
                
                pretrain_name = 'weights/' + args.exp_name + '_' + tasks[task] + '_pretraining_trial_' + str(trial)
                
                # NOTE(review): 'pretrainig' is a typo, but it is baked into
                # existing result directory names — fixing it would orphan
                # previously written results.
                exp_name = args.eval_name + '_' + args.exp_name + '_pretrainig_' + tasks[task] #+ '_trial_' + str(trial)
                print('Loading pretrained weights from {}'.format(tasks[task]))

                #logger = SummaryWriter('logs/{}_trial_{}'.format(exp_name,trial))

                # Checkpoint format differs: '*_best.pt' stores a dict with
                # 'model_state_dict' and 'epoch'; '*.pt' is a bare state dict.
                if args.best_val:
                    pretrained_checkpoint = torch.load('{}_best.pt'.format(pretrain_name), map_location=torch.device(device))
                    pretrained_state_dict = pretrained_checkpoint['model_state_dict']
                    print('best epoch:', pretrained_checkpoint['epoch'])
                else:
                    pretrained_state_dict = torch.load('{}.pt'.format(pretrain_name), map_location=torch.device(device))
                #print(len(pretrained_state_dict.keys()))

                # Step 2: Create a new state dictionary containing only the relevant parts
                ssl_model_state_dict = ssl_model.state_dict()  # Your model's state dictionary
                #print(len(ssl_model_state_dict.keys()))
                # Keep only checkpoint entries whose keys exist in this model,
                # so extra keys (e.g. from a larger training wrapper) are ignored.
                filtered_state_dict = {k: v for k, v in pretrained_state_dict.items() if k in ssl_model_state_dict}
                #print(len(filtered_state_dict.keys()))
                # Step 3: Update the model's state dictionary with the filtered one
                ssl_model_state_dict.update(filtered_state_dict)

                # Step 4: Load the updated state dictionary into your model
                ssl_model.load_state_dict(ssl_model_state_dict)
        
                #second_check_sum = get_model_checksum(ssl_model)
                #if first_check_sum == second_check_sum:
                #    print('weights not properly loaded')
            


                # Hook the backbone at layer 'encoder.ln' to capture features.
                fe = feature_extractor.FeatureExtractor(ssl_model.backbone,'encoder.ln')
                    

                # Extract features for the entire train split (KNN fit set).
                all_features = torch.Tensor([])
                all_labels = torch.Tensor([])
                fe.eval()
                with torch.no_grad():
                    for batch in train_dl:
                            # Datasets may yield either (imgs, labels) tuples
                            # or {'image': ..., 'label': ...} dicts.
                            if type(batch) is list:
                                imgs, labels = batch[0], batch[1]
                            else:
                                imgs, labels = batch['image'], batch['label']
                            #print(batch['image'])
                            imgs = imgs.to(device)
                            labels = labels
                            features = fe(imgs)

                            all_labels = torch.cat((all_labels,labels),0)

                            # Token index 0 — presumably the [CLS] token
                            # embedding; verify against the backbone.
                            all_features = torch.cat((all_features, features['encoder'][:, 0].cpu()),0)
                                
                all_labels = all_labels.numpy()
                all_features = all_features.numpy()

                
                print("\n Trial {} === Pretrained on {} - KNN on {} ===".format(trial, tasks[task], eval_dataset))
                #else:
                #    print("\n Trial {} === No pretraining - KNN on {} ===".format(trial, eval_dataset))


                # Multi-label KNN with cosine distance on the frozen features.
                knn_classifier = binom_multilabel_knn.binom_multilabel_kNN(k=args.k, metric = "cosine")#,gpu_id=0)
                
                
                    #knn_classifier = knn.kNN(k=args.k, metric = "cosine")
                    #all_features = all_features
                    #all_labels = all_labels
                
                knn_classifier.fit(all_features, all_labels)


                # Extract features for the evaluation split (val or test).
                all_test_features = torch.Tensor([])
                all_test_labels = torch.Tensor([])

                if args.eval_val:#VALIDATION:
                    eval_dl = val_dl
                else:
                    eval_dl = test_dl
                with torch.no_grad():
                    for batch in eval_dl:
                            if type(batch) is list:
                                imgs, labels = batch[0], batch[1]
                            else:
                                imgs, labels = batch['image'], batch['label']
                            imgs = imgs.to(device)
                            labels = labels
                            features = fe(imgs)
                            #print(features['encoder'][:, 0].size())
                            all_test_labels = torch.cat((all_test_labels,labels),0)
                            
                            all_test_features = torch.cat((all_test_features, features['encoder'][:, 0].cpu()),0)
            
                #print(len(all_test_labels))      

                all_test_labels = all_test_labels.numpy()
                #print(all_test_labels)


                # predict() returns a sparse label matrix; densify for sklearn.
                Y_pred = knn_classifier.predict(all_test_features).toarray()


                # UCMerced is multi-label → average precision; any other
                # (single-label) dataset would fall back to accuracy.
                if eval_dataset == 'UCMerced':
                    ap_macro = average_precision_score(all_test_labels,Y_pred,average='macro')
                    ap_micro = average_precision_score(all_test_labels,Y_pred,average='micro')
                    ap_classwise = average_precision_score(all_test_labels,Y_pred,average=None)

                    metrics = {
                        'map_micro': ap_micro,
                        'map_macro': ap_macro,
                        'ap_classwise': ap_classwise
                    }
                    print(ap_macro,ap_micro)
                    
                else:
                    Y_pred_classes=np.argmax(Y_pred,axis=1)
                    acc = accuracy_score(all_test_labels,Y_pred_classes)
                    metrics = {
                        'acc': acc,
                    }
                    print(acc)


                #if label_ratio != None:
                #    file_naming_eval_dataset = eval_dataset + '_lr' + str(label_ratio)
                #else:
                file_naming_eval_dataset = eval_dataset
                
                # NOTE(review): directory creation uses the literal 'results/'
                # while the files below are opened under args.results — these
                # diverge whenever --results is non-default; confirm and align.
                Path('results/' + exp_name).mkdir(parents=True, exist_ok=True)
                
                # Append each metric to its own per-trial result file; the
                # classwise-AP file gets one value per line.
                for key in metrics:
                    if key == 'ap_classwise':
                        with open(args.results + exp_name + '/' + file_naming_eval_dataset +  '_k' + str(args.k) + '_trial' + str(trial) +  '_AP_classwise.txt', 'a') as f:
                            for class_AP in metrics['ap_classwise']:
                                f.write(str(class_AP) + '\n')
                            # Blank separator line between consecutive trials.
                            if trials > 1 and trial < trials - 1:
                                f.write('\n')
                    else:
                        if args.eval_val:#if VALIDATION:
                            print(args.results + exp_name + '/' + file_naming_eval_dataset +  '_k' + str(args.k) + '_trial' + str(trial) + '_' + key + '_val.txt')
                            with open(args.results + exp_name + '/' + file_naming_eval_dataset +  '_k' + str(args.k) + '_trial' + str(trial) + '_' + key + '_val.txt', 'a') as f:
                                f.write(str(metrics[key]) + '\n')
                        else:
                            print(args.results + exp_name + '/' + file_naming_eval_dataset +  '_k' + str(args.k) + '_trial' + str(trial) + '_' + key + '.txt')
                            with open(args.results + exp_name + '/' + file_naming_eval_dataset +  '_k' + str(args.k) + '_trial' + str(trial) + '_' + key + '.txt', 'a') as f:
                                f.write(str(metrics[key]) + '\n')
    

# Script entry point: run the full KNN evaluation when executed directly.
if __name__ == "__main__":
    main()