from datasets.cifar10 import load_cifar10
import random
import torch
import numpy as np


class TrainerEnsemble:
    """Coordinates several independent trainers on one shared dataset.

    The ensemble builds the common data loaders once, initializes every
    trainer against them, runs per-epoch training across all trainers, and
    evaluates by averaging the trainers' logits.  It can also periodically
    exchange ("swap") rows of each trainer's refined pseudo-label matrix.
    """

    def __init__(self, args, trainers) -> None:
        self.args = args
        self.trainers = trainers
        self.assign_eval_device()
        self.create_dataloader()
        self.init_each_trainer()

    def assign_eval_device(self):
        """Pin ensemble-level evaluation to the first configured GPU."""
        self.device = torch.device(f"cuda:{self.args.gpu_ids[0]}")

    def create_dataloader(self):
        """Build the shared train/valid/test/eval-train loaders.

        Raises:
            ValueError: if ``args.ds`` names an unsupported dataset.
                (Previously an unknown dataset fell through to an
                ``UnboundLocalError`` on ``train_loader``.)
        """
        if self.args.ds == "cifar10":
            # ``dim`` is returned by the loader but unused by the ensemble.
            (train_loader, valid_loader, test_loader, dim, K,
             eval_train_loader) = load_cifar10(
                self.args.ds,
                self.args.bs,
                self.args.annotation_type,
                device=self.device,
                has_eval_train_loader=True,
            )
        else:
            raise ValueError(f"Unsupported dataset: {self.args.ds!r}")

        self.train_loader = train_loader
        self.valid_loader = valid_loader
        self.test_loader = test_loader
        self.num_classes = K
        self.eval_train_loader = eval_train_loader

    def init_each_trainer(self):
        """Give every trainer its device, loaders, model, optimizer, loss, and pseudo-label config."""
        for trainer in self.trainers:
            trainer.assign_device()
            trainer.create_dataloader(self.train_loader, self.eval_train_loader)
            trainer.create_model(self.num_classes)
            trainer.create_optimizer()
            trainer.create_loss_fn()
            trainer.config_pseudo_label()

    def train_each_model(self, epoch, verbose=False):
        """Run one training epoch on every trainer.

        Afterwards, if ``args.use_exchange`` is set, swap refined-label
        matrix rows between trainers at rate ``args.change_rate``.
        """
        self.current_epoch = epoch
        for i, trainer in enumerate(self.trainers):
            if verbose:
                # BUG FIX: the index was missing — "{}" was printed literally.
                print(f"Training the {i}-th model.")
            trainer.train_epoch(epoch)

        if self.args.use_exchange:
            print("exchanging pseudo matrix.")  # typo fix: was "exhanging"
            self.exchange_Y_refine_mats(self.args.change_rate)

    def eval_all_models(self, mode="test"):
        """Evaluate the averaged-logit ensemble prediction.

        Args:
            mode: ``"valid"`` or ``"test"`` — selects the loader.

        Returns:
            Accuracy as a percentage (float).

        Raises:
            ValueError: for an unknown ``mode`` (previously an
                ``UnboundLocalError`` on ``loader``).
        """
        if mode == "valid":
            loader = self.valid_loader
        elif mode == "test":
            loader = self.test_loader
        else:
            raise ValueError(f"Unknown eval mode: {mode!r}")

        # NOTE(review): models are not switched to .eval() here — confirm
        # whether dropout/batch-norm layers should be frozen for evaluation.
        correct, num_samples = 0, 0
        with torch.no_grad():
            for images, labels in loader:
                # Each trainer may live on its own device; move the batch to
                # it, then bring the logits back to the CPU for averaging.
                outputs_list = [
                    t.model(images.to(t.device)).detach().cpu()
                    for t in self.trainers
                ]
                _, predicted = torch.max(sum(outputs_list) / len(outputs_list), 1)
                correct += (predicted == labels).sum().item()
                num_samples += labels.size(0)
        return 100 * (correct / num_samples)

    # functions related to refined labels
    def exchange_Y_refine_mats(self, change_rate=None):
        """Swap rows of the trainers' refined pseudo-label matrices pairwise.

        Args:
            change_rate: fraction of rows (chosen uniformly at random) to
                swap; ``None`` swaps every row.
        """
        if self.args.num_trainers == 1:
            # BUG FIX: was ``pass`` — nothing to exchange, so bail out
            # instead of falling through and doing pointless index work.
            return

        n_samples = self.trainers[0].n_samples
        all_idx = np.arange(n_samples)
        if change_rate is None:
            change_idx = all_idx
        else:
            np.random.shuffle(all_idx)
            change_idx = all_idx[:int(n_samples * change_rate)]

        if self.args.num_trainers == 2:
            # Fancy indexing on the right-hand side yields copies, so this
            # tuple assignment is a genuine row swap.
            (self.trainers[0].Y_refine_mat[change_idx, :],
             self.trainers[1].Y_refine_mat[change_idx, :]) = (
                self.trainers[1].Y_refine_mat[change_idx, :],
                self.trainers[0].Y_refine_mat[change_idx, :],
            )
        elif self.args.num_trainers > 2:
            # BUG FIX: the shuffled id list was computed but never used, so
            # pairs were always (0,1), (2,3), ...; pair through the shuffled
            # list so partners are random.  An odd trainer out is skipped.
            ids = list(range(self.args.num_trainers))
            random.shuffle(ids)
            for j in range(0, self.args.num_trainers - 1, 2):
                a = self.trainers[ids[j]]
                b = self.trainers[ids[j + 1]]
                a.Y_refine_mat[change_idx, :], b.Y_refine_mat[change_idx, :] = (
                    b.Y_refine_mat[change_idx, :],
                    a.Y_refine_mat[change_idx, :],
                )

    
