import wandb

from torch.utils.data import DataLoader
from tqdm import tqdm
import torch
from Datasets.FormatDataset import FormatDataset
from Trainer.all_models import *
from Trainer.all_datasets import *
from Trainer.all_losses import *
from Trainer.TrainConfig import TrainConfig
from Utils import *
from Datasets.DatasetRegister import DefaultDatasetRegister as DatasetRegister
from Loss.LossRegister import DefaultLossRegister as LossRegister
import torchvision.transforms as transforms


class Trainer(object):
    """Drive training and evaluation of a focus-estimation model.

    Everything (model, optimizer, scheduler, loss, dataloaders, wandb
    logging) is wired up from a single ``TrainConfig`` in ``__init__``.
    Call :meth:`train_epoch` / :meth:`test_epoch` once per epoch.
    """

    def __init__(self, train_config: TrainConfig):
        self.train_config = train_config
        self.device = self.init_device()
        self.model = self.init_model()
        self.optimizer = self.init_optimizer()
        self.scheduler = self.init_scheduler()
        self.losser = self.init_losser()

        self.train_dataloader = self.init_train_dataloader()
        self.test_dataloader = self.init_test_dataloader()
        self.now_epoch_count = self.init_epoch_count()
        # Best (lowest) mean absolute error observed so far, per benchmark.
        self.min_error_avg_BBBC = float("inf")
        self.min_error_avg_MyLab = float("inf")
        self.min_error_avg_FocusPath = float("inf")
        self.min_error_avg_Incoherent_Same = float("inf")
        self.min_error_avg_Incoherent_Diff = float("inf")
        self.min_error_avg_Incoherent_All = float("inf")
        self.init_log()

    def init_device(self):
        """Restrict visible CUDA devices and return the torch device to use."""
        set_cuda(self.train_config.devices)
        return torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def init_model(self):
        """Instantiate the configured model, optionally warm-started from a checkpoint."""
        model_type = ModelRegister[self.train_config.model_type_name]
        model = model_type(num_classes=self.train_config.num_classes)
        if self.train_config.init_from:
            model, _ = load_model_core(model, self.train_config.init_from)
        model.to(self.device)
        return model

    def _build_dataset(self, dir_list, transform, random_pair=None):
        """Concatenate one dataset per directory in ``dir_list``.

        Returns ``None`` when ``dir_list`` is empty or ``None``.  When
        ``test_code_flag`` is set, the result is truncated to three batches
        so smoke runs stay fast.  ``random_pair`` is only forwarded when
        given (the test dataset constructor does not take it).
        """
        if dir_list is None or len(dir_list) < 1:
            return None

        DatasetType = DatasetRegister[self.train_config.dataset_type_name]
        kwargs = dict(
            abs_flag=self.train_config.abs_flag,
            transform=transform,
            from_cache=self.train_config.from_cache,
            update_cache=self.train_config.update_cache,
        )
        if random_pair is not None:
            kwargs["random_pair"] = random_pair

        dataset = None
        for path in dir_list:
            t_dataset = DatasetType(dir_path=path, **kwargs)
            # Dataset.__add__ yields a ConcatDataset, same as the old `+=`.
            dataset = t_dataset if dataset is None else dataset + t_dataset

        limit = 3 * self.train_config.batch_size
        if self.train_config.test_code_flag and len(dataset) >= limit:
            dataset = torch.utils.data.Subset(dataset, range(limit))
        return dataset

    def init_train_dataset(self):
        """Training dataset: random crop plus random H/V flips over all train dirs."""
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.RandomCrop((self.train_config.height, self.train_config.width)),
            transforms.RandomHorizontalFlip(p=0.5),
            transforms.RandomVerticalFlip(p=0.5),
        ])
        dataset = self._build_dataset(
            self.train_config.train_dir_list, transform,
            random_pair=self.train_config.random_pair,
        )
        if dataset is None:
            print("no train data")
            # Equivalent to the old exit(-1): training without data is fatal.
            raise SystemExit(-1)
        return dataset

    def init_train_dataloader(self):
        """Shuffling loader over the training dataset (incomplete tail batch dropped)."""
        return DataLoader(
            self.init_train_dataset(),
            batch_size=self.train_config.batch_size,
            num_workers=self.train_config.num_workers,
            drop_last=True,
            shuffle=True,
            pin_memory=self.train_config.pin_memory,
        )

    def init_test_dataset(self):
        """Test dataset: crop only, no flips.  ``None`` when no test dirs are configured."""
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.RandomCrop((self.train_config.height, self.train_config.width)),
        ])
        return self._build_dataset(self.train_config.test_dir_list, transform)

    def init_test_dataloader(self):
        """Loader over the test dataset, or ``None`` when there is no test data.

        Fix: the original passed ``None`` straight into ``DataLoader`` when
        ``init_test_dataset()`` returned ``None``, which raises at construction.
        """
        dataset = self.init_test_dataset()
        if dataset is None:
            return None
        return DataLoader(
            dataset,
            batch_size=self.train_config.batch_size,
            num_workers=self.train_config.num_workers,
            drop_last=True,
            shuffle=True,
            pin_memory=self.train_config.pin_memory,
        )

    def init_losser(self):
        """Instantiate the configured loss, forwarding optional keyword params."""
        loss_type = LossRegister[self.train_config.loss_type_name]
        if self.train_config.loss_param_dict is None:
            return loss_type()
        return loss_type(**self.train_config.loss_param_dict)

    def init_log(self):
        """Start a wandb run, or disable wandb entirely in no-log / smoke mode."""
        if not self.train_config.log_flag or self.train_config.test_code_flag:
            closeWandb()
            return

        # Copy: vars() returns the live __dict__, and writing pid/ppid into
        # it would mutate the TrainConfig object itself.
        config = dict(vars(self.train_config))
        config["pid"] = os.getpid()
        config["ppid"] = os.getppid()
        wandb.init(
            project=self.train_config.wandb_project,
            name=self.train_config.wandb_run_name,
            config=config,
        )

    def init_optimizer(self):
        """Adam over trainable parameters only, with L2 weight decay from config."""
        trainable = filter(lambda p: p.requires_grad, self.model.parameters())
        return torch.optim.Adam(
            trainable,
            lr=self.train_config.learning_rate,
            weight_decay=self.train_config.l2_norm,
        )

    def init_scheduler(self):
        """StepLR decaying by ``scheduler_gamma`` every ``scheduler_interval`` steps."""
        return torch.optim.lr_scheduler.StepLR(
            self.optimizer,
            step_size=self.train_config.scheduler_interval,
            gamma=self.train_config.scheduler_gamma,
        )

    def init_epoch_count(self):
        """Epochs completed so far; always starts from scratch."""
        return 0

    def train_epoch(self):
        """Run one training epoch with gradient accumulation and log the mean loss."""
        self.model.train()

        self.now_epoch_count += 1
        if self.train_config.scheduler_flag and \
                self.now_epoch_count % self.train_config.scheduler_interval == 0:
            # NOTE(review): StepLR already decays only every `step_size` calls
            # to step(); additionally gating step() on the interval means the
            # LR actually decays every interval**2 epochs — confirm intended.
            self.scheduler.step()

        accum = self.train_config.grad_accumulation_count
        train_loss_sum = 0.0
        self.optimizer.zero_grad()
        iter_count = -1
        progress = tqdm(self.train_dataloader,
                        desc="epoch:{} train".format(self.now_epoch_count))
        for iter_count, batch in enumerate(progress):
            if self.train_config.random_pair:
                inputs, inputs2, labels, other_info_dict = batch
                inputs = inputs.to(self.device)
                inputs2 = inputs2.to(self.device)
                labels = labels.to(self.device)
                outputs = self.model(inputs, inputs2)
            else:
                inputs, labels, other_info_dict = batch
                inputs = inputs.to(self.device)
                labels = labels.to(self.device)
                outputs = self.model(inputs)

            loss = self.losser(outputs, labels) / accum
            train_loss_sum += loss.sum().item()
            loss.backward()
            # Fix: the original tested `iter_count % accum == 0`, which fires
            # right after the FIRST batch (enumerate starts at 0) and defeats
            # accumulation for that step.  Step after every `accum` batches.
            if (iter_count + 1) % accum == 0:
                self.optimizer.step()
                self.optimizer.zero_grad()

        # Fix: flush leftover accumulated gradients from the tail batches,
        # which the original silently discarded at epoch end.
        if (iter_count + 1) % accum != 0:
            self.optimizer.step()
            self.optimizer.zero_grad()

        metrics = {
            "epoch": self.now_epoch_count,
            "loss": train_loss_sum / len(self.train_dataloader),
        }
        wandb.log(metrics)
        print(metrics)

    def _test_epoch_simple(self, move_interval, min_attr):
        """Shared eval loop for the single-image benchmarks.

        Computes mean/std of ``|label - output|`` plus the fraction of errors
        below ``move_interval``, updates the running-minimum attribute named
        by ``min_attr``, and logs everything to wandb and stdout.
        """
        self.model.eval()
        with torch.no_grad():
            result_list = []
            for inputs, labels, other_info_dict in tqdm(
                    self.test_dataloader,
                    desc="epoch:{} eval".format(self.now_epoch_count)):
                inputs = inputs.to(self.device)
                labels = labels.to(self.device)
                outputs = self.model(inputs)
                result_list += torch.abs(labels - outputs).cpu().flatten().tolist()

            error_avg = np.mean(result_list)
            if getattr(self, min_attr) > error_avg:
                setattr(self, min_attr, error_avg)

            metrics = {
                "epoch": self.now_epoch_count,
                "error_avg": error_avg,
                "std": np.std(result_list),
                "min_error_avg": getattr(self, min_attr),
                "acc": sum(i < move_interval for i in result_list) / len(result_list),
            }
            wandb.log(metrics)
            print(metrics)

    def test_epoch_with_BBBC_dataset(self):
        """Evaluate on the BBBC benchmark."""
        self._test_epoch_simple(TrainConfig.BBBC_INTERVAL, "min_error_avg_BBBC")

    def test_epoch_with_FocusPath_dataset(self):
        """Evaluate on the FocusPath benchmark."""
        self._test_epoch_simple(TrainConfig.FOCUSPATH_INTERVAL, "min_error_avg_FocusPath")

    def test_epoch_with_MyLab_dataset(self):
        """Evaluate on the MyLab benchmark."""
        self._test_epoch_simple(TrainConfig.MYLAB_INTERVAL, "min_error_avg_MyLab")

    @staticmethod
    def _median_abs_errors(res_dict):
        """Per-key |median(predictions) - label| over one Incoherent split."""
        return [abs(np.median(v["output_list"]) - v["label"])
                for v in res_dict.values()]

    def test_epoch_with_Incoherent_dataset(self):
        """Evaluate on the Incoherent benchmark.

        Predictions are grouped per sample key within each split ("same" /
        "diff"), aggregated by median, and compared against the label.  Only
        metrics for splits actually present in the data are logged.

        Fix: the original referenced the per-split metric variables
        unconditionally in wandb.log, raising NameError whenever one of the
        splits was absent from the test data.
        """
        self.model.eval()
        move_interval = TrainConfig.INCOHERENT_INTERVAL
        with torch.no_grad():
            # result_dict[split][key] = {"output_list": [...], "label": float}
            result_dict = {}
            for inputs, labels, other_info_dict in tqdm(
                    self.test_dataloader,
                    desc="epoch:{} eval".format(self.now_epoch_count)):
                inputs = inputs.to(self.device)
                labels = labels.to(self.device)
                outputs = self.model(inputs)

                output_list = outputs.flatten().tolist()
                label_list = labels.flatten().tolist()
                key_list = other_info_dict["key"]
                test_dataset_key_list = other_info_dict["test_dataset_key"]

                for i, key in enumerate(key_list):
                    split = result_dict.setdefault(test_dataset_key_list[i], {})
                    entry = split.setdefault(key, {"output_list": [], "label": None})
                    entry["output_list"].append(output_list[i])
                    entry["label"] = label_list[i]

            metrics = {"epoch": self.now_epoch_count}
            same_res_list = None
            diff_res_list = None

            if "same" in result_dict:
                same_res_list = self._median_abs_errors(result_dict["same"])
                error_avg_same = np.mean(same_res_list)
                if self.min_error_avg_Incoherent_Same > error_avg_same:
                    self.min_error_avg_Incoherent_Same = error_avg_same
                metrics.update({
                    "error_avg_same": error_avg_same,
                    "std_same": np.std(same_res_list),
                    "min_error_avg_same": self.min_error_avg_Incoherent_Same,
                    "acc_same": sum(i < move_interval for i in same_res_list) / len(same_res_list),
                })

            if "diff" in result_dict:
                diff_res_list = self._median_abs_errors(result_dict["diff"])
                error_avg_diff = np.mean(diff_res_list)
                if self.min_error_avg_Incoherent_Diff > error_avg_diff:
                    self.min_error_avg_Incoherent_Diff = error_avg_diff
                metrics.update({
                    "error_avg_diff": error_avg_diff,
                    "std_diff": np.std(diff_res_list),
                    "min_error_avg_diff": self.min_error_avg_Incoherent_Diff,
                    "acc_diff": sum(i < move_interval for i in diff_res_list) / len(diff_res_list),
                })

            if same_res_list is not None and diff_res_list is not None:
                all_list = same_res_list + diff_res_list
                error_avg_all = np.mean(all_list)
                if self.min_error_avg_Incoherent_All > error_avg_all:
                    self.min_error_avg_Incoherent_All = error_avg_all
                metrics.update({
                    "error_avg_all": error_avg_all,
                    "std_all": np.std(all_list),
                    "min_error_avg_all": self.min_error_avg_Incoherent_All,
                    "acc_all": sum(i < move_interval for i in all_list) / len(all_list),
                })

            wandb.log(metrics)
            print(metrics)

    def test_epoch(self):
        """Dispatch evaluation to the routine matching ``config.test_forward``."""
        dispatch = {
            TrainConfig.TEST_MYLAB: self.test_epoch_with_MyLab_dataset,
            TrainConfig.TEST_INCOHERENT: self.test_epoch_with_Incoherent_dataset,
            TrainConfig.TEST_FOCUSPATH: self.test_epoch_with_FocusPath_dataset,
            TrainConfig.TEST_BBBC: self.test_epoch_with_BBBC_dataset,
        }
        handler = dispatch.get(self.train_config.test_forward)
        if handler is not None:
            handler()

