import pathlib
import torch
import torch.nn as nn
import copy
import tqdm
from .processors import ProcessorManager, Result
from .utils import dict_as_table, predict_batch, show_batch


__all__ = ["create_learner"]

# Module-level default configuration. create_learner() deep-copies this dict
# and merges in user overrides, so per-Learner changes never touch these defaults.
settings = {
    "device": "cuda:0",
    "dtype": torch.float32,
    "loss_fn": nn.CrossEntropyLoss,
    "loss_fn_kwargs": {},
    "optimizer": torch.optim.Adam,
    "optimizer_kwargs": {},
    "scheduler": torch.optim.lr_scheduler.LambdaLR,
    "scheduler_kwargs": {"lr_lambda": lambda epoch: 1},  # multiplier 1 -> constant LR by default
    "figsize": (10, 6),

    # output location: results are written under <roots>/<work_dir>
    "work_dir": "exp",
    "roots": "runs",
    "auto_zip": True,

    # keys to display in plots
    "plot_keys": {"loss", "acc"},

    # keys to display in the tqdm progress bar
    "tqdm_keys": {"loss", "cur_acc", "best_acc", "lr"},

    # evaluation-metric settings
    "metrics_average": "macro avg",
    "confusion_matrix_kwargs": {},
    "roc_curve_kwargs": {},
    "classification_report_kwargs": {"zero_division": 1},
    "dynamic_plot": False,
    "save_last_model": False,
    "save_best_model": False,
}


class Learner:
    """Drive a train/validate loop for a model over the configured loaders.

    All state lives in ``self.config`` (module defaults merged with user
    overrides — see ``create_learner``); per-epoch results are packed into a
    ``Result`` and handed to a ``ProcessorManager`` for metrics/plot/save
    handling.
    """

    def __init__(self, **kwargs):
        # config holds everything: model, loaders, hyper-parameter settings.
        self.config = kwargs
        self.pm = ProcessorManager(self.config)
        self.fit_called = False  # fit() may run only once per instance

    def set(self, key, value):
        """Override a single config entry."""
        self.config[key] = value

    def loss_fn(self, loss_fn, **kwargs):
        """Set the loss-function class and the kwargs used to construct it."""
        self.config["loss_fn"] = loss_fn
        self.config["loss_fn_kwargs"] = kwargs

    def optimizer(self, optimizer, **kwargs):
        """Set the optimizer class and the kwargs used to construct it."""
        self.config["optimizer"] = optimizer
        self.config["optimizer_kwargs"] = kwargs

    def scheduler(self, scheduler, **kwargs):
        """Set the LR-scheduler class and the kwargs used to construct it."""
        self.config["scheduler"] = scheduler
        self.config["scheduler_kwargs"] = kwargs

    def show_config(self):
        """Print the key config entries as a table (delegates to __repr__)."""
        print(self)

    def __repr__(self):
        msg = {
            "device": self.config["device"],
            "dtype": self.config["dtype"],
            "loss_fn": self.config["loss_fn"],
            "optimizer": self.config["optimizer"],
            "scheduler": self.config["scheduler"],
            "batch_size": self.config["train_loader"].batch_size,
        }
        return dict_as_table(msg)

    def __determine_work_dir(self):
        """Choose a fresh output dir: <roots>/<work_dir>, then <work_dir>1, ...

        Fix vs. original: skip over already-existing numbered candidates so a
        previously deleted run cannot make two fits share a directory, and
        raise real exceptions instead of `assert` (which -O strips).
        """
        roots = pathlib.Path(self.config["roots"])
        work_dir = roots / self.config["work_dir"]
        if not work_dir.exists():
            # also covers the case where roots itself does not exist yet
            return str(work_dir)
        if not roots.is_dir():
            raise NotADirectoryError(f"{roots} is not a dir")
        if not work_dir.is_dir():
            raise NotADirectoryError(f"{work_dir} is not a dir")
        wd = work_dir.name
        count = sum(
            1 for p in roots.iterdir() if p.is_dir() and p.name.startswith(wd))
        candidate = roots / f"{wd}{count}"
        while candidate.exists():  # guard against holes in the numbering
            count += 1
            candidate = roots / f"{wd}{count}"
        return str(candidate)

    def __train_one_epoch(self, model, loader, optimizer, loss_fn):
        """Run one optimization pass over *loader*; return the mean batch loss."""
        model.train()
        loss_sum, count = 0, 0
        for data, label in loader:
            data = data.to(self.config["device"], self.config["dtype"])
            label = label.to(self.config["device"])
            optimizer.zero_grad()
            # loss computed on float32 logits with integer class targets
            output = model(data)
            loss = loss_fn(output.float(), label.long())
            loss.backward()
            optimizer.step()
            loss_sum += loss.item()
            count += 1
        return loss_sum / count

    def __evaluate(self, model, loader):
        """Collect true labels, argmax predictions and raw scores over *loader*."""
        y_true, y_pred, y_score = [], [], []
        with torch.no_grad():
            model.eval()
            for data, label in loader:
                data = data.to(self.config["device"], self.config["dtype"])
                label = label.to(self.config["device"])
                output = model(data)
                y_true.extend(label.tolist())
                y_pred.extend(output.argmax(dim=-1).tolist())
                y_score.extend(output.tolist())
        return y_true, y_pred, y_score

    def fit(self, epochs=10, lr=1e-3):
        """Train for *epochs* epochs with base learning rate *lr*.

        May be called only once per Learner; each epoch's results are passed
        to the ProcessorManager and written under a fresh output directory.

        Raises:
            RuntimeError: if fit() was already called on this instance.
        """
        if self.fit_called:
            raise RuntimeError("fit can only be called once")
        self.fit_called = True
        output_dir = self.__determine_work_dir()
        pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)
        ################################################################
        # extra information forwarded to the processors with every result
        class_to_idx = self.class_to_idx
        extra_config = {
            "total_epochs": epochs,
            "output_dir": output_dir,
            "idx_to_class": {v: k for k, v in class_to_idx.items()},
            "num_classes": len(class_to_idx),
        }

        ################################################################
        model = self.config["model"]
        model.to(self.config["device"], self.config["dtype"])
        optimizer = self.config["optimizer"](
            model.parameters(), lr=lr, **self.config["optimizer_kwargs"])
        scheduler = self.config["scheduler"](
            optimizer, **self.config["scheduler_kwargs"])
        loss_fn = self.config["loss_fn"](**self.config["loss_fn_kwargs"])
        train_loader = self.config["train_loader"]
        val_loader = self.config["val_loader"]

        progress = tqdm.tqdm(range(epochs))
        width = len(str(epochs))
        for epoch in progress:
            progress.set_description(f"Epoch {epoch:>{width}d}")
            train_loss = self.__train_one_epoch(
                model, train_loader, optimizer, loss_fn)
            scheduler.step()
            y_true, y_pred, y_score = self.__evaluate(model, val_loader)

            result = Result()
            result.update(extra_config)
            result.put("y_true", y_true)
            result.put("y_pred", y_pred)
            result.put("y_score", y_score)
            result.put("train_loss", train_loss)
            result.put("lr", scheduler.get_last_lr()[0])
            result.put("cur_epochs", epoch)
            gdata = self.pm.end_current_epoch(result)
            # show only the configured keys the processors actually produced
            msg = {k: gdata[k]
                   for k in self.config["tqdm_keys"] & gdata.keys()}
            progress.set_postfix(msg)

        print(f"the result is saved in {output_dir}")
        self.pm.end_fit(extra_config)

    @property
    def class_to_idx(self):
        """Unwrap nested ``.dataset`` wrappers until a ``class_to_idx`` is found.

        Fix vs. original: raise AttributeError instead of silently returning
        None when no dataset in the chain exposes the mapping (the original
        assert was unreachable and the loop could fall through).
        """
        ds = self.config["train_loader"].dataset
        while True:
            if hasattr(ds, "class_to_idx"):
                return ds.class_to_idx
            if not hasattr(ds, "dataset"):
                raise AttributeError(
                    "the dataset must have class_to_idx property")
            ds = ds.dataset

    @property
    def samples_nums(self):
        """Sample count per split; the 'val' key is omitted when val_loader is None."""
        t, v = self.config["train_loader"], self.config["val_loader"]
        if v is None:
            return {"train": len(t.dataset)}
        return {"train": len(t.dataset), "val": len(v.dataset)}

    @property
    def model(self):
        """The configured model object."""
        return self.config["model"]

    def show_train_batch(self, rows=4, cols=4, **kwargs):
        """Display a rows x cols grid of training samples."""
        figsize = kwargs.get("figsize", self.config["figsize"])
        show_batch(self.config["train_loader"], rows, cols, figsize)

    def show_val_batch(self, rows=4, cols=4, **kwargs):
        """Display a rows x cols grid of validation samples."""
        figsize = kwargs.get("figsize", self.config["figsize"])
        show_batch(self.config["val_loader"], rows, cols, figsize)

    def predict_train_batch(self, rows=4, cols=4, **kwargs):
        """Show model predictions on one training batch."""
        figsize = kwargs.get("figsize", self.config["figsize"])
        predict_batch(self.config["train_loader"], self.config["model"],
                      self.config["device"], self.config["dtype"],
                      rows, cols, figsize)

    def predict_val_batch(self, rows=4, cols=4, **kwargs):
        """Show model predictions on one validation batch."""
        figsize = kwargs.get("figsize", self.config["figsize"])
        predict_batch(self.config["val_loader"], self.config["model"],
                      self.config["device"], self.config["dtype"],
                      rows, cols, figsize)

    def save(self, path):
        """Save the model's state_dict to *path*."""
        torch.save(self.config["model"].state_dict(), path)

    def load(self, path, **kwargs):
        """Load a state_dict from *path*; extra kwargs go to load_state_dict."""
        self.config["model"].load_state_dict(torch.load(path), **kwargs)

    def eval(self):
        """Put the model in eval mode."""
        self.config["model"].eval()

    def train(self):
        """Put the model in train mode."""
        self.config["model"].train()

    def cpu(self):
        """Move the model to CPU."""
        self.config["model"].cpu()

    def to(self, *args, **kwargs):
        """Forward device/dtype moves to the model."""
        self.config["model"].to(*args, **kwargs)

def create_learner(model, train_loader, val_loader=None, **kwargs):
    """Build a Learner from the module defaults merged with user overrides.

    The defaults are deep-copied so a Learner never mutates the shared
    ``settings`` dict; *model* and the loaders always win over kwargs.
    """
    config = copy.deepcopy(settings)
    config.update(kwargs)
    config.update(
        model=model,
        train_loader=train_loader,
        val_loader=val_loader,
    )
    return Learner(**config)
