"""Return optimizer + scheduler for training."""
import torch
import itertools
from collections import defaultdict
from pprint import pprint
from .scheduler import ExpUpCosDown

def group_params(model, lrs, group_names, verbose=False):
    """Partition model parameters by name prefix, one learning rate per group.

    Parameters
    ----------
    model : torch.nn.Module
        Module whose ``named_parameters()`` are partitioned.
    lrs : sequence of float
        One learning rate per entry of ``group_names``, plus a trailing
        entry for the catch-all "other" group
        (``len(lrs) == len(group_names) + 1``).
    group_names : sequence of str
        Parameter-name prefixes. A parameter joins the *first* prefix it
        matches (in ``group_names`` order); parameters matching no prefix
        fall into the "other" group.
    verbose : bool
        If True, pretty-print the parameter names collected per group.

    Returns
    -------
    list of dict
        ``{"params": [...], "lr": float}`` entries, ready to pass to a
        torch optimizer constructor.
    """
    assert (len(lrs) - 1) == len(group_names), "lrs should have one more element than group_names standing for other parameters."
    params_groups = defaultdict(list)
    params_keys = defaultdict(list)
    # Map each group to its LR by *name*, not by the order groups happen to
    # be discovered while iterating parameters: pairing lrs with
    # enumerate(params_groups.keys()) silently mis-assigned LRs whenever the
    # model's parameter order differed from the order of group_names.
    params_lrs = {g: lrs[i] for i, g in enumerate(group_names)}
    params_lrs["other"] = lrs[-1]

    for name, param in model.named_parameters():
        for g in group_names:
            if name.startswith(g):
                params_groups[g].append(param)
                params_keys[g].append(name)
                # First matching prefix wins: without this break a parameter
                # matching two prefixes would land in two optimizer groups,
                # which torch optimizers reject at construction.
                break
        else:
            # No prefix matched -> catch-all group.
            params_groups["other"].append(param)
            params_keys["other"].append(name)

    grouped_params_with_lr = [
        {"params": params_groups[g], "lr": params_lrs[g]} for g in params_groups
    ]

    if verbose:
        pprint(params_keys)

    return grouped_params_with_lr

def opt_setup(opt_configs, epoch_len, model):
    """Build the optimizer and LR scheduler for training.

    Parameters
    ----------
    opt_configs : dict
        Must contain "opt" ("Adam" | "SGD" | "AdamW"), "lrs",
        "group_prefix", "verbose", "accumulate_batches", "n_epochs",
        "n_epochs_warmup", "min_lr", "schd_mode"; "joint_itpl_warmup" is
        optional.
    epoch_len : int
        Batches per epoch, before gradient accumulation is accounted for.
    model : torch.nn.Module
        Model whose parameters are optimized.

    Returns
    -------
    dict
        Lightning-style ``{"optimizer": ..., "lr_scheduler": {"scheduler":
        ..., "interval": "step"}}``.

    Raises
    ------
    NotImplementedError
        If ``opt_configs["opt"]`` names an unsupported optimizer.
    """
    # One optimizer step per `accumulate_batches` batches, so the scheduler
    # must count optimizer steps, not raw batches.
    # NOTE(review): integer division yields 0 when accumulate_batches >
    # epoch_len -- presumably never the case here; verify against configs.
    epoch_len = epoch_len // opt_configs["accumulate_batches"]

    grouped_params_with_lr = group_params(
        model,
        opt_configs["lrs"],
        opt_configs["group_prefix"],
        verbose=opt_configs["verbose"],
    )

    # Dispatch table keeps the supported optimizers in one place.
    builders = {
        "Adam": lambda params: torch.optim.Adam(params, betas=(0.9, 0.999)),
        "SGD": lambda params: torch.optim.SGD(params, momentum=0.9),
        "AdamW": lambda params: torch.optim.AdamW(params, betas=(0.9, 0.999)),
    }
    opt_name = opt_configs["opt"]
    if opt_name not in builders:
        raise NotImplementedError(f"Optimizer {opt_name} not implemented.")
    opt = builders[opt_name](grouped_params_with_lr)

    exp_steps = opt_configs["n_epochs_warmup"] * epoch_len
    try:
        joint_warmup = opt_configs["joint_itpl_warmup"] * epoch_len
        print(f"Joint training: warmup: {joint_warmup}")
    except KeyError:
        # Optional key: absent means no joint-interpolation warmup.
        joint_warmup = 0

    lrs = [group["lr"] for group in opt.param_groups]
    print(f"Learning rates in scheduler: {lrs}")
    exp_scheduler = ExpUpCosDown(
        opt,
        lrs,
        exp_steps,
        tot_steps=epoch_len * opt_configs["n_epochs"],
        min_lr=opt_configs["min_lr"],
        mode=opt_configs["schd_mode"],
        joint_warmup=joint_warmup,
    )
    return {"optimizer": opt, "lr_scheduler": {"scheduler": exp_scheduler, "interval": "step"}}