import torch
import torch.nn.functional as F
import lightning.pytorch as pl
from pathlib import Path
from lightning.pytorch.loggers import TensorBoardLogger
from lightning.pytorch.callbacks import ModelCheckpoint
from modules import LapModule, ReactionModule, GreenDM
from models import model_names
from itertools import product
from .my_callbacks import *

def _combinate_dict(data):
    for args in product(*data.values()):
        kwargs = {k: v for k, v in zip(data.keys(), args)}
        yield kwargs


def gen_green_kwargs(
    net_kwargs={
        "varyUNet": {
            "features": [8],
            "layers": [5],
        }
    },
    gridSize=[64],
    batch_size=[8],
    method=["imconv", "exconv", "real"],
    maxiter=[10, 20, 40],
    lr=[1e-3],
    max_epochs=150,
    gpus=1,
    data_path="../data",
    loss_fn=[F.mse_loss],
    data_type=["f", "r", "rf", "xyf"],
    equation_type=["Lap", "Reaction"],
    TrainN=["500"],
    js_log_path="./log.json",
    K_strategy = ['const', 'adaptive', 'dynamic']

):
    # Assemble dm
    for arg in _combinate_dict(
        {
            "equation_type": equation_type,
            "data_type": data_type,
            "loss_fn": loss_fn,
            "lr": lr,
            "maxiter": maxiter,
            "method": method,
            "batch_size": batch_size,
            "gridSize": gridSize,
            "TrainN": TrainN,
            "K_strategy": K_strategy
        }):
        # DataModule
        dm_arg = {
            "data_path": data_path,
            "batch_size": arg["batch_size"],
            "n": arg["gridSize"],
            "data_type": arg["data_type"],
            "reaction": True if arg["equation_type"] == "Reaction" else False,
            "TrainN": arg["TrainN"],
        }
        dm = GreenDM(**dm_arg)

        for net_name, net_args in net_kwargs.items():
            for net_arg in _combinate_dict(net_args):
                
                # Network
                match arg["data_type"]:
                    case "f":
                        net_arg["in_c"] = 1
                    case "r":
                        net_arg["in_c"] = 1
                    case "rf":
                        net_arg["in_c"] = 2
                    case "xyf":
                        net_arg["in_c"] = 3
                net = model_names[net_name](**net_arg)

                # Pytorch_lightning Module
                pl_module_arg = {
                    "net": net,
                    "method": arg["method"],
                    "n": arg["gridSize"],
                    "lr": arg["lr"],
                    "loss_fn": arg["loss_fn"],
                    "maxiter": arg["maxiter"],
                }
                match arg["equation_type"]:
                    case "Lap":
                        pl_module = LapModule(**pl_module_arg)

                    case "Reaction":
                        pl_module = ReactionModule(**pl_module_arg)

                # Trainer kwargs
                match arg["method"]:
                    case "imconv":
                        loss_fn_name = f'jac-{arg["K_strategy"]}-{arg["maxiter"]}'
                    case _:
                        loss_fn_name = f'{arg["method"]}'

                # experiment name:
                # equation + data_type + loss function + gridSize + networks name + batch size + special label

                exp_name = f'{arg["equation_type"]}_{arg["data_type"]}_{loss_fn_name}_{arg["gridSize"]}_{net.name()}_{arg["batch_size"]}_{arg["TrainN"]}'

                # log_path = f"./all_logs_{arg['gridSize']}_{arg['label']}/{arg['data_type']}_{arg['equation_type']}"
                log_path = f'./all_logs/{Path(js_log_path).stem}'
                
                tensorBoardLogger = TensorBoardLogger(log_path, exp_name)
                TrainModelCKPT = ModelCheckpoint(
                    monitor=arg["method"],
                    mode="min",
                    train_time_interval=None,
                    save_top_k=1,
                    filename='best_train'
                    )
                ValModelCKPT = ModelCheckpoint(
                    monitor=f"ValLoss",
                    mode="min",
                    train_time_interval=None,
                    save_top_k=1,
                    filename='best_val'
                    )
                LastModelCKPT = ModelCheckpoint(
                    monitor=f"epoch",
                    mode="max",
                    train_time_interval=None,
                    save_top_k=1,
                    filename='last',
                    save_on_train_epoch_end=True
                    )
                jsonlogger = JsonLogger(js_log_path=js_log_path, name=exp_name)
                adaptive_K_mul = Adaptive_Kmul(exp_name, arg['maxiter'], 10)
                adaptive_K_add = Adaptive_Kadd(exp_name, arg['maxiter'], 1)
                dynamic_K = Dynamic_K(exp_name, 10, 10)
                
                match arg['K_strategy']:
                    case 'const':   callbacks = [TrainModelCKPT, ValModelCKPT, LastModelCKPT, jsonlogger]
                    case 'dynamic': callbacks = [TrainModelCKPT, ValModelCKPT, LastModelCKPT, jsonlogger, dynamic_K]
                    case 'adaptive-mul':callbacks = [TrainModelCKPT, ValModelCKPT, LastModelCKPT, jsonlogger, adaptive_K_mul]
                    case 'adaptive-add':callbacks = [TrainModelCKPT, ValModelCKPT, LastModelCKPT, jsonlogger, adaptive_K_add]

                trainer_arg = {
                    "max_epochs": max_epochs,
                    "precision": 32,
                    "accelerator": "cuda",
                    "devices": gpus,
                    "logger": tensorBoardLogger,
                    "callbacks": callbacks,
                }
                pl_trainer = pl.Trainer(**trainer_arg)
                kwargs = {
                    "name": exp_name,
                    "dm": dm,
                    "pl_module": pl_module,
                    "trainer": pl_trainer,
                    "log_path": f"{log_path}/{exp_name}",
                    "data_type": arg["data_type"],
                    "ckpt": None,
                }

                log_info = {
                    "exp_name": exp_name,
                    net_name: net_arg,
                    "n": pl_module_arg["n"],
                    "batch_size": dm_arg["batch_size"],
                    "method": pl_module_arg["method"],
                    "maxiter": pl_module_arg["maxiter"],
                    "lr": pl_module_arg["lr"],
                    "max_epoch": trainer_arg["max_epochs"],
                    "loss_fn": pl_module_arg["loss_fn"].__name__,
                    "data_type": dm_arg["data_type"],
                    "equation": arg["equation_type"],
                    "data_label": arg["TrainN"],
                    "js_log_path": js_log_path,
                    "log_path":log_path
                }

                yield kwargs, log_info


def main(**kwargs):
    """Run a single training experiment assembled by ``gen_green_kwargs``.

    Expects in *kwargs*: ``dm`` (LightningDataModule), ``pl_module``
    (LightningModule), ``trainer`` (pl.Trainer), ``log_path`` (str) and
    ``ckpt`` (optional checkpoint path to warm-start from).

    Returns:
        bool: always True on completion.
    """
    data_module = kwargs["dm"]
    module = kwargs["pl_module"]
    trainer = kwargs["trainer"]

    # Optionally warm-start from an existing checkpoint's weights.
    ckpt_path = kwargs["ckpt"]
    if ckpt_path is not None:
        checkpoint = torch.load(ckpt_path)
        module.load_state_dict(checkpoint["state_dict"])

    trainer.fit(model=module, datamodule=data_module)

    # Persist the final weights next to the TensorBoard logs.
    trainer.save_checkpoint(f'{kwargs["log_path"]}/version_0/checkpoints/last.ckpt')
    return True
