from __future__ import annotations

from typing import Any, Sequence

from tensordict.nn import TensorDictModule, TensorDictSequential
from torch import nn

from rl_evolve.models.templates import MLP, ConvNet, ConvNet3D, ImpalaNet, ResNetBlock

# Registry of config-friendly string names -> module classes.
# The first five are project templates (rl_evolve.models.templates);
# the rest are stock torch.nn recurrent / attention modules.
MODEL_MAPPING = {
    "mlp": MLP,
    "cnn": ConvNet,
    "cnn3d": ConvNet3D,
    "resnet_block": ResNetBlock,
    "impala_net": ImpalaNet,
    "rnn": nn.RNN,
    "lstm": nn.LSTM,
    "gru": nn.GRU,
    "transformer": nn.Transformer,
}

# Default constructor kwargs for each entry in MODEL_MAPPING, keyed by the
# same string names. Each value must be directly splattable into the class
# constructor, e.g. ``nn.LSTM(**DEFAULT_CONFIGS["lstm"])``.
DEFAULT_CONFIGS = {
    "mlp": {
        "in_features": None,  # None lets the MLP template lazy-infer input size
        "out_features": 2,
        "num_cells": (32, 32),
        "activation_class": nn.Tanh,
        "activation_kwargs": None,
        "activate_last_layer": False,
    },
    "cnn": {
        "in_channels": 3,
        "num_channels": (16, 32, 64),
        "kernel_sizes": (3, 3, 3),
        "strides": (1, 1, 1),
        "paddings": (0, 0, 0),
        "activation_class": nn.Tanh,
        "activation_kwargs": None,
        "norm_class": None,
        "norm_kwargs": None,
        "squash_last_layer": True,
    },
    "cnn3d": {
        "in_channels": 3,
        "features_dim": 1024,
        "hidden_channels": [32, 16, 16],
        "kernel_sizes": [3, 3, 3],
        "conv_activation": "LeakyReLU",
        "final_pool_type": "AdaptiveAvgPool3d",
        "final_pool_kwargs": {"output_size": 2},
        "final_activation": "LeakyReLU",
    },
    "resnet_block": {
        "num_ch": 3,
        "activation_class": nn.Tanh,
        "activation_kwargs": None,
    },
    "impala_net": {
        "channels": (16, 32, 32),
        "activation_class": nn.Tanh,
        "activation_kwargs": None,
    },
    "rnn": {
        "input_size": 32,
        "hidden_size": 256,
        "num_layers": 1,
        # Only nn.RNN exposes a selectable nonlinearity.
        "nonlinearity": "tanh",
        "bias": True,
        "batch_first": True,
    },
    # NOTE: nn.LSTM / nn.GRU have fixed internal gate activations and do NOT
    # accept a ``nonlinearity`` kwarg; including it made these configs raise
    # TypeError when splatted into the constructor.
    "lstm": {
        "input_size": 32,
        "hidden_size": 256,
        "num_layers": 1,
        "bias": True,
        "batch_first": True,
    },
    "gru": {
        "input_size": 32,
        "hidden_size": 256,
        "num_layers": 1,
        "bias": True,
        "batch_first": True,
    },
    "transformer": {
        "d_model": 512,
        "nhead": 8,
        "num_encoder_layers": 6,
        "num_decoder_layers": 6,
        "dim_feedforward": 2048,
        "dropout": 0.1,
        "activation": "relu",
    },
}


def get_constructed_model(
    model_configs: Sequence[dict[str, Any]]
) -> TensorDictSequential:
    """Build a ``TensorDictSequential`` from a sequence of layer specs.

    Each spec dict must provide:
      - ``"model_name"``: a key of :data:`MODEL_MAPPING`.
      - ``"model_config"``: kwargs splatted into the module constructor.
      - ``"in_keys"`` / ``"out_keys"``: tensordict keys for the
        :class:`TensorDictModule` wrapper.

    Args:
        model_configs: ordered layer specifications; modules are chained
            in the given order.

    Returns:
        A ``TensorDictSequential`` wrapping the constructed modules.

    Raises:
        KeyError: if a spec's ``"model_name"`` is not in MODEL_MAPPING
            (previously this surfaced as an opaque ``TypeError`` from
            calling ``None``).
    """
    layers = []
    for spec in model_configs:
        name = spec["model_name"]
        try:
            model_cls = MODEL_MAPPING[name]
        except KeyError:
            # Fail loudly with the bad key and the valid options instead of
            # letting ``None(...)`` raise an uninformative TypeError.
            raise KeyError(
                f"Unknown model name {name!r}; expected one of "
                f"{sorted(MODEL_MAPPING)}"
            ) from None
        layers.append(
            TensorDictModule(
                model_cls(**spec["model_config"]),
                spec["in_keys"],
                spec["out_keys"],
            )
        )

    return TensorDictSequential(*layers)