import ptan
import numpy as np
import abc

import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from lib.efficient_kan import KAN
from lib.conv_kan.KANConv import KAN_Convolutional_Layer
import gymnasium as gym


class WeightDecay(nn.Module):
    def __init__(self, module, weight_decay, name: str = None):
        if weight_decay < 0.0:
            raise ValueError(
                "Regularization's weight_decay should be greater than 0.0, got {}".format(
                    weight_decay
                )
            )

        super().__init__()
        self.module = module
        self.weight_decay = weight_decay
        self.name = name

        self.hook = self.module.register_full_backward_hook(self._weight_decay_hook)

    def remove(self):
        self.hook.remove()

    def _weight_decay_hook(self, *_):
        if self.name is None:
            for param in self.module.parameters():
                if param.grad is None or torch.all(param.grad == 0.0):
                    param.grad = self.regularize(param)
        else:
            for name, param in self.module.named_parameters():
                if self.name in name and (
                    param.grad is None or torch.all(param.grad == 0.0)
                ):
                    param.grad = self.regularize(param)

    def forward(self, *args, **kwargs):
        return self.module(*args, **kwargs)

    def extra_repr(self) -> str:
        representation = "weight_decay={}".format(self.weight_decay)
        if self.name is not None:
            representation += ", name={}".format(self.name)
        return representation

    @abc.abstractmethod
    def regularize(self, parameter):
        pass


class L1(WeightDecay):
    """L1 (lasso) weight decay applied through a backward hook.

    Wraps a module and, on the backward pass, injects the L1 penalty
    gradient ``weight_decay * sign(parameter)`` into its parameter grads.

    Example::

        import torchlayers as tl

        # Regularize all parameters of Linear module
        regularized_layer = tl.L1(tl.Linear(30), weight_decay=1e-5)

    .. note::
            A backward hook is registered on ``module``; call ``remove()``
            to disable the regularization later.

    Parameters
    ----------
    module : torch.nn.Module
        Module whose parameters will be regularized.
    weight_decay : float
        Strength of regularization (has to be greater than `0.0`).
    name : str, optional
        Name of parameter to be regularized (if any).
        Default: all parameters will be regularized (including "bias").

    """

    def regularize(self, parameter):
        # Gradient of weight_decay * |p| is weight_decay * sign(p).
        return parameter.data.sign().mul(self.weight_decay)


class AtariA2C(nn.Module):
    """Actor-critic network for Atari: KAN convolutional trunk feeding
    separate policy and value heads.

    Parameters
    ----------
    input_shape : tuple
        Observation shape, channels-first (C, H, W) — assumed CHW given
        ``input_shape[0]`` is used as the input channel count.
    n_actions : int
        Size of the discrete action space (policy head output width).
    l1_penalty : float
        L1 weight-decay strength applied to each conv layer (0.0 = inert).
    use_kan_linear : bool
        If True, the heads are built from KAN layers instead of MLPs.
    """

    def __init__(self, input_shape, n_actions, l1_penalty=0.0, use_kan_linear=False):
        super().__init__()

        print("obs_action: ", input_shape)
        obs_shape = input_shape

        def _conv_block(c_in, c_out, kernel, stride):
            # One L1-regularized KAN convolution; grid_size=5 throughout.
            return L1(
                KAN_Convolutional_Layer(
                    in_channels=c_in,
                    out_channels=c_out,
                    kernel_size=(kernel, kernel),
                    stride=(stride, stride),
                    grid_size=5,
                ),
                l1_penalty,
            )

        # Classic DQN-style geometry: 8x8/4 -> 4x4/2 -> 3x3/1.
        self.conv = nn.Sequential(
            _conv_block(obs_shape[0], 32, 8, 4),
            _conv_block(32, 64, 4, 2),
            _conv_block(64, 128, 3, 1),
        )

        conv_out_size = self._get_conv_out(obs_shape)

        if use_kan_linear:
            self.policy = nn.Sequential(
                KAN([conv_out_size, 64, 512]),
                KAN([512, 64, n_actions]),
            )
            self.value = nn.Sequential(
                KAN([conv_out_size, 64, 512]),
                KAN([512, 64, 1]),
            )
        else:
            self.policy = nn.Sequential(
                nn.Linear(conv_out_size, 512),
                nn.ReLU(),
                nn.Linear(512, n_actions),
            )
            self.value = nn.Sequential(
                nn.Linear(conv_out_size, 512),
                nn.ReLU(),
                nn.Linear(512, 1),
            )

    def _get_conv_out(self, shape):
        """Run a zero dummy batch through the trunk to get its flat size."""
        dummy = torch.zeros(1, *shape)
        return int(np.prod(self.conv(dummy).size()))

    def forward(self, x):
        """Return (policy logits, state value) for a uint8 image batch.

        Input is scaled from [0, 255] to [0, 1] before the conv trunk.
        """
        scaled = x.float() / 255
        features = self.conv(scaled)
        flat = features.view(scaled.size()[0], -1)
        return self.policy(flat), self.value(flat)