from typing import Dict, List, Optional, Union

import numpy as np
import torch
from torch import nn

from rl_evolve.models.templates.activations import get_activation_class


class CNNAttention(nn.Module):
    """Self-attention block over 3D convolutional feature maps.

    The input volume is projected to query/key/value maps with 3D
    convolutions.  Attention is computed on a twice-pooled (coarse) spatial
    grid to keep the N x N attention matrix small, the attended features are
    upsampled back to the input resolution, and the result is blended into
    the full-resolution value projection through a learned scalar gate.

    Note: ``gamma`` is initialized to zero, so at initialization the block
    reduces exactly to ``w_vs(inputs)`` (the plain value projection).

    Args:
        in_channels: Number of channels of the input volume.
        out_channels: Number of output channels; must be >= 8 because the
            query/key projections use the usual ``out_channels // 8``
            bottleneck width.
        kernel_size: Convolution kernel size for all three projections.
        stride: Convolution stride for all three projections.
        padding: Convolution padding for all three projections.
        pool_type: Pooling layer *class* (e.g. ``nn.AvgPool3d``) used to
            coarsen q/k/v before attention.
        pool_kwargs: Keyword arguments for ``pool_type``; defaults to
            ``{"kernel_size": 2}``.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        padding: int = 1,
        pool_type: type = nn.AvgPool3d,
        pool_kwargs: Optional[Dict] = None,
    ):
        super().__init__()
        # Guard the q/k bottleneck: out_channels // 8 == 0 would otherwise
        # request a zero-channel Conv3d and fail with a confusing error.
        if out_channels < 8:
            raise ValueError(
                f"out_channels must be >= 8 so the query/key projections "
                f"(out_channels // 8 channels) are non-empty, got {out_channels}"
            )
        self.w_qs = nn.Conv3d(
            in_channels, out_channels // 8, kernel_size, stride, padding
        )
        self.w_ks = nn.Conv3d(
            in_channels, out_channels // 8, kernel_size, stride, padding
        )
        self.w_vs = nn.Conv3d(in_channels, out_channels, kernel_size, stride, padding)
        self.pool = pool_type(**(pool_kwargs or {"kernel_size": 2}))
        nn.init.orthogonal_(self.w_qs.weight, gain=np.sqrt(2))
        nn.init.orthogonal_(self.w_ks.weight, gain=np.sqrt(2))
        nn.init.orthogonal_(self.w_vs.weight, gain=np.sqrt(2))
        # Learned gate for the attention branch; zero-init means the block
        # starts as a plain convolution and learns to mix in attention.
        self.gamma = nn.Parameter(torch.zeros(1))

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """Apply gated self-attention.

        Args:
            inputs: Tensor of shape ``(B, in_channels, D, H, W)``.

        Returns:
            Tensor of shape ``(B, out_channels, D, H, W)``.
        """
        batch_size, _, depth, height, width = inputs.shape
        # Compute the full-resolution value projection ONCE: it is reused
        # both (pooled) inside the attention and as the residual branch.
        # The original code ran this Conv3d twice per forward pass.
        values_full = self.w_vs(inputs)
        q = self.pool(self.pool(self.w_qs(inputs)))
        k = self.pool(self.pool(self.w_ks(inputs)))
        v = self.pool(self.pool(values_full))
        _, _, d_p, h_p, w_p = q.shape
        n_positions = d_p * h_p * w_p
        # Flatten spatial dims: q -> (B, N, C/8), k -> (B, C/8, N), v -> (B, C, N).
        q = q.view(batch_size, -1, n_positions).permute(0, 2, 1)
        k = k.view(batch_size, -1, n_positions)
        v = v.view(batch_size, -1, n_positions)
        attn = nn.functional.softmax(torch.bmm(q, k), dim=-1)  # (B, N, N)
        out = torch.bmm(v, attn.permute(0, 2, 1))  # (B, C, N)
        out = out.view(batch_size, -1, d_p, h_p, w_p)
        # Upsample the coarse attended map back to the input resolution.
        out = nn.functional.interpolate(
            out, size=(depth, height, width), mode="trilinear", align_corners=False
        )
        return self.gamma * out + values_full


class ConvNet3D(nn.Module):
    """3D CNN feature extractor built from stacked ``CNNAttention`` blocks.

    Each stage is a ``CNNAttention`` layer followed by an activation.  An
    optional final pooling layer and a flatten feed a linear projection down
    to ``features_dim`` features.  The flattened width is inferred by
    running a zero sample of ``sample_input_shape`` through the CNN.

    Args:
        in_channels: Channels of the input volume.
        features_dim: Width of the final linear feature vector.
        hidden_channels: Output channels per stage; defaults to
            ``[32, 16, 16]`` when ``None``.
        kernel_sizes: Per-stage kernel size(s); an int is broadcast to all
            stages.
        strides: Per-stage stride(s); an int is broadcast to all stages.
        paddings: Per-stage padding(s); an int is broadcast to all stages.
        pool_sizes: Currently UNUSED — kept for backward compatibility
            (pooling inside each stage is controlled by ``pool_kwargs``).
        conv_activation: Activation class or name used after each stage.
        pool_type: Pooling layer *class* passed to each ``CNNAttention``.
        pool_kwargs: Keyword arguments for ``pool_type``.
        final_pool_type: Name of an ``nn`` pooling class applied before
            flattening; falsy (``None``/empty) skips it.
        final_pool_kwargs: Keyword arguments for the final pool; defaults
            to ``{"output_size": 2}`` when ``None``.
        final_activation: Activation class or name after the linear layer.
        sample_input_shape: (D, H, W) of the dummy input used to size the
            linear layer.
    """

    def __init__(
        self,
        in_channels: int,
        features_dim: int = 256,
        hidden_channels: Optional[List[int]] = None,
        kernel_sizes: Union[int, List[int]] = 3,
        strides: Union[int, List[int]] = 1,
        paddings: Union[int, List[int]] = 1,
        pool_sizes: Union[int, List[int]] = 2,
        conv_activation: Union[str, nn.Module] = "ReLU",
        pool_type: type = nn.AvgPool3d,
        pool_kwargs: Optional[Dict] = None,
        final_pool_type: Optional[str] = "AdaptiveAvgPool3d",
        final_pool_kwargs: Optional[Dict] = None,
        final_activation: Union[str, nn.Module] = "ReLU",
        sample_input_shape: tuple = (32, 32, 32),
    ):
        super().__init__()

        # Resolve None sentinels to the documented defaults (avoids the
        # mutable-default-argument pitfall of the previous signature).
        if hidden_channels is None:
            hidden_channels = [32, 16, 16]
        if final_pool_kwargs is None:
            final_pool_kwargs = {"output_size": 2}

        # Broadcast scalar hyper-parameters to one value per stage.
        num_stages = len(hidden_channels)
        if isinstance(kernel_sizes, int):
            kernel_sizes = [kernel_sizes] * num_stages
        if isinstance(strides, int):
            strides = [strides] * num_stages
        if isinstance(paddings, int):
            paddings = [paddings] * num_stages
        conv_activation = get_activation_class(conv_activation)
        final_activation = get_activation_class(final_activation)

        # Construct CNN layers: one attention block + activation per stage.
        layers = []
        current_channels = in_channels
        for i, out_channels in enumerate(hidden_channels):
            layers.append(
                CNNAttention(
                    in_channels=current_channels,
                    out_channels=out_channels,
                    kernel_size=kernel_sizes[i],
                    stride=strides[i],
                    padding=paddings[i],
                    pool_type=pool_type,
                    pool_kwargs=pool_kwargs,
                )
            )
            layers.append(conv_activation())
            current_channels = out_channels

        if final_pool_type:
            pool_layer = getattr(nn, final_pool_type)
            layers.append(pool_layer(**final_pool_kwargs))

        layers.append(nn.Flatten())
        self.cnn = nn.Sequential(*layers)

        # Infer the flattened width with a dummy forward pass (no grad).
        with torch.no_grad():
            sample_input = torch.zeros(1, in_channels, *sample_input_shape)
            n_flatten = self.cnn(sample_input).shape[1]

        self.linear = nn.Sequential(
            nn.Linear(n_flatten, features_dim), final_activation()
        )
        self.features_dim = features_dim

    def forward(self, observations: torch.Tensor) -> torch.Tensor:
        """Extract a ``(B, features_dim)`` feature vector from a 5D volume."""
        return self.linear(self.cnn(observations))
