import torch
import ultralytics.nn.modules as modules
from ultralytics.nn.tasks import DetectionModel
from ultralytics.yolo.utils import LOGGER
from typing import Iterable


class RecurrentModel(DetectionModel):
    """Detection model that accepts an extra leading time dimension.

    Input is treated as a 5-D tensor — presumably (time, batch, channels,
    height, width); TODO confirm against the dataloader. Every non-head
    module is applied independently at each time step and the per-step
    outputs are re-stacked along the time axis; the detection head consumes
    only the features of the last time step. Modules exposing a callable
    ``reset()`` (stateful/recurrent layers) are reset after each forward.
    """

    def __init__(self, cfg='yolov8n.yaml', ch=5, nc=None, verbose=True):
        """Initialize the model exactly like a DetectionModel.

        Args:
            cfg (str | dict): Model configuration (YAML path or dict).
            ch (int): Number of input channels. Defaults to 5 here rather
                than the usual 3 — presumably RGB plus extra per-frame
                channels; verify against the training pipeline.
            nc (int | None): Number of classes, overriding cfg if given.
            verbose (bool): Print model summary during construction.
        """
        super().__init__(cfg, ch, nc, verbose)

    def _forward_once(self, x, profile=False, visualize=False):
        """
        Perform a forward pass through the network, recursing over time steps.

        Args:
            x (torch.Tensor): The input tensor; padded with leading singleton
                dimensions until it is 5-D.
            profile (bool): Print the computation time of each layer if True,
                defaults to False.
            visualize (bool): Save the feature maps of the model if True,
                defaults to False (currently unsupported).

        Returns:
            (torch.Tensor): The last output of the model (detection head
            output for the final time step).
        """
        # Helpers are defined once, up front — the original code rebuilt
        # them as lambdas on every loop iteration (loop-invariant work,
        # and PEP 8 E731 discourages assigning lambdas to names).
        def is_tensor(el):
            """True if *el* is a torch.Tensor."""
            return isinstance(el, torch.Tensor)

        def is_iterable(el):
            """True if *el* can be iterated (lists/tuples of tensors)."""
            return isinstance(el, Iterable)

        def at_time(seq, ts):
            """Select time step *ts* from a tensor or nested sequence of tensors."""
            if is_tensor(seq):
                return seq[ts]
            if is_iterable(seq):
                return [at_time(tensor, ts) for tensor in seq]
            return seq

        def stack_time(seq):
            """Re-stack per-time-step outputs along a new leading time axis.

            *seq* is a list indexed by time step; each element is either a
            tensor or a sequence of tensors (multi-output modules).
            """
            if is_tensor(seq[0]):
                return torch.stack(seq)
            if is_iterable(seq):
                # Transpose list-of-lists: group the i-th output across time.
                return [torch.stack([seq[t][i] for t in range(len(seq))])
                        for i in range(len(seq[0]))]
            return seq

        required_dim = 5
        if x.ndim < required_dim:
            # Prepend singleton dimensions until the input is 5-D.
            x = x.reshape(*([1] * (required_dim - x.ndim) + list(x.shape)))

        y, dt = [], []  # saved intermediate outputs / per-layer timings
        for m in self.model:
            is_head = isinstance(m, (modules.Detect,))

            if m.f != -1:  # if not from previous layer
                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers
            if profile:
                self._profile_one_layer(m, x, dt)

            if is_head:
                # Detection head only sees the output at the last time step.
                x = m(at_time(x, -1))
            else:
                # Apply the module per time step, then restack across time.
                # NOTE(review): assumes x (or its first element) carries the
                # time axis in dim 0 — confirm against the input layout.
                time_steps = x.shape[0] if is_tensor(x) else x[0].shape[0]
                z = [m(at_time(x, t)) for t in range(time_steps)]
                x = stack_time(z)
            y.append(x if m.i in self.save else None)  # save output

            if hasattr(m, "reset") and callable(m.reset):
                m.reset()  # reset the recurrent modules

            if visualize:
                LOGGER.info('visualize feature not yet supported')
                # TODO: feature_visualization(x, m.type, m.i, save_dir=visualize)
        return x