import argparse
from typing import Any, Dict, List, Sequence, Tuple

import torch
import torch.nn as nn
from cassle.losses.barlow import barlow_loss_func
from cassle.methods.base import BaseModel
from cassle.methods.barlow_twins import BarlowTwins

class HookTool:
    """Accumulates outputs captured by a ``register_forward_hook`` callback.

    An instance's :meth:`hook_fun` is registered on a module; each forward
    pass appends that module's output to :attr:`fea` in call order.
    """

    def __init__(self):
        # Hook outputs collected so far, oldest first.
        self.fea = []

    def hook_fun(self, module, fea_in, fea_out):
        """Forward-hook callback: record the module's output tensor."""
        self.fea.append(fea_out)

    def reset(self):
        """Discard every recorded output (rebinds to a fresh list)."""
        self.fea = []

class BarlowTwins_Dual(BarlowTwins):
    """Barlow Twins with an auxiliary "dual" encoder.

    After the first task, the Barlow loss is computed on projections of a
    second encoder that consumes the main encoder's intermediate (layer3)
    activations, captured via a forward hook.
    """

    def __init__(
        self, proj_hidden_dim: int, output_dim: int, lamb: float, scale_loss: float, **kwargs
    ):
        """Implements Barlow Twins (https://arxiv.org/abs/2103.03230)

        Args:
            proj_hidden_dim (int): number of neurons of the hidden layers of the projector.
            output_dim (int): number of dimensions of projected features.
            lamb (float): off-diagonal scaling factor for the cross-covariance matrix.
            scale_loss (float): scaling factor of the loss.
        """

        super().__init__(proj_hidden_dim, output_dim, lamb, scale_loss, **kwargs)

        # dual_encoder mirrors the main encoder's architecture; its weights are
        # synced from the main encoder when training starts (see on_train_start).
        self.dual_encoder = self.base_model(zero_init_residual=self.zero_init_residual)
        self.dual_encoder.fc = nn.Identity()
        if self.cifar:
            # CIFAR stem surgery: small-stride conv, no max-pooling for 32x32 inputs.
            # NOTE(review): padding=2 is unusual for a 3x3 conv (padding=1 would
            # preserve spatial size) — presumably mirrors the main encoder's stem;
            # confirm against BaseModel's CIFAR branch.
            self.dual_encoder.conv1 = nn.Conv2d(
                3, 64, kernel_size=3, stride=1, padding=2, bias=False
            )
            self.dual_encoder.maxpool = nn.Identity()

    @property
    def learnable_params(self) -> List[dict]:
        """Adds dual-encoder parameters to parent's learnable parameters.

        Returns:
            List[dict]: list of learnable parameters.
        """

        extra_learnable_params = [{"params": self.dual_encoder.parameters()}]
        return super().learnable_params + extra_learnable_params

    def on_train_start(self):
        """Initializes the dual encoder from the main encoder at train start.

        Runs the parent's setup first, then copies every parameter of
        ``self.encoder`` into the corresponding parameter of
        ``self.dual_encoder`` so both start from identical weights.
        """
        super().on_train_start()
        for po, pm in zip(self.encoder.parameters(), self.dual_encoder.parameters()):
            pm.data.copy_(po.data)

    def feature_forward(self, x):
        """Run the tail of the dual encoder on intermediate activations.

        Args:
            x (torch.Tensor): layer3 feature map captured from the main encoder.

        Returns:
            dict: ``dual_feats`` (pooled dual-encoder features) and
            ``dual_z`` (their projection through the shared projector).
        """
        x = self.dual_encoder.layer4(x)
        x = self.dual_encoder.avgpool(x)
        x = torch.flatten(x, 1)
        dual_feats = self.dual_encoder.fc(x)  # fc is Identity; kept for interface parity
        dual_z = self.projector(dual_feats)
        return {'dual_feats': dual_feats, 'dual_z': dual_z}

    def training_step(self, batch: Sequence[Any], batch_idx: int) -> torch.Tensor:
        """Training step for Barlow Twins with dual-encoder distillation.

        On the first task this is identical to BarlowTwins. On later tasks the
        Barlow loss is computed on dual-encoder projections of the main
        encoder's layer3 activations instead.

        Args:
            batch (Sequence[Any]): a batch of data in the format of [img_indexes, [X], Y], where
                [X] is a list of size self.num_crops containing batches of images.
            batch_idx (int): index of the batch.

        Returns:
            torch.Tensor: total loss composed of Barlow loss and classification loss.
        """
        if self.current_task_idx == 0:
            return super().training_step(batch, batch_idx)

        cur_hook = HookTool()
        # Capture layer3 outputs of the main encoder for each crop.
        handle = self.encoder.layer3.register_forward_hook(cur_hook.hook_fun)
        try:
            # Deliberately skip BarlowTwins.training_step (which computes its own
            # loss on the main encoder) and run BaseModel's step directly.
            out = super(BarlowTwins, self).training_step(batch, batch_idx)
        finally:
            # Always detach the hook — even on failure — so it is never left
            # registered across steps (the original leaked it on exceptions).
            handle.remove()

        # Projections of the main encoder's features (exposed in the output dict).
        feats1, feats2 = out["feats"]
        z1 = self.projector(feats1)
        z2 = self.projector(feats2)

        # Dual encoder consumes the hooked layer3 activations of the two crops.
        dual_x1, dual_x2 = cur_hook.fea[:2]
        dual_z1 = self.feature_forward(dual_x1)['dual_z']
        dual_z2 = self.feature_forward(dual_x2)['dual_z']

        # ------- barlow twins loss (on the dual projections) -------
        barlow_loss = barlow_loss_func(
            dual_z1, dual_z2, lamb=self.lamb, scale_loss=self.scale_loss
        )

        self.log("train_barlow_loss", barlow_loss, on_epoch=True, sync_dist=True)

        out.update({"loss": out["loss"] + barlow_loss, "z": [z1, z2], "dual_z": [dual_z1, dual_z2]})
        return out

