import torch
from zkl_ptutils_metrics import SumMetric
from zkl_ptutils_neural import binary_sigmoid_cross_entropy, categorical_softmax_cross_entropy, \
    categorical_softmax_soft_accuracy
from zkl_ptutils_training import ScopedTypedMetricPatch

from zkl_llmpt_nbptt.nbptt_causal_language_model import NbpttCausalLanguageModel
from zkl_llmpt_nbptt.neural_switch import BinaryNeuralSwitch
from zkl_llmpt_nbptt.utils.capturing import capture_modules_output
from zkl_llmpt_nbptt.utils.getting import get_modules_by_type, get_parameters_by_module_type

class TrainingModel(torch.nn.Module):
    """Functional training wrapper around an :class:`NbpttCausalLanguageModel`.

    Computes main-task gradients with ``torch.func.grad`` over a stateless
    (``functional_call``) evaluation of the wrapped model, and additionally
    maintains a score-weighted, EMA-smoothed policy gradient for the model's
    ``BinaryNeuralSwitch`` modules (REINFORCE-style: per-sample switch-loss
    gradients are later weighted by a standardized per-sample score).

    State carried across forward calls:
    - ``self.buffers`` is replaced every forward with the buffers returned by
      the model run (NBPTT recurrent state threaded between batches).
    - ``self.history_policy_grads`` is an exponential moving average of the
      per-sample policy gradients, keyed by parameter name.
    """

    def __init__(self, model: NbpttCausalLanguageModel):
        super().__init__()
        self.model = model
        # Name->tensor snapshots used as the explicit state for
        # torch.func.functional_call inside forward().
        self.params = dict(model.named_parameters())
        self.buffers = dict(model.named_buffers())
        # EMA of per-sample policy gradients (parameter name -> tensor with a
        # leading batch dim); starts empty, populated on the first forward.
        self.history_policy_grads: dict[str, torch.Tensor] = {}

    def forward(self, batch: tuple[torch.Tensor, torch.Tensor, torch.Tensor]) -> dict:
        """Run one training step's computation; returns gradients and metrics.

        ``batch`` is ``(x, y, t)``: inputs, targets, and a per-position type
        code ``t``.  From the masks below, ``t == 2`` marks positions scored
        against ``y`` and ``t == 1`` marks "cores" positions — presumably
        token-role codes; TODO confirm against the data pipeline.

        Returns a dict with:
        - ``gradients``: main-task grads merged with policy-derived grads
          (policy entries overwrite same-named main entries via ``|``).
        - ``metrics``: scalars or ``(numerator, denominator)`` pairs —
          presumably consumed as weighted averages by the metric framework;
          TODO confirm the tuple convention.
        """
        (x, y, t) = batch
        tokens = x.numel()

        def compute_loss(params: dict, buffers: dict):
            # Main-task loss, differentiated w.r.t. `params` by the outer
            # torch.func.grad.  All policy-gradient quantities are computed
            # here too and smuggled out through the aux dict.

            def compute_persample_policy_loss(
                policy_params: dict[str, torch.Tensor],
                other_params: dict[str, torch.Tensor],
                buffers: dict[str, torch.Tensor],
                x: torch.Tensor,
            ):
                # Single-sample (vmapped) model run.  Differentiated only
                # w.r.t. `policy_params` (the first positional arg of
                # torch.func.grad below).
                state = policy_params | other_params | buffers
                switches_module = get_modules_by_type(self.model, BinaryNeuralSwitch)
                # Capture each BinaryNeuralSwitch's raw output, which is a
                # (result, logit) pair keyed by module name.
                with capture_modules_output(switches_module) as switches_output:
                    y_logits, x_logits = torch.func.functional_call(self.model, state, args=(x,))
                # NOTE(review): rebinding `buffers` shadows the parameter —
                # this extracts the post-run buffer values (NBPTT state
                # updated in-place in `state` by the model, presumably).
                buffers = {name: state[name] for name in buffers}

                switches_result = {
                    name: result
                    for name, (result, logit) in switches_output.items()}

                # BCE of each switch's logit against the *negated* sampled
                # decision.  NOTE(review): the negation looks deliberate —
                # the eventual update is sign/magnitude-weighted by
                # `policy_score` below — but confirm the intended sign
                # convention of the policy gradient.
                switches_ce = {
                    name: binary_sigmoid_cross_entropy(switch_logit, ~switch_result)
                    for name, (switch_result, switch_logit) in switches_output.items()}
                switches_ce = torch.stack(list(switches_ce.values()), dim=-1)
                policy_loss = torch.mean(switches_ce)

                # Aux carries everything else out of the grad transform.
                return policy_loss, (y_logits, x_logits, switches_result, buffers)

            # Per-sample grads w.r.t. policy params, vectorized over the
            # batch dim of `buffers` and `x` (params are broadcast: in_dims
            # None).  randomness='different' lets stochastic switches sample
            # independently per batch element.
            compute_persample_policy_grads = torch.func.grad(
                compute_persample_policy_loss, has_aux=True)
            compute_batch_policy_grads = torch.func.vmap(
                compute_persample_policy_grads, in_dims=(None, None, 0, 0), randomness='different')

            # Split parameters: switch-owned ("policy") vs everything else.
            all_params_name = set(params.keys())
            policy_params_name = set(get_parameters_by_module_type(self.model, BinaryNeuralSwitch).keys())
            other_params_name = all_params_name - policy_params_name
            policy_params = {name: params[name] for name in policy_params_name}
            other_params = {name: params[name] for name in other_params_name}
            policy_grads, (y_logits, x_logits, switches_result, buffers) = \
                compute_batch_policy_grads(policy_params, other_params, buffers, x)

            # ---- y-head (target prediction) metrics, masked to t == 2 ----
            ce_y = categorical_softmax_cross_entropy(y_logits, y, dim=-1)
            acc_y = categorical_softmax_soft_accuracy(y_logits, y, dim=-1)

            mask_y = (t == 2)
            weight_y = torch.sum(mask_y, dtype=torch.float32)
            ce_y_sum = torch.masked.sum(ce_y, mask=mask_y)
            acc_y_sum = torch.masked.sum(acc_y, mask=mask_y)

            # ---- x-head (input reconstruction) metrics, unmasked ----
            ce_x = categorical_softmax_cross_entropy(x_logits, x, dim=-1)
            acc_x = categorical_softmax_soft_accuracy(x_logits, x, dim=-1)

            ce_x_mean = torch.mean(ce_x)
            acc_x_mean = torch.mean(acc_x)

            # Switch activation counts, reported separately for "cores"
            # positions (t == 1) and all other positions, as
            # (count, total-positions) pairs for weighted-average metrics.
            switches_cores_num = {
                name: torch.masked.sum(result, mask=t == 1, dtype=torch.float32)
                for name, result in switches_result.items()}
            switches_cores_base = torch.sum(t == 1, dtype=torch.float32)
            switches_other_num = {
                name: torch.masked.sum(result, mask=t != 1, dtype=torch.float32)
                for name, result in switches_result.items()}
            switch_other_base = torch.sum(t != 1, dtype=torch.float32)

            # Standardized per-sample score (advantage-like): masked sum of
            # centered ce_y over the last dim, divided by the global masked
            # std.  nan_to_num guards the empty-mask case (mean -> 0, std -> 1).
            ce_y_mean = torch.masked.mean(ce_y, mask=mask_y)
            ce_y_mean = torch.nan_to_num(ce_y_mean, 0)
            ce_y_std = torch.masked.std(ce_y, mask=mask_y)
            ce_y_std = torch.nan_to_num(ce_y_std, 1)
            policy_score = torch.masked.sum(ce_y - ce_y_mean, mask=mask_y, dim=-1)
            policy_score = policy_score / ce_y_std

            # NOTE(review): loss_y divides the masked sum by the TOTAL
            # element count (masked-out positions contribute 0), not by the
            # masked count `weight_y` — looks intentional (fixed per-token
            # weighting), but confirm.
            loss_y = ce_y_sum / ce_y.numel()
            loss_x = torch.mean(ce_x)
            loss = loss_y + loss_x

            return loss, dict(
                policy_grads=policy_grads,
                policy_score=policy_score,
                switches_cores_num=switches_cores_num,
                switches_cores_base=switches_cores_base,
                switches_other_num=switches_other_num,
                switches_other_base=switch_other_base,
                buffers=buffers,
                ce_y_sum=ce_y_sum,
                acc_y_sum=acc_y_sum,
                weight_y=weight_y,
                ce_x_mean=ce_x_mean,
                acc_x_mean=acc_x_mean)

        # Main-task gradients w.r.t. self.params; aux dict carries the rest.
        compute_gradients = torch.func.grad(compute_loss, has_aux=True)
        gradients, outputs = compute_gradients(self.params, self.buffers)
        policy_grads = outputs['policy_grads']
        policy_score = outputs['policy_score']
        switches_cores_num = outputs['switches_cores_num']
        switches_cores_base = outputs['switches_cores_base']
        switches_other_num = outputs['switches_other_num']
        switches_other_base = outputs['switches_other_base']
        # Thread the updated NBPTT buffer state into the next forward call.
        self.buffers = outputs['buffers']
        ce_y_sum = outputs['ce_y_sum']
        acc_y_sum = outputs['acc_y_sum']
        weight_y = outputs['weight_y']
        ce_x_mean = outputs['ce_x_mean']
        acc_x_mean = outputs['acc_x_mean']

        # Score-weighted policy gradient: contract the per-sample EMA grads
        # with this batch's scores over the batch dim (scaled by 0.2).
        # NOTE(review): this reads history_policy_grads BEFORE the EMA update
        # below, i.e. the applied policy gradient lags the current batch by
        # one step (and is empty on the very first call) — presumably
        # intentional for the NBPTT scheme; confirm.
        gradients_from_policy = {
            name: 0.2 * torch.tensordot(policy_score, grad, dims=([0], [0]))
            for name, grad in self.history_policy_grads.items()}

        # EMA update of the per-sample policy grads (0.5 / 0.5 blend;
        # `.get(name, 0)` makes the first step a half-weight bootstrap).
        gamma_old = 0.5
        gamma_new = 1 - gamma_old
        for name, grad_new in policy_grads.items():
            grad_old = self.history_policy_grads.get(name, 0)
            self.history_policy_grads[name] = grad_old * gamma_old + grad_new * gamma_new

        # Per-switch activation-rate metrics as (numerator, denominator)
        # pairs, detached so the metric framework holds no graph references.
        switches_cores_metrics = {
            f"cores ({name})": (co_num.detach(), switches_cores_base.detach())
            for name, co_num in switches_cores_num.items()}
        switches_other_metrics = {
            f"other ({name})": (co_num.detach(), switches_other_base.detach())
            for name, co_num in switches_other_num.items()}

        # Policy-derived grads overwrite same-named main grads via `|` —
        # consistent with policy params being trained only by the policy
        # gradient (they were split out of the main loss path above).
        return dict(
            gradients=gradients | gradients_from_policy,
            metrics=dict(
                ce_y=(ce_y_sum.detach(), weight_y),
                acc_y=(acc_y_sum.detach(), weight_y),
                ce_x=ce_x_mean.detach(),
                acc_x=acc_x_mean.detach(),
                tokens=ScopedTypedMetricPatch(tokens, typ=SumMetric, scope='run'),
            ) | switches_cores_metrics | switches_other_metrics)
