"""
Experiment for the separated Hierarchical Transformer. This Experiments mirrows the LTLSynHierTransformerExperiment but uses a separated hierarchichal transformer with one local encoder.
"""

import logging
import numpy as np
import sys
import tensorflow as tf

from ... import models
from ...layers import positional_encoding as pe

from ...data import TPEFormat
from ...data import ExprNotation
from ...optimization import lr_schedules
from ..ltl_spec import LTLSpecPropertyEncoder
from .ltl_syn_experiment import LTLSynExperiment

# Module-level logger; basicConfig is a no-op if the root logger was already configured.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class LTLSynSepHierTransformerExperiment(LTLSynExperiment):
    """Experiment for the separated hierarchical Transformer.

    Mirrors the hierarchical-Transformer experiment but constructs the
    *separated* variant (``models.separated_hierarchical_transformer``) with
    one local encoder, and — when ``ttot_learning`` is enabled — a second
    local encoder over the target sequence.
    """

    BUCKET_DIR = "ltl-repair"
    WANDB_PROJECT = "ltl-repair"

    def __init__(
        self,
        ttot_learning: bool = False,
        constant_learning_rate: float = None,
        custom_pos_enc: bool = True,
        d_embed: int = 256,
        d_embed_enc: int = None,
        d_embed_dec: int = None,
        d_ff: int = 1024,
        d_ff_enc_d0: int = None,
        d_ff_enc_d1: int = None,
        d_ff_dec: int = None,
        dropout: float = 0.0,
        dropout_enc: float = None,
        dropout_dec: float = None,
        ff_activation_enc_d0: str = "relu",
        ff_activation_enc_d1: str = "relu",
        ff_activation_dec: str = "relu",
        fix_d1_embed: bool = False,
        name: str = "hier-transformer",
        num_properties: int = 12,
        num_heads: int = 4,
        num_heads_enc_d0: int = None,
        num_heads_enc_d1: int = None,
        num_heads_dec: int = None,
        num_layers: int = 8,
        num_layers_enc_d0: int = None,
        num_layers_enc_d1: int = None,
        num_layers_dec: int = None,
        property_tree_size: int = 25,
        warmup_steps: int = 4000,
        **kwargs,
    ):
        """Configure the experiment.

        Component-specific hyperparameters (``*_enc_d0`` for the depth-0
        encoder, ``*_enc_d1`` for the depth-1 encoder, ``*_dec`` for the
        decoder) fall back to the shared value (``d_embed``, ``d_ff``,
        ``dropout``, ``num_heads``, ``num_layers``) when left as ``None``;
        the shared ``num_layers`` is split evenly between the two encoder
        depths.

        Exits the process if an embedding dimension is not divisible by the
        corresponding number of attention heads. Raises ``NotImplementedError``
        if ``custom_pos_enc`` is False (only the custom positional-encoding
        path is implemented).
        """
        self.ttot_learning = ttot_learning
        self._attn_model = None  # built lazily by the attn_model property
        self.constant_learning_rate = constant_learning_rate
        self.custom_pos_enc = custom_pos_enc
        if not custom_pos_enc:
            raise NotImplementedError
        # Bugfix: compare against None instead of relying on truthiness so an
        # explicit zero override (e.g. dropout_enc=0.0) is respected rather
        # than silently replaced by the shared default.
        self.d_embed_enc = d_embed_enc if d_embed_enc is not None else d_embed
        self.d_embed_dec = d_embed_dec if d_embed_dec is not None else d_embed
        self.d_ff_enc_d0 = d_ff_enc_d0 if d_ff_enc_d0 is not None else d_ff
        self.d_ff_enc_d1 = d_ff_enc_d1 if d_ff_enc_d1 is not None else d_ff
        self.d_ff_dec = d_ff_dec if d_ff_dec is not None else d_ff
        self.dropout_enc = dropout_enc if dropout_enc is not None else dropout
        self.dropout_dec = dropout_dec if dropout_dec is not None else dropout
        self.ff_activation_enc_d0 = ff_activation_enc_d0
        self.ff_activation_enc_d1 = ff_activation_enc_d1
        self.ff_activation_dec = ff_activation_dec
        self.fix_d1_embed = fix_d1_embed
        self.property_tree_size = property_tree_size
        self.num_properties = num_properties
        self.num_heads_enc_d0 = num_heads_enc_d0 if num_heads_enc_d0 is not None else num_heads
        self.num_heads_enc_d1 = num_heads_enc_d1 if num_heads_enc_d1 is not None else num_heads
        self.num_heads_dec = num_heads_dec if num_heads_dec is not None else num_heads
        self.num_layers_enc_d0 = (
            num_layers_enc_d0 if num_layers_enc_d0 is not None else num_layers // 2
        )
        self.num_layers_enc_d1 = (
            num_layers_enc_d1 if num_layers_enc_d1 is not None else num_layers // 2
        )
        self.num_layers_dec = num_layers_dec if num_layers_dec is not None else num_layers
        self.warmup_steps = warmup_steps
        # Multi-head attention requires the embedding to split evenly across heads.
        self._check_heads("Encoder", self.d_embed_enc, self.num_heads_enc_d0)
        self._check_heads("Encoder", self.d_embed_enc, self.num_heads_enc_d1)
        self._check_heads("Decoder", self.d_embed_dec, self.num_heads_dec)
        super().__init__(name=name, **kwargs)

    @staticmethod
    def _check_heads(component: str, d_embed: int, num_heads: int) -> None:
        """Exit the process if ``d_embed`` is not divisible by ``num_heads``.

        Replaces three copy-pasted checks; also fixes the missing space
        before the head count in the original encoder messages.
        """
        if d_embed % num_heads != 0:
            sys.exit(
                f"{component} embedding dimension {d_embed} is "
                "not divisible by the number of attention heads "
                f"{num_heads}"
            )

    @property
    def attn_model(self):
        """Lazily built model instance that also returns attention weights.

        Loads the latest checkpoint from the experiment's local path if one
        exists; otherwise the model keeps its freshly initialized weights.
        """
        if self._attn_model is None:
            self._attn_model = self.init_model(training=False, attn_weights=True)
            logger.info("Created attention model")
            checkpoint = tf.train.latest_checkpoint(self.local_path(self.name))
            if checkpoint:
                logger.info("Found checkpoint %s", checkpoint)
                # expect_partial: the attention model may not consume every
                # checkpointed variable (e.g. optimizer slots).
                self._attn_model.load_weights(checkpoint).expect_partial()
                logger.info("Loaded weights from checkpoint")
        return self._attn_model

    def attn_weights(self, spec, training: bool = False):
        """Collect depth-1 (global) encoder self-attention weights for ``spec``.

        Args:
            spec: LTL specification with ``assumptions`` and ``guarantees``;
                must be encodable by the input encoder.
            training: forwarded to the model call (keep False for inference).

        Returns:
            ``(attention_dict_list, num_layers_enc_d0, tokens)`` where
            ``attention_dict_list`` has one ``{layer: {pos: {attended_pos:
            weight-string}}}`` dict per depth-0 head, or ``None`` if the
            specification cannot be encoded.
        """
        if not self.input_encoder.encode(spec):
            logger.info("Encoding error: %s", self.input_encoder.error)  # fixed typo "Econding"
            return None
        formula_tensor, pos_enc_tensor = self.input_encoder.tensor
        # pylint: disable=E1102
        preds, _, enc_attn_local, enc_attn_global, dec_attn = self.attn_model(
            (tf.expand_dims(formula_tensor, axis=0), tf.expand_dims(pos_enc_tensor, axis=0)),
            training=training,
        )
        results = []
        attention_dict_list = []

        if self.fix_d1_embed:
            # One token per property: assumptions followed by guarantees.
            tokens = list(spec.assumptions) + list(spec.guarantees)
            token_ids = list(range(len(tokens)))
        else:
            # Flatten the padded per-property token lists and keep only the
            # positions that are not the padding token "<p>".
            tokens = [
                t
                for local_tokens in self.input_encoder.property_padded_tokens
                for t in local_tokens
            ]
            token_ids = [i for i, t in enumerate(tokens) if t != "<p>"]

        for head in range(self.num_heads_enc_d0):
            layerdict = {}
            # Attention weights are keyed "layer_1" .. "layer_<num_layers>".
            for layer in range(1, self.num_layers_enc_d0 + 1):
                playerdict = {}
                for player_new, player in enumerate(token_ids):
                    attended_player_dict = {}
                    for player_attended_new, player_attended in enumerate(token_ids):
                        att = enc_attn_global[f"layer_{layer}"]["self_attn"][0][head][player][
                            player_attended
                        ].numpy()
                        # Stringify so the result is JSON-serializable.
                        attended_player_dict[player_attended_new] = str(att)
                    playerdict[player_new] = attended_player_dict
                layerdict[layer] = playerdict
            attention_dict_list.append(layerdict)

        for beam in preds[0]:
            if not self.target_encoder.decode(np.array(beam)):
                logger.info("Decoding error: %s", self.target_encoder.error)
                # Deliberately continue: a failed beam still contributes
                # whatever circuit the target encoder currently holds.
            results.append({"circuit": self.target_encoder.circuit})
        # NOTE(review): `results` is computed but never returned; the loop is
        # kept for its decode/logging side effects — confirm intent.
        return (attention_dict_list, self.num_layers_enc_d0, [tokens[i] for i in token_ids])

    @property
    def init_input_encoder(self):
        """Property encoder that turns a specification into padded token trees."""
        return LTLSpecPropertyEncoder(
            property_pad=self.property_tree_size,
            num_properties=self.num_properties,
            notation=ExprNotation.INFIX,
            encoded_notation=ExprNotation.PREFIX,
            eos=False,
            tpe_format=TPEFormat.BRANCHDOWN,
            tpe_pad=self.d_embed_enc,
        )

    @property
    def init_learning_rate(self):
        """Constant learning rate if configured, else the Transformer warmup schedule."""
        # Bugfix: explicit None check so a (degenerate but explicit) 0.0
        # constant rate is not silently replaced by the schedule.
        if self.constant_learning_rate is not None:
            return self.constant_learning_rate
        return lr_schedules.TransformerSchedule(self.d_embed_enc, warmup_steps=self.warmup_steps)

    @property
    def init_optimizer(self):
        """Adam with the beta/epsilon settings from the original Transformer paper."""
        return tf.keras.optimizers.Adam(
            learning_rate=self.learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9
        )

    def __create_hyperparams(self):
        """Assemble the hyperparameter dicts consumed by the model factory.

        Returns the global/decoder parameter dict; the local-encoder dict(s)
        are nested under ``params["params_sep_local"]`` (one entry, or two
        when ``ttot_learning`` adds a local encoder over the target).
        """
        args = self.args
        params = {}
        params_sep_local = {}

        # Depth-0 (global) encoder.
        params["d_embed_enc"] = args["d_embed_enc"]
        params["d_ff_enc"] = args["d_ff_enc_d0"]
        params["dropout_enc"] = args["dropout_enc"]
        params["ff_activation_enc"] = args["ff_activation_enc_d0"]
        params["num_heads_enc"] = args["num_heads_enc_d0"]
        params["num_layers_enc"] = args["num_layers_enc_d0"]
        params["input_pad_id"] = self.input_pad_id
        # Beam search / decoder.
        params["alpha"] = args["alpha"]
        params["beam_size"] = args["beam_size"]
        params["d_embed_dec"] = args["d_embed_dec"]
        params["d_ff_dec"] = args["d_ff_dec"]
        params["dropout_dec"] = args["dropout_dec"]
        params["max_decode_length"] = self.max_target_length
        params["dtype_float"] = args["dtype_float"]
        params["dtype_int"] = args["dtype_int"]
        params["num_heads_dec"] = args["num_heads_dec"]
        params["ff_activation_dec"] = args["ff_activation_dec"]
        params["num_layers_dec"] = args["num_layers_dec"]
        # NOTE(review): target_pad_id is set from input_pad_id — presumably
        # both vocabularies share the pad id; confirm, else this is a bug.
        params["target_pad_id"] = self.input_pad_id
        params["target_eos_id"] = self.target_eos_id
        params["target_start_id"] = self.target_start_id
        params["target_vocab_size"] = self.target_vocab_size
        params["fix_d1_embed"] = self.fix_d1_embed
        params["drop_batch_remainder"] = args["drop_batch_remainder"]

        # Depth-1 (local) encoder over the specification properties.
        params_sep_local["input_vocab_size"] = self.input_vocab_size
        params_sep_local["d_ff_enc"] = args["d_ff_enc_d1"]
        params_sep_local["ff_activation_enc"] = args["ff_activation_enc_d1"]
        params_sep_local["num_heads_enc"] = args["num_heads_enc_d1"]
        params_sep_local["num_layers_enc"] = args["num_layers_enc_d1"]
        params_sep_local["input_dimensions"] = (self.num_properties, self.property_tree_size)

        # Second local encoder over the target sequence (ttot learning only).
        params_sep_local2 = dict(params_sep_local)
        params_sep_local2["input_dimensions"] = (1, self.max_target_length)
        params_sep_local2["input_vocab_size"] = self.target_vocab_size

        if self.ttot_learning:
            params["params_sep_local"] = [params_sep_local, params_sep_local2]
        else:
            params["params_sep_local"] = [params_sep_local]
        return params

    def init_model(self, training: bool = True, attn_weights: bool = False):
        """Build the separated hierarchical Transformer from the hyperparameters."""
        params = self.__create_hyperparams()
        return models.separated_hierarchical_transformer.create_model(
            params, training=training, attn_weights=attn_weights
        )

    def prepare_tf_dataset(self, tf_dataset):
        """Reshape dataset samples into the model's (inputs, target) format.

        Keras needs the target as an input for Transformer training, and the
        separated hierarchical model expects its inputs wrapped in ragged
        stacks (one entry per local encoder).
        """
        # TODO an input tensor never should be empty. If so (i.e. no
        # assumptions), a dummy input should be used.

        def shape_dataset(input_tensor, target_tensor):
            ltl_tensor, tpe_tensor = input_tensor
            target = tf.expand_dims(target_tensor, axis=0)
            # Sinusoidal positional encoding for the target-side local encoder.
            lpe_tensor = pe.positional_encoding(self.max_target_length, self.d_embed_enc)
            if self.ttot_learning:
                return (
                    (
                        tf.ragged.stack([ltl_tensor, target]),
                        tf.ragged.stack(
                            [tf.cast(tpe_tensor, dtype=self.dtype_float), lpe_tensor]
                        ),
                        target_tensor,
                    ),
                    target_tensor,
                )
            # NOTE(review): unlike the ttot branch, tpe_tensor is not cast to
            # dtype_float here — confirm the dataset already yields floats.
            return (
                (tf.ragged.stack([ltl_tensor]), tf.ragged.stack([tpe_tensor]), target_tensor),
                target_tensor,
            )

        return tf_dataset.map(shape_dataset)

    @classmethod
    def add_init_args(cls, parser):
        """Register constructor arguments on the CLI parser (extends the base set)."""
        super().add_init_args(parser)
        defaults = cls.get_default_args()
        parser.add_argument(
            "--constant-learning-rate", type=float, default=defaults["constant_learning_rate"]
        )
        parser.add_argument("--d-embed", type=int, default=defaults["d_embed"])
        parser.add_argument("--d-embed-enc", type=int, default=defaults["d_embed_enc"])
        parser.add_argument("--d-embed-dec", type=int, default=defaults["d_embed_dec"])
        parser.add_argument("--d-ff", type=int, default=defaults["d_ff"])
        parser.add_argument("--d-ff-enc-d0", type=int, default=defaults["d_ff_enc_d0"])
        parser.add_argument("--d-ff-enc-d1", type=int, default=defaults["d_ff_enc_d1"])
        parser.add_argument("--d-ff-dec", type=int, default=defaults["d_ff_dec"])
        parser.add_argument("--dropout", type=float, default=defaults["dropout"])
        parser.add_argument("--dropout-enc", type=float, default=defaults["dropout_enc"])
        parser.add_argument("--dropout-dec", type=float, default=defaults["dropout_dec"])
        parser.add_argument(
            "--ff-activation-enc-d0", type=str, default=defaults["ff_activation_enc_d0"]
        )
        parser.add_argument(
            "--ff-activation-enc-d1", type=str, default=defaults["ff_activation_enc_d1"]
        )
        parser.add_argument("--ff-activation-dec", type=str, default=defaults["ff_activation_dec"])
        parser.add_argument("--fix-d1-embed", action="store_true")
        parser.add_argument("--num-heads", type=int, default=defaults["num_heads"])
        parser.add_argument("--num-heads-enc-d0", type=int, default=defaults["num_heads_enc_d0"])
        parser.add_argument("--num-heads-enc-d1", type=int, default=defaults["num_heads_enc_d1"])
        parser.add_argument("--num-heads-dec", type=int, default=defaults["num_heads_dec"])
        parser.add_argument("--num-layers", type=int, default=defaults["num_layers"])
        parser.add_argument("--num-layers-enc-d0", type=int, default=defaults["num_layers_enc_d0"])
        parser.add_argument("--num-layers-enc-d1", type=int, default=defaults["num_layers_enc_d1"])
        parser.add_argument("--num-layers-dec", type=int, default=defaults["num_layers_dec"])
        parser.add_argument("--warmup-steps", type=int, default=defaults["warmup_steps"])
        parser.add_argument("--ttot-learning", action="store_true", dest="ttot_learning")

    @classmethod
    def add_tune_args(cls, parser):
        """Register hyperparameter-search arguments (value lists per dimension)."""
        parser.add_argument("--d-embed", nargs="*", default=[64, 256])
        parser.add_argument("--d-ff", nargs="*", default=[64, 256, 1024])
        parser.add_argument("-n", "--name", default="ht-tune")
        parser.add_argument("--num-layers", nargs="*", default=[4, 8])
        parser.add_argument("--num-heads", nargs="*", default=[4, 8])


if __name__ == "__main__":
    # Script entry point: dispatch to the experiment's command-line interface.
    LTLSynSepHierTransformerExperiment.cli()
