# Basic utils for molct.

import os
import jax
import jax.numpy as jnp
import numpy as np
import haiku as hk

from jax import Array
from typing import Optional, Union, List
from jax.nn.initializers import lecun_normal, xavier_uniform
from .prng import SafeKey
from .activation import get_activation

# Public API of this module. Note: "PositonalEmbedding" was a typo — the
# class defined below is `PositionalEmbedding` — which made
# `from <module> import *` fail on that entry.
__all__ = [
    "softmax_with_mask",
    "projection",
    "lecun_normal_init",
    "xavier_uniform_init",
    "PositionalEmbedding",
    "MultiheadAttention",
    "HyperformerPairBlock",
    "OuterProduct",
    "Transition",
]

def softmax_with_mask(x: jnp.ndarray,
                      mask: jnp.ndarray,
                      epsilon: float = -5.0e4) -> jnp.ndarray:
    """Softmax over the last axis with masked positions suppressed.

    Entries where ``mask`` is False are replaced by the large negative
    value ``epsilon`` before the softmax, so they receive (near-)zero
    probability mass.

    Args:
        x: logits of any shape.
        mask: boolean array broadcastable against ``x``.
        epsilon: fill value for masked-out logits.

    Returns:
        Softmax probabilities with the same shape as ``x``.
    """
    filled = jnp.where(mask, x, epsilon)
    return jax.nn.softmax(filled, axis=-1)

def projection(act, weights, bias):
    r"""Affine projection used by OuterProduct.

    Args:
        act: activations of shape (A, F).
        weights: projection matrix of shape (F, C).
        bias: bias vector of shape (C,).

    Returns:
        ``act @ weights + bias`` with shape (A, C).
    """
    return act @ weights + bias

def lecun_normal_init(shape, dtype):
    """Haiku-compatible initializer drawing from a LeCun-normal distribution.

    Pulls the next haiku RNG key (wrapped through SafeKey, matching the
    project's key-handling convention) and samples via jax's initializer.
    Must be called inside an ``hk.transform`` context.
    """
    rng = SafeKey(hk.next_rng_key()).get()
    return lecun_normal()(rng, shape, dtype)

def xavier_uniform_init(shape, dtype):
    """Haiku-compatible initializer drawing from a Xavier-uniform distribution.

    Renamed from the misspelled ``xaiver_uniform_init`` so the definition
    matches the ``xavier_uniform_init`` name exported in ``__all__``.
    Must be called inside an ``hk.transform`` context.
    """
    safe_key = hk.next_rng_key()
    safe_key = SafeKey(safe_key)
    return xavier_uniform()(safe_key.get(), shape, dtype)

# Backward-compatible alias for callers still using the old misspelled name.
xaiver_uniform_init = xavier_uniform_init

class PositionalEmbedding(hk.Module):
    """Builds query/key/value tensors from node and pair features.

    Node features ``x_i`` are modulated by the diagonal (self) and full
    pair features ``g_ij``, layer-normalized, and linearly projected into
    per-node queries and per-pair keys/values.
    """

    def __init__(self,
                 dim_feature: int,
                 fp_type = jnp.float32,
                 use_public_layer_norm: bool = True,
                 name: str = "positional_embedding",
                 ):
        """dim_feature: output width of the q/k/v projections.
        fp_type: accepted for signature parity; currently unused.
        use_public_layer_norm: share one LayerNorm between both inputs."""
        super().__init__(name=name)

        self.dim_feature = dim_feature
        self.use_public_layer_norm = use_public_layer_norm

    def __call__(self,
                 x_i: jnp.ndarray,
                 g_ij: jnp.ndarray,
                 time: float = 0.0,
                 ):
        # When sharing is requested, one LayerNorm instance (hence one set
        # of parameters) normalizes both the diagonal and the pair inputs.
        if self.use_public_layer_norm:
            shared_norm = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True, name="norm") # type: ignore
            x_norm = shared_norm
            g_norm = shared_norm
        else:
            x_norm = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True, name="x_norm") # type: ignore
            g_norm = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True, name="g_norm") # type: ignore

        q_gen = hk.Linear(output_size=self.dim_feature, with_bias=False, name="q_gen") # type: ignore
        k_gen = hk.Linear(output_size=self.dim_feature, with_bias=False, name="k_gen") # type: ignore
        v_gen = hk.Linear(output_size=self.dim_feature, with_bias=False, name="v_gen") # type: ignore

        # Diagonal of the pair features: (A, A, F) -> (F, A) -> (A, F).
        g_ii = jnp.diagonal(g_ij, axis1=0, axis2=1).T
        # Self term broadcast to (A, 1, F) for the query path.
        xg_ii = (x_i * g_ii)[:, None, :]
        # Cross term: (1, A, F) * (A, A, F) -> (A, A, F).
        xg_ij = x_i[None, ...] * g_ij

        # `time` acts as a scalar shift applied before normalization.
        # Normalize both inputs first (same call order as module creation,
        # keeping haiku's parameter/RNG bookkeeping unchanged).
        xg_ii = x_norm(xg_ii + time)
        xg_ij = g_norm(xg_ij + time)

        # Project: q from the self term (A, 1, F); k and v from the pair
        # term (A, A, F).
        q = q_gen(xg_ii)
        k = k_gen(xg_ij)
        v = v_gen(xg_ij)

        return q, k, v

class MultiheadAttention(hk.Module):
    """Multi-head scaled dot-product attention over per-node neighbor features."""

    def __init__(self,
                 dim_feature: int,
                 n_heads: int = 1,
                 name: str = "multihead_attention",
                 ):
        """dim_feature must be divisible by n_heads; each head gets
        dim_feature // n_heads channels."""
        super().__init__(name=name)

        self.dim_feature = dim_feature
        self.n_heads = n_heads

        if self.dim_feature % self.n_heads != 0:
            raise ValueError("[utils/base/MultiheadAttention]: Dim_feature must be divisible by n_heads.")
        self.dim_head = self.dim_feature // self.n_heads
        # Trailing shape used when splitting the feature axis into heads.
        self.reshape_tail = (self.n_heads, self.dim_head)

    def __call__(self,
                 q_vec: jnp.ndarray,
                 k_mat: jnp.ndarray,
                 v_mat: jnp.ndarray,
                 mask: jnp.ndarray,
                 cutoff: jnp.ndarray,
                 ):
        # Implicitly asserts q_vec is rank-3: (A, 1, F).
        _A, _, _F = q_vec.shape
        output_proj = hk.Linear(output_size=self.dim_feature,
                                with_bias=False,
                                name='linear_output') # type: ignore

        def split_heads(t):
            # (..., N, F) -> (..., N, h, f) -> (..., h, N, f)
            return t.reshape(t.shape[:-1] + self.reshape_tail).swapaxes(-2, -3)

        q_h = split_heads(q_vec)   # (A, h, 1, f)
        k_h = split_heads(k_mat)   # (A, h, A, f)
        v_h = split_heads(v_mat)   # (A, h, A, f)

        # Scaled dot-product scores: (A, h, 1, f) x (A, h, A, f) -> (A, h, 1, A).
        scores = jnp.einsum("...ij,...kj->...ik", q_h, k_h)
        scores = scores / jnp.sqrt(self.dim_head)
        # Mask then rescale by the (distance) cutoff; both broadcast over
        # the head and query axes.
        probs = softmax_with_mask(scores, mask[..., None, None, :])
        probs = probs * cutoff[..., None, None, :]

        # Weighted sum of values: (A, h, 1, A) x (A, h, A, f) -> (A, h, 1, f).
        attended = jnp.einsum("...ij,...jk->...ik", probs, v_h)
        # Merge heads back: (A, h, 1, f) -> (A, 1, h, f) -> (A, 1, F).
        attended = attended.swapaxes(-2, -3).reshape(q_vec.shape)
        return output_proj(attended)

class HyperformerPairBlock(hk.Module):
    """Updates edge (pair) features via an outer-product term and a transition MLP.

    Two residual updates are applied in sequence:
    edge += OuterProduct(node); edge += Transition(edge).
    """

    def __init__(self,
                 dim_feature: int,
                 dim_outerproduct: int,
                 num_transition: int,
                 fp_type = jnp.float32,
                 name: str = "hyperformer_pair_block",
                 ):
        super().__init__(name=name)

        self.dim_feature = dim_feature
        self.dim_outerproduct = dim_outerproduct
        self.num_transition = num_transition
        self.fp_type = fp_type

    def __call__(self,
                 node_vec: jnp.ndarray,
                 edge_vec: jnp.ndarray,
                 node_mask: jnp.ndarray,
                 edge_mask: Optional[jnp.ndarray]):
        # Submodules are instantiated per call; haiku ties their parameters
        # to the fixed names below, so this is equivalent to caching them.
        # NOTE(review): edge_mask is accepted but currently unused.
        outer_product = OuterProduct(self.dim_feature, self.dim_outerproduct, fp_type=self.fp_type, name="outer_product") # type: ignore
        transition = Transition(self.dim_feature, self.num_transition, name="transition") # type: ignore

        edge_vec = edge_vec + outer_product(node_vec, node_mask)
        edge_vec = edge_vec + transition(edge_vec)

        return edge_vec

class OuterProduct(hk.Module):
    """Outer-product update: node features (A, F) -> pair features (A, A, F).

    Nodes are projected to left/right activations of width C, combined as
    a pairwise channel outer product, and projected back to F:
    out[i, j] = flatten(left[i] (x) right[j]) @ W_out + b_out.
    """

    def __init__(self,
                 dim_feature: int,
                 dim_outerproduct: int,
                 key: Optional[SafeKey] = None,
                 fp_type = jnp.float32,
                 name: str = "outer_product",):
        super().__init__(name=name)

        self.fp_type = fp_type
        self.dim_feature = dim_feature
        self.dim_outerproduct = dim_outerproduct

        # Kept for parity with the original code: when no key is supplied,
        # one haiku RNG key is consumed here, which keeps the key stream
        # aligned for the parameter initializers below.
        if key is None:
            safe_key = SafeKey(hk.next_rng_key())
        else:
            safe_key = key

        def _weight(pname, shape):
            # LeCun-normal weight matrix.
            return hk.get_parameter(name=pname, shape=shape,
                                    dtype=self.fp_type, init=lecun_normal_init) # type: ignore

        def _zeros(pname, shape):
            # Zero-initialized bias vector.
            return hk.get_parameter(name=pname, shape=shape,
                                    dtype=self.fp_type, init=jnp.zeros) # type: ignore

        # Creation order matters for RNG-key consumption; keep it fixed.
        self.left_weights = _weight("left_weights", [dim_feature, dim_outerproduct])
        self.left_bias = _zeros("left_bias", [dim_outerproduct])
        self.right_weights = _weight("right_weights", [dim_feature, dim_outerproduct])
        self.right_bias = _zeros("right_bias", [dim_outerproduct])
        self.output_weights = _weight("output_weights", [dim_outerproduct * dim_outerproduct, dim_feature])
        self.output_bias = _zeros("output_bias", [dim_feature])

        # LayerNorm with default init: scale=1.0, offset=0.0.
        self.norm_fn = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True, name="norm_fn") # type: ignore

    def __call__(self, node_vec: jnp.ndarray, node_mask: jnp.ndarray):
        n_atoms = node_vec.shape[0]
        n_chan = self.dim_outerproduct

        act = self.norm_fn(node_vec)
        # (A,) -> (A, 1) so masked nodes zero out whole rows.
        mask_col = node_mask[..., None]

        # Left/right projections: (A, F) -> (A, C), masked per node.
        left_act = (jnp.matmul(act, self.left_weights) + self.left_bias) * mask_col
        right_act = (jnp.matmul(act, self.right_weights) + self.right_bias) * mask_col

        # Pairwise channel outer product: (A, C) x (A, C) -> (A, A, C, C),
        # flattened to (A, A, C * C). Index-equivalent to the original
        # rank-1 matmul + transpose/reshape pipeline:
        # out[i, j, c * C + d] = left[i, c] * right[j, d].
        out_act = jnp.einsum("ic,jd->ijcd", left_act, right_act)
        out_act = out_act.reshape((n_atoms, n_atoms, n_chan * n_chan))

        # Final projection back to the feature width: (A, A, C*C) -> (A, A, F).
        return jnp.matmul(out_act, self.output_weights) + self.output_bias

class Transition(hk.Module):
    """Pre-layernorm two-layer MLP applied to pair features.

    (A, A, F) -> LayerNorm -> Linear(F * T) -> activation -> Linear(F).
    """

    def __init__(self,
                 dim_feature: int,
                 n_transition: int,
                 key: Optional[SafeKey] = None,
                 act_fn: str = "relu",
                 name: str = "transition"):
        """dim_feature: input/output width; n_transition: hidden-width
        multiplier. NOTE(review): `key` is accepted but unused; kept for
        signature parity."""
        super().__init__(name=name)

        self.dim_feature = dim_feature
        # Hidden width is the feature width scaled by the transition factor.
        self.dim_transition = int(dim_feature * n_transition)

        self.norm_fn = hk.LayerNorm(axis=-1, 
                                    create_scale=True, 
                                    create_offset=True, 
                                    name="norm") # type: ignore
        self.transition_1 = hk.Linear(output_size=self.dim_transition,
                                      with_bias=True,
                                      name="transition_1",
                                      w_init=lecun_normal_init,
                                      b_init=jnp.zeros,) # type: ignore
        self.transition_2 = hk.Linear(output_size=self.dim_feature,
                                      with_bias=True,
                                      name="transition_2",
                                      w_init=lecun_normal_init,
                                      b_init=jnp.zeros,) # type: ignore
        self.act_fn = get_activation(act_fn)

    def __call__(self,
                 edge_vec: jnp.ndarray,
                 ):
        # Expand (A, A, F) -> (A, A, F * T), apply the nonlinearity, then
        # project back to (A, A, F).
        hidden = self.act_fn(self.transition_1(self.norm_fn(edge_vec)))
        return self.transition_2(hidden)

# Test the modules
if __name__ == "__main__":
    # Intentionally empty: the haiku modules above need an hk.transform
    # context (with RNG keys) to run, so there is no standalone smoke test
    # here yet.
    pass