# Testing positional embedding and multihead attention.
# 23-10-25

import os
import sys
import jax
import jax.numpy as jnp
import numpy as np
import joblib
import haiku as hk
import mindspore as ms

from jax import Array, jit
from typing import Optional, Union, List
from jax.nn.initializers import lecun_normal, normal, zeros, ones
from mindspore import load_param_into_net, Tensor, nn, context

# Make the repository root importable so `cybertron` / `ms_cybertron`
# resolve when this script is run from inside its own directory.
sys.path.append(os.path.dirname(sys.path[0]))

# ## Test for utils
# setting context & hyper parameters
# Run MindSpore in graph mode on GPU; its outputs are compared below
# against the jitted JAX implementation.
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")

# Test modules in utils.base
# Test PositionalEmbedding
from cybertron.utils.base import PositionalEmbedding
from ms_cybertron.base import PositionalEmbedding as ms_PositionalEmbedding

# Tolerance for the cross-framework numeric comparison, plus a fixed
# PRNG key so the Haiku initialisation below is reproducible.
EPSILON = 1e-3
rng = jax.random.PRNGKey(42)
print("=================Test PositionalEmbedding====================")
## create and show data for test
# Random node features (B, A, F) and pairwise features (B, A, A, F);
# the diagonal of the pairwise tensor is overwritten with ones.
np.random.seed(42)
test_x = np.random.uniform(size=(2, 3, 4)) # (B, A, F)
test_g = np.random.uniform(size=(2, 3, 3, 4)) # (B, A, A, F)
diag = np.eye(3).reshape(1, 3, 3, 1)
test_g = np.where(diag.astype(np.bool_), diag, test_g)
for name, arr in (("test_x", test_x), ("test_g", test_g)):
    print(f"{name}: shape of {arr.shape}")

def forward_fn(x, g, training=False):
    """Single-sample forward pass: embed node features x (A, F) and
    pairwise features g (A, A, F) into query/key/value tokens."""
    return PositionalEmbedding(dim_feature=4)(x, g) # type: ignore

# Batch both inputs over their leading axis, wrap in a Haiku module,
# then initialise and run one jitted forward pass.
forward_fn = jax.vmap(forward_fn, in_axes=(0, 0))
embedding_fn = hk.transform(forward_fn, apply_rng=True)
params = embedding_fn.init(rng, test_x, test_g)
embedding_tokens = jit(embedding_fn.apply)(params, rng, test_x, test_g)
for token in embedding_tokens:
    print(f"PositinalEmbedding output: shape of {token.shape}")
q, k, v = embedding_tokens

# Copy the Haiku parameters into the MindSpore module via a name map.
# The `.T` mirrors the transpose in the original port — presumably
# because the two frameworks store linear weights in opposite layouts.
_params = hk.data_structures.to_mutable_dict(params)
ms_embedding_fn = ms_PositionalEmbedding(dim=4)
_EMBED_PARAM_MAP = {
    'g_norm.gamma': ('positional_embedding/norm', 'scale', False),
    'g_norm.beta': ('positional_embedding/norm', 'offset', False),
    'x2q.weight': ('positional_embedding/q_gen', 'w', True),
    'x2k.weight': ('positional_embedding/k_gen', 'w', True),
    'x2v.weight': ('positional_embedding/v_gen', 'w', True),
}
ms_params = {}
for ms_name, (jx_scope, jx_leaf, transpose) in _EMBED_PARAM_MAP.items():
    value = np.array(_params[jx_scope][jx_leaf])
    if transpose:
        value = value.T
    ms_params[ms_name] = ms.Parameter(Tensor(value, ms.float32))

load_param_into_net(ms_embedding_fn, ms_params)
ms_test_x = Tensor(np.array(test_x), ms.float32)
ms_test_g = Tensor(np.array(test_g), ms.float32)
ms_q, ms_k, ms_v = ms_embedding_fn(ms_test_x, ms_test_g)

# Element-wise agreement between the JAX and MindSpore outputs.
q_same = np.allclose(q, ms_q.asnumpy(), atol=EPSILON)
k_same = np.allclose(k, ms_k.asnumpy(), atol=EPSILON)
v_same = np.allclose(v, ms_v.asnumpy(), atol=EPSILON)

print(f"q is same: {q_same} in accuracy of {EPSILON}")
print(f"k is same: {k_same} in accuracy of {EPSILON}")
print(f"v is same: {v_same} in accuracy of {EPSILON}")

## Test MultiheadAttention
from cybertron.utils.base import MultiheadAttention
from ms_cybertron.base import MultiheadAttention as ms_MultiheadAttention

print("=================Test MultiheadAttention====================")
# Trivial (all-True / all-ones) mask and cutoff. They are mapped with
# in_axes=None below, so one (A, A) array is shared across the batch.
mask = np.ones((3, 3), dtype=np.bool_)
cutoff = np.ones((3, 3), dtype=np.float32)
print(f"mask: shape of {mask.shape}")
print(f"cutoff: shape of {cutoff.shape}")

def forward_att(q, k, v, mask, cutoff):
    """Single-sample attention over the q/k/v tokens built above."""
    return MultiheadAttention(dim_feature=4, n_heads=2)(q, k, v, mask, cutoff)

forward_att = jax.vmap(forward_att, in_axes=(0, 0, 0, None, None))
attention_fn = hk.transform(forward_att, apply_rng=True)
params = attention_fn.init(rng, q, k, v, mask, cutoff)
attention_token = jit(attention_fn.apply)(params, rng, q, k, v, mask, cutoff)
print(f"MultiheadAttention output: shape of {attention_token.shape}")

# Convert the attention parameters for MindSpore. Build a FRESH dict:
# the previous code reused the embedding-stage `ms_params`, so
# load_param_into_net also tried to push the stale g_norm/x2q/x2k/x2v
# entries into the attention net.
_params = hk.data_structures.to_mutable_dict(params)
ms_attention = ms_MultiheadAttention(dim_feature=4, n_heads=2)
ms_params = {
    # Transposed to match the MindSpore weight layout, as in the
    # embedding conversion above.
    'output.weight': ms.Parameter(Tensor(
        np.array(_params['multihead_attention/linear_output']['w']).T,
        ms.float32)),
    # NOTE(review): no 'output.bias' is loaded — confirm the MindSpore
    # output layer is built without a bias term.
}
load_param_into_net(ms_attention, ms_params)
# MindSpore takes explicitly batched (B, A, A) mask/cutoff, unlike the
# vmapped JAX version which broadcasts a single (A, A) pair.
mask = np.ones((2, 3, 3), dtype=np.bool_)
cutoff = np.ones((2, 3, 3), dtype=np.float32)
ms_attention_token = ms_attention(
    Tensor(np.array(q), ms.float32),
    Tensor(np.array(k), ms.float32),
    Tensor(np.array(v), ms.float32),
    Tensor(np.array(mask), ms.bool_),
    Tensor(np.array(cutoff), ms.float32)
)

attention_same = np.allclose(attention_token, ms_attention_token.asnumpy(), atol=EPSILON)
print(f"attention is same: {attention_same} in accuracy of {EPSILON}")