from typing import Optional, Union
import numpy as np
import json
import os

import mindspore as ms
from mindspore import nn
from mindspore.ops import functional as F
from mindspore.common.initializer import Normal, initializer
from mindspore import Parameter, Tensor
import mindspore.ops as ops
from mindspore.ops import operations as P

from mindformers import BertModel, BertConfig, BertTokenizer
from mindformers.models.base_model import BaseModel
from mindspore.train.serialization import load_checkpoint, save_checkpoint, load_param_into_net
from mindformers.models.base_config import BaseConfig

from .vit_3d import ViT3DModel, ViT3DConfig

class TextConfig(BaseConfig):
    """Configuration for the text (BERT) encoder branch.

    Args:
        max_length (int): Maximum tokenized sequence length. Default: 128.
        bert_path (str): Directory holding the pretrained BERT files
            (``config.json`` and ``ms.ckpt``). Default: ''.
    """

    def __init__(self, max_length=128, bert_path='', **kwargs):
        super().__init__(**kwargs)
        self.bert_path = bert_path
        self.max_length = max_length

class ClipConfig(BaseConfig):
    """Top-level configuration for the CLIP model.

    Args:
        vision_config (ViT3DConfig, optional): Config for the 3D ViT visual
            encoder. A fresh default is built when ``None``. Default: None.
        text_config (TextConfig, optional): Config for the BERT text encoder.
            A fresh default is built when ``None``. Default: None.
        logit_scale_init_value (float): Initial value of the learnable
            temperature (log-space). Default: 2.6592 (= log(1/0.07)).
        projection_dim (int): Dimension of the shared image/text embedding
            space. Default: 512.
        dtype (str): Compute dtype, "float16" or "float32". Default: "float16".
    """

    def __init__(
        self,
        vision_config: Optional[ViT3DConfig] = None,
        text_config: Optional[TextConfig] = None,
        logit_scale_init_value: Optional[float] = 2.6592,
        projection_dim: Optional[int] = 512,
        dtype: Optional[str] = "float16",
        **kwargs
    ):
        super().__init__(**kwargs)
        # Fix: the original used mutable default arguments
        # (`= ViT3DConfig()` / `= TextConfig()`), which creates the default
        # config objects once at function-definition time and shares them
        # across every ClipConfig instance. Build fresh defaults per call.
        self.text_config = TextConfig() if text_config is None else text_config
        self.vision_config = ViT3DConfig() if vision_config is None else vision_config
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.dtype = dtype


class LayerNorm(nn.LayerNorm):
    r"""LayerNorm that supports fp16 inputs with fp32 gains/biases.

    The input is promoted to fp32 before normalization and the result is
    cast back to the input's original dtype.

    Args:
        x (ms.Tensor): Input tensor; see mindspore.nn.LayerNorm for details.

    Return:
        y (ms.Tensor): Normalized tensor in the input's dtype.
    """

    def construct(self, x: ms.Tensor):
        """Normalize in fp32, then restore the caller's dtype."""
        original_dtype = x.dtype
        normalized = super().construct(ops.cast(x, ms.float32))
        return ops.cast(normalized, original_dtype)



class BertEncoder(BaseModel):
    """Text encoder: pretrained BERT followed by a final LayerNorm.

    Loads the BERT configuration and checkpoint from ``config.bert_path``
    (expects ``config.json`` and ``ms.ckpt`` in that directory) and returns
    the layer-normalized first-token ([CLS]-style) embedding.

    Args:
        config (TextConfig): Provides ``bert_path`` and ``max_length``.
    """

    def __init__(self, config):
        super().__init__(config)
        self.config = config

        # Fix: the original `json.load(open(...))` leaked the file handle;
        # use a context manager so the file is closed deterministically.
        with open(os.path.join(config.bert_path, 'config.json'), 'r') as f:
            cfg_original = json.load(f)
        self.bert_config = BertConfig(dtype="float16", seq_length=config.max_length, **cfg_original)
        ckpt = load_checkpoint(os.path.join(config.bert_path, 'ms.ckpt'))

        self.bert = BertModel(self.bert_config)
        load_param_into_net(self.bert, ckpt)
        self.hidden_size = self.bert_config.hidden_size
        self.ln_final = LayerNorm([self.hidden_size])

    def get_first_output(self, outputs):
        """Slice the first token's embedding out of (batch, seq, hidden) output.

        Returns a (batch, hidden) tensor after squeezing the length-1 axis.
        """
        batch_size = P.Shape()(outputs)[0]
        sequence_slice = self.bert.slice(outputs,
                                    (0, 0, 0),
                                    (batch_size, 1, self.hidden_size),
                                    (1, 1, 1))
        first_token = self.bert.squeeze_1(sequence_slice)
        return first_token

    def construct(self, inputs):
        """Encode text.

        Args:
            inputs: sequence of int tensors forwarded positionally to BERT —
                presumably (input_ids, attention_mask, token_type_ids), as
                assembled by CLIPModel.construct.

        Returns:
            ms.Tensor: layer-normalized first-token embedding, (batch, hidden).
        """
        inputs = [i.astype(ms.int32) for i in inputs]
        outputs = self.bert(*inputs)
        first_token = self.get_first_output(outputs[0])
        return self.ln_final(first_token)


class CLIPModel(BaseModel):
    """CLIP-style contrastive model pairing a 3D ViT with a BERT text encoder.

    Both encoders project into a shared ``projection_dim`` space; training
    computes the symmetric InfoNCE loss over features gathered from all
    devices (AllGather), inference returns the image/text logit matrices.

    Args:
        config (ClipConfig): Model configuration.
    """

    def __init__(self, config: ClipConfig):
        super(CLIPModel, self).__init__(config)
        self.dtype = self.get_dtype(config.dtype)
        self.cross_entropy = nn.SoftmaxCrossEntropyWithLogits(reduction="mean", sparse=True)

        self.visual_encoder = ViT3DModel(config.vision_config)
        self.text_encoder = BertEncoder(config.text_config)

        self.visual_projection = Parameter(initializer(
            Normal(mean=0.0, sigma=config.vision_config.embed_dim ** -0.5),
            [config.vision_config.embed_dim, config.projection_dim], ms.float32))

        self.text_projection = Parameter(initializer(
            Normal(mean=0.0, sigma=self.text_encoder.hidden_size ** -0.5),
            [self.text_encoder.hidden_size, config.projection_dim], ms.float32))

        # Fix: honor config.logit_scale_init_value instead of hard-coding
        # np.log(1 / 0.07). The config default (2.6592) matches log(1/0.07),
        # so default behavior is unchanged.
        self.logit_scale = Parameter(
            Tensor(config.logit_scale_init_value).astype(ms.float32))
        self.exp = ops.Exp()
        self.reshape = ops.Reshape()
        self.allgather = ops.AllGather()

    def get_dtype(self, dtype: str):
        """Map a dtype name ("float16"/"float32") to the mindspore dtype.

        Raises:
            TypeError: for any other dtype string.
        """
        if dtype == "float16":
            return ms.float16
        if dtype == "float32":
            return ms.float32
        raise TypeError("unsupported data type.")

    def gather_features(self, features):
        """AllGather ``features`` across devices, flattening the device axis."""
        target_shape = (-1,) + features.shape[1:]
        all_feature = self.allgather(features)
        all_feature = all_feature.reshape(target_shape)
        return all_feature

    def construct(self, image, input_ids, attention_mask, token_type_ids):
        """Compute the contrastive loss (training) or logits (inference)."""
        text = (input_ids, attention_mask, token_type_ids)
        image_features = self.get_image_features(image)
        text_features = self.get_text_features(text)

        # L2-normalize each embedding along the feature axis
        # (Tensor.norm(axis, ...) with the default p=2 — TODO confirm against
        # the mindspore version in use).
        image_features = image_features / image_features.norm(1, keep_dims=True)
        text_features = text_features / text_features.norm(1, keep_dims=True)
        image_features = self.gather_features(image_features)
        text_features = self.gather_features(text_features)

        # Learnable temperature, stored in log space.
        logit_scale = self.exp(self.logit_scale)

        if not self.training:
            logits_per_image = ops.matmul(logit_scale * image_features, text_features.T)
            logits_per_text = logits_per_image.T
            return logits_per_image, logits_per_text

        logits = ops.matmul(logit_scale * image_features, text_features.T)
        batch_size, _ = F.shape(logits)

        # Fix: pin labels to int32 — np.arange defaults to int64, while the
        # sparse SoftmaxCrossEntropyWithLogits expects int32 labels.
        labels = ms.Tensor(np.arange(batch_size), ms.int32)

        # Symmetric InfoNCE: image->text and text->image cross-entropy.
        images_loss = self.cross_entropy(logits, labels)
        texts_loss = self.cross_entropy(logits.T, labels)
        loss = (images_loss + texts_loss) / 2
        return loss

    def get_image_features(self, image: ms.Tensor):
        """Encode ``image`` and project it into the shared embedding space."""
        return ops.matmul(self.visual_encoder(image), self.visual_projection)

    def get_text_features(self, text):
        """Encode ``text`` inputs and project into the shared embedding space."""
        return ops.matmul(self.text_encoder(text), self.text_projection)
