from functools import partial

import mindspore
from mindspore import set_context
# from mindspore.ops import Depend, clip_by_global_norm, value_and_grad
import xt.model.impala.vtrace_ms as vtrace
from zeus.common.util.register import Registers
from xt.model import XTModel
from xt.model.model_ms import XTModel_MS
from xt.model.impala.default_config import GAMMA, LR
from xt.model.ms_compat import (
    Cell,
    DTYPE_MAP,
    Adam,
    Conv2d,
    Tensor,
    Flatten,
    WithLossCell,
    TrainOneStepCell,
    ms
)
import mindspore.ops as ops
import mindspore.nn as nn
import numpy as np
from xt.model.ms_utils import MSVariables
from xt.model.atari_model import get_atari_filter
from xt.model.model_utils_ms import state_transform_ms, custom_norm_initializer_ms
from zeus.common.util.common import import_config
from absl import logging
set_context(mode=ms.GRAPH_MODE)




@Registers.model
class ImpalaCnnOptMSY(XTModel_MS):

    """IMPALA CNN actor/learner model (MindSpore port, work in progress).

    NOTE(review): this class is mid-migration from a TensorFlow graph-mode
    implementation. Several TF idioms below (placeholder attributes that stay
    None, ``Saver``/``sess``/``graph``, ``optimizer.apply_gradients``) have no
    MindSpore equivalent as written -- see the inline NOTE(review) comments.
    """
    def __init__(self, model_info):
        # Merge user-supplied model_config entries into module globals
        # (may override the GAMMA / LR defaults imported above).
        model_config = model_info.get("model_config", dict())
        import_config(globals(), model_config)
        self.dtype = DTYPE_MAP.get(model_info.get("default_dtype", "float32"))
        self.input_dtype = model_info.get("input_dtype", "float32")
        self.sta_mean = model_info.get("state_mean", 0.)
        self.sta_std = model_info.get("state_std", 255.)
        # functools.partial pre-binds the normalization constants so the
        # transform can later be called with only the state tensor.
        self._transform = partial(state_transform_ms,
                                  mean=self.sta_mean,
                                  std=self.sta_std,
                                  input_dtype=self.input_dtype)
        self.state_dim = model_info["state_dim"]
        self.action_dim = model_info["action_dim"]
        self.filter_arch = get_atari_filter(self.state_dim)

        # lr schedule with linear_cosine_decay
        self.lr_schedule = model_config.get("lr_schedule", None)
        self.opt_type = model_config.get("opt_type", "adam")
        self.lr = None

        # NOTE(review): the ph_* attributes mirror TF placeholders; they stay
        # None here, yet create_model() builds ops on them, which cannot work
        # as written -- the network needs a Cell-based redesign.
        self.ph_state = None
        self.ph_adv = None
        self.out_actions = None
        self.pi_logic_outs, self.baseline = None, None

        # placeholder for behavior policy logic outputs
        self.ph_bp_logic_outs = None
        self.ph_actions = None
        self.ph_dones = None
        self.ph_rewards = None
        self.loss, self.optimizer, self.train_op = None, None, None

        self.grad_norm_clip = model_config.get("grad_norm_clip", 40.0)
        self.sample_batch_steps = model_config.get("sample_batch_step", 50)

        self.saver = None
        self.explore_paras = None
        self.actor_var = None  # store weights for agent
        super().__init__(model_info)
        # todo ImpalaPredictPolicy
        # self.predict_net = self.ImpalaPredictPolicy(self.model)
        # adam = Adam(params=self.predict_net.trainable_params(), learning_rate=LR)
        # # todo WithLossCell
        # loss_fn = WithLossCell(self.loss)
        # # todo NetWithLoss
        # # forward_fn = NetWithLoss()
        # batch_step = self.sample_batch_steps
        # # todo MyTrainOneStepCell
        # self.train_net = TrainOneStepCell(network=self.predict_net, optimizer=adam, sens=1.0)
        # # todo set_train
        # self.train_net.set_train()

    def create_model(self, model_info):
        """Build the CNN policy/value network and the v-trace loss graph.

        NOTE(review): self.ph_state is still None when this runs (see
        __init__), and the Conv2D/Dense primitives are instantiated and
        applied TF-functional style -- this whole method needs restructuring
        into a MindSpore Cell before it can execute.
        """
        # NOTE(review): bypasses self._transform, so the configured
        # mean/std/input_dtype are NOT applied here -- confirm intent.
        state_input = state_transform_ms(x=self.ph_state)

        last_layer = state_input

        # Stacked conv+ReLU trunk from the Atari filter architecture.
        for (out_size, kernel, stride) in self.filter_arch[:-1]:
            last_layer = ops.Conv2D(
                out_channels=out_size,
                kernel_size=(kernel, kernel),
                stride=(stride, stride),
                pad_mode="same",
            )(last_layer)
            last_layer = ops.ReLU()(last_layer)

        # last convolution
        (out_size, kernel, stride) = self.filter_arch[-1]
        convolution_layer = ops.Conv2D(
            out_channels=out_size,
            kernel_size=(kernel, kernel),
            stride=(stride, stride),
            pad_mode="valid",
        )(last_layer)
        # NOTE(review): applies ReLU to last_layer, discarding the valid-pad
        # convolution computed just above -- likely should be
        # ops.ReLU()(convolution_layer).
        convolution_layer = ops.ReLU()(last_layer)

        # Policy head: 1x1 conv to action_dim, spatial dims squeezed away.
        self.pi_logic_outs = ops.squeeze(
            ops.Conv2D(
                out_channels=self.action_dim,
                kernel_size=(1, 1),
                pad_mode="same")(convolution_layer),
            axis=(1, 2),
        )
        baseline_flat = nn.Flatten()(convolution_layer)
        # NOTE(review): in_channels receives a tensor, but nn.Dense expects an
        # int channel count; the Dense cell is also never applied to
        # baseline_flat -- squeeze() is called on the cell object itself.
        self.baseline = ops.squeeze(
            nn.Dense(
                in_channels=baseline_flat,
                out_channels=1,
                activation=None,
                bias_init=custom_norm_initializer_ms(0.01),
            ),
            1,
        )
        # Sample one action index per row from the policy logits.
        self.out_actions = ops.squeeze(
            ops.multinomial(self.pi_logic_outs, num_samples=1),
            1,
        )
        # create learner
        # Split the tensor into batches at known episode cut boundaries.
        # [batch_count * batch_step] -> [batch_step, batch_count]
        batch_step = self.sample_batch_steps

        def split_batches(tensor, drop_last=False):
            # Reshape a flat rollout into [batch_count, batch_step, ...],
            # then swap B and T; optionally drop the last time step.
            batch_count = ops.Shape(tensor)[0] // batch_step
            reshape_tensor = ops.Reshape()(
                tensor,
                ops.concat((ms.Tensor([batch_count, batch_step]), ms.Tensor(ops.shape(tensor)[1:])), axis=0),
            )

            # swap B and T axes
            s1 = ms.Tensor(ops.shape(tensor))
            res = ops.Transpose()(
                reshape_tensor,
                (1, 0) + tuple(range(2, 1 + int(ops.shape(s1)[0]))),
            )

            if drop_last:
                return res[:-1]
            return res

        # V-trace loss over time-major batches; rewards clipped to [-1, 1],
        # discounts masked to zero on episode boundaries (~dones * GAMMA).
        self.loss = vtrace_loss(
            bp_logic_outs=split_batches(self.ph_bp_logic_outs, drop_last=True),
            tp_logic_outs=split_batches(self.pi_logic_outs, drop_last=True),
            actions=split_batches(self.ph_actions, drop_last=True),
            discounts=split_batches(ops.cast(~self.ph_dones, ms.float32) * GAMMA, drop_last=True),
            rewards=split_batches(ops.clip_by_value(self.ph_rewards, Tensor(-1), Tensor(1)), drop_last=True),
            values=split_batches(self.baseline, drop_last=True),
            bootstrap_value=split_batches(self.baseline)[-1],
        )

        global_step = ms.Parameter(Tensor(0, dtype=ms.int32), name="global_step", requires_grad=False)

        # Choose optimizer: adam (with optional lr schedule) or rmsprop.
        if self.opt_type == "adam":
            if self.lr_schedule:
                learning_rate = self._get_lr(global_step)
            else:
                learning_rate = LR
            # NOTE(review): MindSpore optimizers require a params list as the
            # first argument; Adam(learning_rate) alone will not construct.
            optimizer = Adam(learning_rate)
        elif self.opt_type == "rmsprop":
            optimizer = nn.RMSProp(learning_rate=LR, decay=0.99, epsilon=0.1, centered=True)
        else:
            raise KeyError("invalid opt_type: {}".format(self.opt_type))

        # NOTE(review): ops.value_and_grad expects a callable and returns a
        # gradient *function*, not (grad, var) pairs -- the zip below cannot
        # work; this mirrors the old TF compute_gradients API.
        grads_and_vars = ops.value_and_grad(self.loss)

        # global norm
        grads, var = zip(*grads_and_vars)
        grads, _ = ms.ops.clip_by_global_norm(grads, self.grad_norm_clip)
        clipped_gvs = list(zip(grads, var))
        # todo train_op
        # self.train_op = ops.grad(clipped_gvs, global_step=global_step)
        # NOTE(review): MindSpore optimizers have no apply_gradients(); they
        # are called directly with a gradient tuple.
        self.train_op = optimizer.apply_gradients(clipped_gvs, global_step=global_step)

        # fixme: help to show the learning rate among training processing
        # NOTE(review): _lr is a private attribute; the public accessor is
        # optimizer.get_lr() -- confirm before relying on it.
        self.lr = optimizer._lr

        # NOTE(review): MSVariables presumably wraps a network/Cell; here it
        # receives an output tensor -- verify against MSVariables' contract.
        self.actor_var = MSVariables(self.out_actions)


        # NOTE(review): last_layer is a tensor, not a Cell, so it has no
        # trainable_params().
        self.explore_paras = last_layer.trainable_params()

        # self.saver = Saver({t.name: t for t in self.explore_paras}, max_to_keep=self.max_to_keep)

        # todo MyTrainOneStepCell
        self.train_net = TrainOneStepCell(network=self.model, optimizer=optimizer)
        # todo set_train
        self.train_net.set_train()

        return True

    def _get_lr(self, global_step, decay_step=20000):  # originally 20000.
        """Make decay learning rate."""
        lr_schedule = self.lr_schedule
        if len(lr_schedule) != 2:
            logging.warning("Need 2 elements in lr_schedule!\n, "
                            "likes [[0, 0.01], [20000, 0.000001]]")
            logging.fatal("lr_schedule invalid: {}".format(lr_schedule))

        if lr_schedule[0][0] != 0:
            logging.info("lr_schedule[0][1] could been init learning rate")

        # todo: tf.linear_cosine_decay and nn.CosineDecayLR use different
        # formulas -- how should the beta term be converted?
        # NOTE(review): min_lr and max_lr are both lr_schedule[0][1], which
        # makes the cosine decay a constant; the end value lr_schedule[1][1]
        # is never used -- confirm intended schedule.
        learning_rate = nn.CosineDecayLR(lr_schedule[0][1], lr_schedule[0][1], decay_step)(global_step)

        return learning_rate

    # todo train
    def train(self, state, label):
        """Run one training step on a sampled batch; returns the loss value.

        `label` unpacks to (behaviour-policy logits, actions, dones, rewards),
        all numpy arrays converted to Tensors before the forward/backward pass.
        """
        bp_logic_outs, actions, dones, rewards = label
        ph_state = Tensor.from_numpy(state)
        ph_bp_logic_outs = Tensor.from_numpy(bp_logic_outs)
        ph_actions = Tensor.from_numpy(actions)
        ph_dones = Tensor.from_numpy(dones)
        ph_rewards = Tensor.from_numpy(rewards)

        loss = self.train_net(ph_state, ph_bp_logic_outs, ph_actions, ph_dones, ph_rewards).asnumpy()
        return loss

    def predict(self, state):
        """
        Do predict use the newest model.

        NOTE(review): self.predict_net is only created in the commented-out
        code in __init__, so this will raise AttributeError as-is.

        :param: state
        :return: action_logits, action_val, value
        """
        state = Tensor.from_numpy(state)
        pi_logic_outs, baseline, out_actions = self.predict_net(state)
        pi_logic_outs = pi_logic_outs.asnumpy()
        baseline = baseline.asnumpy()
        out_actions = out_actions.asnumpy()
        return pi_logic_outs, baseline, out_actions

    # todo save_model
    def save_model(self, file_name):
        """Save model without meta graph.

        NOTE(review): self.saver / self.sess are TF leftovers (saver is None,
        sess is never defined) -- this needs porting to ms.save_checkpoint.
        """
        ck_name = self.saver.save(self.sess, save_path=file_name, write_meta_graph=False)
        return ck_name
    # todo load_model
    def load_model(self, model_name, by_name=False):
        """Load model with inference variables.

        NOTE(review): currently a no-op; the TF restore call below is
        commented out and has no MindSpore replacement yet.
        """
        # restore_tf_variable(self.sess, self.explore_paras, model_name)
    # todo set_weights
    def set_weights(self, weights):
        """Set weight with memory tensor.

        NOTE(review): self.graph is a TF leftover never set on this class;
        the context manager will raise AttributeError.
        """
        with self.graph.as_default():
            self.actor_var.set_weights(weights)
    # todo get_weightss
    def get_weights(self):
        """Get weights.

        NOTE(review): same self.graph issue as set_weights().
        """
        with self.graph.as_default():
            return self.actor_var.get_weights()





def calc_baseline_loss(advantages):
    """Return the baseline (value) loss: half the sum of squared advantages."""
    squared = ms.ops.square(advantages)
    total = ms.ops.ReduceSum()(squared)
    return 0.5 * total


def calc_entropy_loss(logic_outs):
    """Return the negated policy-entropy term (summed over batch and time)."""
    probs = ms.nn.Softmax()(logic_outs)
    log_probs = ms.nn.LogSoftmax()(logic_outs)
    reduce_sum = ms.ops.ReduceSum()
    # Entropy per step: -sum_a pi(a) * log pi(a) over the action axis.
    per_step_entropy = reduce_sum(-probs * log_probs, axis=-1)
    # Negated so that minimizing the total loss maximizes entropy.
    return -reduce_sum(per_step_entropy)


def calc_pi_loss(logic_outs, actions, advantages):
    """Calculate the policy-gradient loss.

    `actions` are integer action indices (sampled via ops.multinomial in the
    model), so the sparse form of the cross-entropy is required: the default
    SoftmaxCrossEntropyWithLogits(sparse=False) expects one-hot labels with
    the same shape as the logits and would fail on index labels. Arguments are
    passed positionally because Cell construct keywords are unreliable in
    graph mode.
    """
    ce_op = ms.nn.SoftmaxCrossEntropyWithLogits(sparse=True)
    cross_entropy = ce_op(logic_outs, actions)
    # Treat the advantage estimates as constants: no gradient flows through
    # the value targets into the policy update.
    advantages = ms.ops.stop_gradient(advantages)
    pg_loss_per_step = cross_entropy * advantages
    return ms.ops.ReduceSum()(pg_loss_per_step)

def vtrace_loss(
        bp_logic_outs, tp_logic_outs, actions,
        discounts, rewards, values, bootstrap_value):
    """Compute the total IMPALA loss from v-trace corrected returns.

    Combines the policy-gradient loss, the baseline (value) loss weighted by
    0.5, and the entropy regularizer weighted by 0.01.
    """
    # Off-policy corrected value targets and policy-gradient advantages.
    vs, pg_advantages = vtrace.from_logic_outputs(
        behaviour_policy_logic_outputs=bp_logic_outs,
        target_policy_logic_outputs=tp_logic_outs,
        actions=actions,
        discounts=discounts,
        rewards=rewards,
        values=values,
        bootstrap_value=bootstrap_value,
    )

    policy_loss = calc_pi_loss(tp_logic_outs, actions, pg_advantages)
    value_loss = calc_baseline_loss(vs - values)
    entropy_term = calc_entropy_loss(tp_logic_outs)

    return policy_loss + 0.5 * value_loss + 0.01 * entropy_term





# class MyTrainOneStepCell(TrainOneStepCell):
#     def __init__(self, network, optimizer, max_grad_norm, sens=1.0):
#         super(MyTrainOneStepCell, self).__init__(network, optimizer, sens)
#         self.sens = sens
#         self.depend = Depend()
#         self.max_grad_norm = max_grad_norm
#         self.grad_fn = value_and_grad(
#             self.network, grad_position=None, weights=self.weights)
#
#     def construct(self, *inputs):
#         loss, grads = self.grad_fn(*inputs)
#         grads = clip_by_global_norm(grads, self.max_grad_norm)
#         grads = self.grad_reducer(grads)
#         loss = self.depend(loss, self.optimizer(grads))
#         return loss