#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 18 04:44:14 2023

@author: winson
"""

import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor, Parameter
import mindspore.common.initializer as init
from mindspore.ops import composite as C
from mindspore.ops import functional as F
import mindspore.ops as ops
from objective import add_contrastive_loss, add_supervised_loss


class SimpleCNN(nn.Cell):
    """A 3D CNN backbone with four conv -> bias -> ReLU -> max-pool -> BN stages,
    followed by global average pooling and a two-layer dense head.

    Args:
        num_cls (int): number of output classes of the final softmax head.
        in_channels (int): channel count of the input volume (NCDHW layout).
        depth, height, width (int): input spatial size; each should be divisible
            by 16 (four stride-2 poolings) so the global average pool covers the
            whole remaining feature map.
    """

    def __init__(self, num_cls=20, in_channels=1, depth=64, height=64, width=64):
        super(SimpleCNN, self).__init__()
        self.num_cls = num_cls
        # Stage 1. Bug fix: the filter's input-channel dimension now follows
        # `in_channels` — it was hard-coded to 1, silently ignoring the argument
        # and breaking any multi-channel input. Conv3D weights are
        # (out_channel, in_channel, kD, kH, kW).
        self.conv3d1 = ops.Conv3D(out_channel=32, kernel_size=3, pad_mode="same")
        self.weight1 = Parameter(init.initializer(init.XavierUniform(), (32, in_channels, 3, 3, 3)), name='weight1')
        self.bias1 = Parameter(init.initializer('zeros', (32,)), name='bias1')
        self.max_pool3d1 = ops.MaxPool3D(kernel_size=2, strides=2)
        self.batch_norm3d1 = nn.BatchNorm3d(num_features=32)

        # Stages 2-4: identical 32-in / 32-out conv blocks. Kept as explicit
        # attributes (not a CellList/loop) so parameter names and the shard
        # calls in set_strategy stay stable for checkpoints and parallel config.
        self.conv3d2 = ops.Conv3D(out_channel=32, kernel_size=3, pad_mode="same")
        self.weight2 = Parameter(init.initializer(init.XavierUniform(), (32, 32, 3, 3, 3)), name='weight2')
        self.bias2 = Parameter(init.initializer('zeros', (32,)), name='bias2')
        self.max_pool3d2 = ops.MaxPool3D(kernel_size=2, strides=2)
        self.batch_norm3d2 = nn.BatchNorm3d(num_features=32)

        self.conv3d3 = ops.Conv3D(out_channel=32, kernel_size=3, pad_mode="same")
        self.weight3 = Parameter(init.initializer(init.XavierUniform(), (32, 32, 3, 3, 3)), name='weight3')
        self.bias3 = Parameter(init.initializer('zeros', (32,)), name='bias3')
        self.max_pool3d3 = ops.MaxPool3D(kernel_size=2, strides=2)
        self.batch_norm3d3 = nn.BatchNorm3d(num_features=32)

        self.conv3d4 = ops.Conv3D(out_channel=32, kernel_size=3, pad_mode="same")
        self.weight4 = Parameter(init.initializer(init.XavierUniform(), (32, 32, 3, 3, 3)), name='weight4')
        self.bias4 = Parameter(init.initializer('zeros', (32,)), name='bias4')
        self.max_pool3d4 = ops.MaxPool3D(kernel_size=2, strides=2)
        self.batch_norm3d4 = nn.BatchNorm3d(num_features=32)

        # Spatial size after four stride-2 poolings; the "global" average pool
        # uses this as its kernel so it collapses each feature map to 1x1x1.
        pool_d, pool_h, pool_w = int(depth / 2 ** 4), int(height / 2 ** 4), int(width / 2 ** 4)
        print("pool_d:{}, pool_h:{} pool_w:{}".format(pool_d, pool_h, pool_w))
        self.global_avg_pool = ops.AvgPool3D(kernel_size=(pool_d, pool_h, pool_w))
        self.dense1 = nn.Dense(in_channels=32, out_channels=512, activation="relu")
        self.dropout = nn.Dropout(keep_prob=0.7)
        self.dense2 = nn.Dense(in_channels=512, out_channels=num_cls, activation="softmax")
        self.relu = ops.ReLU()
        self.bias_add = ops.BiasAdd()
        # Drop the collapsed D/H/W axes: (N, C, 1, 1, 1) -> (N, C).
        self.squeeze = ops.Squeeze(axis=(2, 3, 4))

    def set_strategy(self, device_num):
        """Configure auto-parallel shard strategies for `device_num` devices.

        Only the first conv, the last max-pool, and the classifier's bias-add
        are sharded; the conv input is split along the depth axis.
        """
        conv3d_strategy = ((1, 1, device_num, 1, 1), (1, 1, 1, 1, 1))
        dense_biasadd_strategy = ((1, 1), (1,))
        self.conv3d1.shard(conv3d_strategy)
        maxpool3d_strategy = ((1, 1, 1, 1, 1),)
        self.max_pool3d4.shard(maxpool3d_strategy)
        self.dense2.bias_add.shard(dense_biasadd_strategy)

    def construct(self, image):
        """Run the four conv stages and the dense head; returns (N, num_cls) softmax outputs."""
        # Each stage: conv -> bias -> ReLU -> 2x down-sample -> batch norm.
        x = self.conv3d1(image, self.weight1)
        x = self.bias_add(x, self.bias1)
        x = self.relu(x)
        x = self.max_pool3d1(x)
        x = self.batch_norm3d1(x)

        x = self.conv3d2(x, self.weight2)
        x = self.bias_add(x, self.bias2)
        x = self.relu(x)
        x = self.max_pool3d2(x)
        x = self.batch_norm3d2(x)

        x = self.conv3d3(x, self.weight3)
        x = self.bias_add(x, self.bias3)
        x = self.relu(x)
        x = self.max_pool3d3(x)
        x = self.batch_norm3d3(x)

        x = self.conv3d4(x, self.weight4)
        x = self.bias_add(x, self.bias4)
        x = self.relu(x)
        x = self.max_pool3d4(x)
        x = self.batch_norm3d4(x)

        # (N, 32, d, h, w) -> (N, 32, 1, 1, 1) -> (N, 32) -> head.
        x = self.global_avg_pool(x)
        x = self.squeeze(x)
        x = self.dense1(x)
        x = self.dropout(x)
        outputs = self.dense2(x)
        return outputs


class SimpleCNNWithLossCell(nn.Cell):
    """
    Wrap a network with its training loss: supervised loss when
    `supervise_training` is True, otherwise a contrastive loss plus an L2
    weight-decay term. Intermediate values are copied into non-trainable
    Parameters so they can be inspected after each step.

    Args:
        network (Cell): The training network.
        batch_size (int): Global batch size; the contrastive monitoring buffers
            are sized from it (the batch_size / 2 factors presumably reflect
            paired augmented views — TODO confirm against add_contrastive_loss).
        supervise_training (bool): Selects the supervised or contrastive path.

    Returns:
        Tensor, the loss of the network.
    """

    def __init__(self, network, batch_size, supervise_training=True):
        super(SimpleCNNWithLossCell, self).__init__()
        self.simple_cnn_network = network
        self.supervise_training = supervise_training
        # Collect the parameters subject to L2 weight decay: every trainable
        # parameter except those belonging to the BatchNorm3d layers (their
        # names contain 'batch_norm3d').
        self.decay_params = []
        self.l2loss_ops = []
        for v in self.simple_cnn_network.trainable_params():
            # print("v:",v)
            if 'batch_norm3d' not in v.name:
                self.decay_params.append(v)
                # Build a no-split shard strategy matching the parameter's
                # rank, e.g. ((1, 1, 1, 1, 1),) for a conv weight, so each
                # L2Loss op runs replicated under auto-parallel.
                strategy = []
                for _ in range(len(v.shape)):
                    strategy.append(1)
                strategy = tuple(strategy)
                strategy = ((strategy),)
                self.l2loss_ops.append(ops.L2Loss().shard(strategy))

        print(self.decay_params)
        self.print = ops.Print()
        # Monitoring-only buffers (requires_grad=False): construct() writes the
        # latest contrastive labels/logits and loss components into them via
        # ops.assign so callers can read them out between steps.
        labels = ops.zeros((int(batch_size / 2), batch_size), ms.float32)
        print("labels", labels.shape)
        self.loss = Parameter(Tensor(0.0, ms.float32), requires_grad=False, name="loss")
        self.labels_con_v = Parameter(labels, requires_grad=False, name="labels_con")
        logits = ops.zeros((int(batch_size / 2), int(batch_size / 2)), ms.float32)
        self.logits_con_v = Parameter(logits.astype(ms.float32), requires_grad=False, name="logits_con")
        self.con_loss_v = Parameter(Tensor(0.0, ms.float32), requires_grad=False, name="con_loss")
        self.weight_decay_v = Parameter(Tensor(0.0, ms.float32), requires_grad=False, name="weight_decay")

    def construct(self, image, label):
        # Forward pass; the network's head outputs feed either loss path.
        projection_head_outputs = self.simple_cnn_network(image)
        if self.supervise_training:
            # Supervised path: plain supervised loss, no weight-decay term here.
            loss = add_supervised_loss(label, projection_head_outputs)
        else:
            loss = None
            if projection_head_outputs is not None:
                con_loss, logits_con, labels_con = add_contrastive_loss(
                    projection_head_outputs,
                    hidden_norm=True,
                    temperature=0.1,
                    strategy=None)
                # loss is always None at this point, so this reduces to
                # loss = con_loss.
                loss = con_loss if loss is None else loss + con_loss
                # F.depend ties each assign to the loss value so the
                # side-effecting writes are ordered before the loss is consumed.
                loss = F.depend(loss, ops.assign(self.labels_con_v, labels_con))
                loss = F.depend(loss, ops.assign(self.logits_con_v, logits_con))
                loss = F.depend(loss, ops.assign(self.con_loss_v, con_loss))
            # Sum of L2 norms of the decay parameters, using the pre-built
            # sharded L2Loss ops from __init__ (parallel lists by index).
            l2_losses = []
            for i in range(len(self.decay_params)):
                decay = self.l2loss_ops[i](self.decay_params[i])
                l2_losses.append(decay)
            # l2_losses = [ops.L2Loss()(v) for v in self.simple_cnn_network.trainable_params()
            #              if 'batch_normalization' not in v.name]
            weight_decay = 1e-6 * ops.AddN()(l2_losses)
            # NOTE(review): if projection_head_outputs were ever None, `loss`
            # would still be None here and this assign would fail — confirm the
            # network can never return None on this path.
            loss = F.depend(loss, ops.assign(self.loss, loss))
            loss = F.depend(loss, ops.assign(self.weight_decay_v, weight_decay))
            loss += weight_decay
        return loss
