# coding: UTF-8

# from os import pread

import mindspore.nn as nn
import mindspore.ops as ops
import mindspore.ops.operations as P
from mindspore import Tensor
from mindspore.nn.cell import Cell
import mindspore.ops.functional as F
import mindspore
# class Config(object):

#     """配置参数"""
#     def __init__(self, dataset, embedding):
#         self.model_name = 'DPCNN'
#         self.train_path = dataset + '/data/train.txt'                                # 训练集
#         self.dev_path = dataset + '/data/dev.txt'                                    # 验证集
#         self.test_path = dataset + '/data/test.txt'                                  # 测试集
#         self.class_list = [x.strip() for x in open(
#             dataset + '/data/class.txt', encoding='utf-8').readlines()]              # 类别名单
#         self.vocab_path = dataset + '/data/vocab.pkl'                                # 词表
#         self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'        # 模型训练结果
#         self.log_path = dataset + '/log/' + self.model_name
#         self.embedding_pretrained = ms.Tensor(
#             np.load(dataset + '/data/' + embedding)["embeddings"].astype('float32'))\
#             if embedding != 'random' else None                                       # 预训练词向量

#         self.dropout = 0.5                                              # 随机失活
#         self.require_improvement = 1000                                 # 若超过1000batch效果还没提升，则提前结束训练
#         self.num_classes = len(self.class_list)                         # 类别数
#         self.n_vocab = self.embedding_pretrained.shape[0]\
#             if self.embedding_pretrained is not None else 4762           # 词表大小
#         self.num_epochs = 20                                            # epoch数
#         self.batch_size = 128                                           # mini-batch大小
#         self.pad_size = 32                                              # 每句话处理成的长度(短填长切)
#         self.learning_rate = 1e-3                                       # 学习率
#         self.embed = self.embedding_pretrained.shape[1]\
#             if self.embedding_pretrained is not None else 300           # 字向量维度
#         self.num_filters = 250                                          # 卷积核数量(channels数)


# '''Deep Pyramid Convolutional Neural Networks for Text Categorization'''

class SoftmaxCrossEntropyExpand(Cell):
    r"""
    Computes softmax cross entropy between logits and labels. Implemented by expanded formula.

    This is a wrapper of several primitive operations rather than a single fused op.

    .. math::
        \ell(x_i, t_i) = -log\left(\frac{\exp(x_{t_i})}{\sum_j \exp(x_j)}\right),
    where :math:`x_i` is a 1D score Tensor, :math:`t_i` is the target class.

    Note:
        When argument sparse is set to True, the format of label is the index
        range from :math:`0` to :math:`C - 1` instead of one-hot vectors.

    Args:
        sparse(bool): Specifies whether labels use sparse format or not. Default: False.

    Inputs:
        - **input_data** (Tensor) - Tensor of shape :math:`(x_1, x_2, ..., x_R)`.
        - **label** (Tensor) - Tensor of shape :math:`(y_1, y_2, ..., y_S)`.

    Outputs:
        Tensor, a scalar tensor including the mean loss.

    Examples:
        >>> loss = nn.SoftmaxCrossEntropyExpand(sparse=True)
        >>> input_data = Tensor(np.ones([64, 512]), dtype=mindspore.float32)
        >>> label = Tensor(np.ones([64]), dtype=mindspore.int32)
        >>> loss(input_data, label)
    """
    def __init__(self, sparse=False):
        super(SoftmaxCrossEntropyExpand, self).__init__()
        self.exp = P.Exp()
        self.reduce_sum = P.ReduceSum(keep_dims=True)
        # OneHot converts sparse integer labels into one-hot vectors on demand.
        self.onehot = P.OneHot()
        self.on_value = Tensor(1.0, mindspore.float32)
        self.off_value = Tensor(0.0, mindspore.float32)
        self.div = P.Div()
        self.log = P.Log()
        self.sum_cross_entropy = P.ReduceSum(keep_dims=False)
        self.mul = P.Mul()
        self.mul2 = P.Mul()
        self.reduce_mean = P.ReduceMean(keep_dims=False)
        self.sparse = sparse
        self.reduce_max = P.ReduceMax(keep_dims=True)
        self.sub = P.Sub()

    def construct(self, logit, label):
        """Compute the mean softmax cross-entropy loss over the batch.

        Args:
            logit (Tensor): Unnormalized class scores, shape (N, C).
            label (Tensor): Class indices of shape (N,) when ``sparse`` is True,
                otherwise one-hot vectors of shape (N, C).

        Returns:
            Tensor, scalar mean loss.
        """
        # Subtract the per-row max before exp for numerical stability.
        logit_max = self.reduce_max(logit, -1)
        exp = self.exp(self.sub(logit, logit_max))
        exp_sum = self.reduce_sum(exp, -1)
        softmax_result = self.div(exp, exp_sum)
        if self.sparse:
            # Expand index labels to one-hot with depth = number of classes.
            label = self.onehot(label, F.shape(logit)[1], self.on_value, self.off_value)

        softmax_result_log = self.log(softmax_result)
        # Cross entropy per sample: -sum(label * log_softmax) over the class axis.
        loss = self.sum_cross_entropy(self.mul(softmax_result_log, label), -1)
        loss = self.mul2(F.scalar_to_array(-1.0), loss)
        loss = self.reduce_mean(loss, -1)

        return loss

class DPCNN(nn.Cell):
    """Deep Pyramid Convolutional Neural Networks for Text Categorization.

    Region embedding followed by two equal-width convolutions, then repeated
    downsampling blocks (stride-2 max-pool + two convs with a residual
    shortcut) until the sequence axis collapses, and a final dense classifier.

    Args:
        vocab_len (int): Vocabulary size for the embedding table.
        word_len (int): Stored sequence length (kept on the instance; not read here).
        num_classes (int): Number of output classes.
        vec_length (int): Embedding (word-vector) dimension.
        num_filters (int): Number of convolution channels.
        embedding_table (Union[str, Tensor]): Embedding initializer. Default: 'uniform'.
    """

    def __init__(self, vocab_len, word_len, num_classes, vec_length, num_filters, embedding_table='uniform'):
        super(DPCNN, self).__init__()
        self.vec_length = vec_length
        self.word_len = word_len
        self.num_classes = num_classes
        self.num_filters = num_filters

        self.unsqueeze = P.ExpandDims()
        self.embedding = nn.Embedding(vocab_len, self.vec_length, embedding_table=embedding_table)
        # Region embedding: convolves over 3 words across the full embedding width,
        # collapsing the embedding axis to size 1.
        self.conv_region = nn.Conv2d(1, self.num_filters, (3, self.vec_length), stride=1, pad_mode='valid')
        self.conv = nn.Conv2d(self.num_filters, self.num_filters, (3, 1), stride=1, pad_mode='valid')
        self.max_pool = nn.MaxPool2d(kernel_size=(3, 1), stride=2)
        self.padding1 = nn.Pad(((0, 0), (0, 0), (1, 1), (0, 0)))    # pad top and bottom
        self.padding2 = nn.Pad(((0, 0), (0, 0), (0, 1), (0, 0)))    # pad bottom only
        self.relu = nn.ReLU()
        # Created once here instead of per forward pass (was rebuilt inside
        # construct() on every call in the original code).
        self.squeeze = ops.Squeeze()
        self.fc = nn.Dense(self.num_filters, self.num_classes)

    def construct(self, x):
        """Forward pass: token ids -> class logits.

        Args:
            x (Tensor): Token-id tensor; assumed shape (batch_size, seq_len)
                — TODO confirm against the caller.

        Returns:
            Tensor of shape (batch_size, num_classes).
        """
        x = self.unsqueeze(x, 1)            # (batch, 1, seq_len)
        x = self.embedding(x)               # (batch, 1, seq_len, vec_length)
        x = self.conv_region(x)             # (batch, num_filters, seq_len-2, 1)
        x = self.padding1(x)                # restore seq axis: (batch, num_filters, seq_len, 1)
        x = self.relu(x)
        x = self.conv(x)                    # (batch, num_filters, seq_len-2, 1)
        x = self.padding1(x)
        x = self.relu(x)
        x = self.conv(x)
        # Halve the sequence axis until it is collapsed (<= 2).
        while x.shape[2] > 2:
            x = self._block(x)
        # NOTE(review): Squeeze() with no axis drops every size-1 dim, so a
        # batch of size 1 would also lose its batch axis — confirm callers
        # never feed batch_size == 1.
        x = self.squeeze(x)                 # (batch, num_filters)
        x = self.fc(x)
        return x

    def _block(self, x):
        """One pyramid block: downsample by 2, two convs, residual shortcut."""
        x = self.padding2(x)
        px = self.max_pool(x)               # sequence axis halved

        x = self.padding1(px)
        x = self.relu(x)
        x = self.conv(x)

        x = self.padding1(x)
        x = self.relu(x)
        x = self.conv(x)

        # Shortcut connection around the two convolutions.
        x = x + px
        return x
