import numpy as np
from mindspore import Tensor
import mindspore.nn as nn
import mindspore as ms
import mindspore.ops as ops
import mindspore
#
#
# batch_np = np.random.randn(1, 250, 32, 1)
# test_np = np.random.randn(1, 32, 1)

class Config(object):
    """Configuration parameters for the DPCNN text classifier.

    Args:
        dataset: Root directory of the dataset; data files are expected
            under ``<dataset>/data/``.
    """
    def __init__(self, dataset):
        self.model_name = 'DPCNN'
        self.train_path = dataset + '/data/eda_train8.txt'              # training set
        self.dev_path = dataset + '/data/test1.txt'                     # validation set
        self.test_path = dataset + '/data/test1.txt'                    # test set
        # Use a context manager so the file handle is closed
        # deterministically (the original call leaked it).
        with open(dataset + '/data/class.txt', encoding='utf-8') as f:
            self.class_list = [x.strip() for x in f.readlines()]        # class names
        self.vocab_path = dataset + '/data/vocab.pkl'                   # vocabulary file
        self.dropout = 0.5                                              # dropout rate
        self.require_improvement = 1000                                 # stop early if no improvement after this many batches
        self.num_classes = len(self.class_list)                         # number of classes
        self.n_vocab = 0                                                # vocabulary size, assigned at runtime
        self.num_epochs = 30                                            # number of epochs
        self.batch_size = 64                                            # mini-batch size
        self.device = 'CPU'
        self.pad_size = 32                                              # every sentence padded/truncated to this length
        self.learning_rate = 1e-3                                       # learning rate
        self.embedding_pretrained = None                                # optional pretrained embedding Tensor
        # MindSpore Tensors expose per-axis sizes via .shape; the
        # PyTorch-style .size(1) the original used would raise here,
        # since Tensor.size is an int property in MindSpore.
        self.embed = self.embedding_pretrained.shape[1]\
            if self.embedding_pretrained is not None else 300           # embedding dimension
        self.num_filters = 250                                          # number of conv filters (channels)

class ZerosPadding1(nn.Cell):
    """Pad one zero row on both the top and bottom of the sequence axis
    (axis 2): [B, C, H, W] -> [B, C, H + 2, W].

    This restores the sequence length lost by a (3, 1) valid convolution.
    """
    def __init__(self):
        super(ZerosPadding1, self).__init__()

    def construct(self, x):
        B, C, H, W = x.shape
        # Build the pad in the input's own dtype; the original hard-coded
        # float32, which breaks Concat for float16 (or other dtype) inputs.
        pad = ops.Zeros()((B, C, 1, W), x.dtype)
        x = ops.Concat(2)((pad, x, pad))
        return x

class ZerosPadding2(nn.Cell):
    """Pad one zero row at the bottom of the sequence axis (axis 2):
    [B, C, H, W] -> [B, C, H + 1, W].

    Applied before the stride-2 max-pool so the pooled length is H // 2.
    """
    def __init__(self):
        super(ZerosPadding2, self).__init__()

    def construct(self, x):
        B, C, H, W = x.shape
        # Build the pad in the input's own dtype; the original hard-coded
        # float32, which breaks Concat for float16 (or other dtype) inputs.
        pad = ops.Zeros()((B, C, 1, W), x.dtype)
        x = ops.Concat(2)((x, pad))
        return x


class DPCNN(nn.Cell):
    """Deep Pyramid CNN for text classification, MindSpore implementation.

    Input: token-id tensor of shape [batch, seq_len]; output: logits of
    shape [batch, num_classes].
    """
    def __init__(self, config):
        super(DPCNN, self).__init__()
        # padding_idx points at the last vocabulary entry (the PAD token).
        self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1,
                                      embedding_table=mindspore.common.initializer.Normal(sigma=1))
        # Region embedding: convolves 3 tokens across the full embedding width.
        self.conv_region = nn.Conv2d(1, config.num_filters, (3, config.embed), stride=1,
                                     pad_mode='pad', padding=0, weight_init='xavier_uniform', has_bias=True)
        self.conv = nn.Conv2d(config.num_filters, config.num_filters, (3, 1), stride=1,
                              pad_mode='pad', padding=0, weight_init='xavier_uniform', has_bias=True)
        self.max_pool = nn.MaxPool2d(kernel_size=(3, 1), stride=2)
        self.relu = nn.ReLU()
        self.padding1 = ZerosPadding1()   # symmetric pad: H -> H + 2
        self.padding2 = ZerosPadding2()   # bottom pad: H -> H + 1
        # Squeeze only the spatial axes; a bare squeeze() would also drop
        # the batch dimension when batch_size == 1 and break self.fc.
        self.squeeze = ops.Squeeze((2, 3))
        self.fc = nn.Dense(config.num_filters, config.num_classes, weight_init='xavier_uniform')

    def construct(self, x):
        x = self.embedding(x)            # [batch, seq_len, embed]
        x = ops.ExpandDims()(x, 1)       # [batch, 1, seq_len, embed]
        x = self.conv_region(x)          # [batch, num_filters, seq_len-2, 1]
        x = self.padding1(x)             # [batch, num_filters, seq_len, 1]
        x = self.relu(x)
        x = self.conv(x)                 # [batch, num_filters, seq_len-2, 1]
        x = self.padding1(x)             # [batch, num_filters, seq_len, 1]
        x = self.relu(x)
        x = self.conv(x)                 # [batch, num_filters, seq_len-2, 1]
        # Repeated stride-2 pooling shrinks the sequence axis until a
        # single position remains (the "pyramid").
        while x.shape[-2] > 2:
            x = self._block(x)
        x = self.squeeze(x)              # [batch, num_filters]
        x = self.fc(x)                   # [batch, num_classes]
        return x

    def _block(self, x):
        """One pyramid stage: downsample by 2, then a two-conv residual branch."""
        x = self.padding2(x)
        px = self.max_pool(x)            # halves the sequence axis
        x = self.padding1(px)
        x = self.relu(x)
        x = self.conv(x)
        x = self.padding1(x)
        x = self.relu(x)
        x = self.conv(x)
        # Shortcut (residual) connection around the two convolutions.
        x = x + px
        return x













# def ZerosPadding1(x):
#     padding = nn.Pad(paddings=((1, 1), (0, 0)), mode="CONSTANT")
#     batch_size, channel, _, _ = x.shape
#     batch_np = np.random.randn(batch_size, channel, 32, 1)
#     for index, batch in enumerate(x):
#         test_np = np.random.randn(1, 32, 1)
#         for index2, item in enumerate(batch):
#             tmp = item
#             tmp = padding(tmp)
#             # print("tmp shape"+str(tmp.shape))
#             tmp = ops.ExpandDims()(tmp, 0)
#             tmp_np = tmp.asnumpy()
#             if index2 == 0:
#                 test_np = tmp_np
#             else:
#                 test_np = np.concatenate((test_np, tmp_np), axis=0)
#         print(test_np, test_np.shape)
#         tmp_tensor = Tensor(test_np)
#         tmp_tensor = ops.ExpandDims()(tmp_tensor, 0)
#         test_np = tmp_tensor.asnumpy()
#         if index == 0:
#             batch_np = test_np
#         else:
#             batch_np = np.concatenate((batch_np, test_np), axis=0)
#     print(batch_np.shape)
#     out = Tensor(batch_np)
#     return Tensor(out)
#
#
#
# def ZerosPadding2(x):
#     padding = nn.Pad(paddings=((0, 1), (0, 0)), mode="CONSTANT")
#     batch_size, channel, _, _ = x.shape
#     batch_np = np.random.randn(batch_size, channel, 32, 1)
#     for index, batch in enumerate(x):
#         # print('Processing %d batch'%index)
#         test_np = np.random.randn(1, 32, 1)
#         for index2, item in enumerate(batch):
#             tmp = item
#             tmp = padding(tmp)
#             # print("tmp shape"+str(tmp.shape))
#             tmp = ops.ExpandDims()(tmp, 0)
#             tmp_np = tmp.asnumpy()
#             if index2 == 0:
#                 test_np = tmp_np
#             else:
#                 test_np = np.concatenate((test_np, tmp_np), axis=0)
#         print(test_np, test_np.shape)
#         tmp_tensor = Tensor(test_np)
#         tmp_tensor = ops.ExpandDims()(tmp_tensor, 0)
#         test_np = tmp_tensor.asnumpy()
#         if index == 0:
#             batch_np = test_np
#         else:
#             batch_np = np.concatenate((batch_np, test_np), axis=0)
#     print(batch_np.shape)
#     out = Tensor(batch_np)
#     return Tensor(out)
