import numpy as np
from mxnet import nd

'''
# show the attributes contained in nd
print(dir(nd))
# function help
help(nd.ones_like) 
'''

'''
x = nd.arange(16)
size = x.shape # 获取ndarray实例的形状
y = x.reshape((4,4))#将原实例变形状为3*4

#特殊矩阵创建
oo = nd.zeros((2,3,4))#生成两个3*4的ndarray
nm = nd.random.normal(0,1,shape=(3,4))#生成3*4的ndarray，元素取样于均值0，标准差为1的正态分布
nd.ones((3,4))
y.zeros_like() # create zero matrix shape like y

pl = y+nm
ml = y*y #按对应位乘
dv = y/y #按对应位除
ep = y.exp() #按对应位指数运算
atr = nd.dot(y,y.T) #矩阵运算，y.T矩阵转置



#矩阵的逆,numpy and ndarray
tri = nd.array([[2,1,4],[3,1,6],[2,3,5]])
ptr = tri.asnumpy()      #ndarray translate to numpy
inv = np.linalg.inv(ptr) # inverse of a matrix
dtr = nd.array(inv)      #numpy translate to ndarray

cnc1 = nd.concat(y,y.T,dim=0)
cnc2 = nd.concat(y,y.T,dim=1)

x.norm() # 2-norm // seems like nd.norm(x,2)
x.sum() # sum of the total element
x.norm().asscalar() #translate ndarray to python standard

y1 = y[1:3,1:3] # slicing is a left-closed, right-open interval: gets a 2-row by 2-column sub-matrix
id(y1) # read y1's RAM address
'''

from mxnet import autograd

# Automatic differentiation demo: compute the gradient of y = 3 * x^T x
# with respect to x using MXNet's autograd.
x = nd.arange(4).reshape(4, 1)  # column vector [0, 1, 2, 3]^T
x.attach_grad()  # allocate memory to store the gradient w.r.t. x
with autograd.record():  # record operations so the backward pass can be computed
    y = 3 * nd.dot(x.T, x)  # scalar: 3 * sum(x_i^2)
y.backward()  # populate x.grad with dy/dx

# Analytic gradient of y = 3 * x^T x is 6x, so (x.grad - 6*x) must be zero.
# (The original draft checked against 4*x, which is incorrect for this y.)
assert (x.grad - 6 * x).norm().asscalar() == 0, 'gradient should equal 6*x'
# assert expression [, message]: the optional message string documents the
# assertion and makes failures easier to diagnose.
print(x.grad)  # expected: [[0.], [6.], [12.], [18.]]


# help(nd.ones)