import numpy
numpy.random.seed(10)  # fix the global RNG seed so weight init (and runs) are reproducible
class Tape:
    """One record on the gradient tape.

    Holds the names of an op's input/output variables and the backward
    callback that maps output gradients to input gradients.
    """

    def __init__(self, inputs, outputs, function):
        self.inputs = inputs        # list of input variable names
        self.outputs = outputs      # list of output variable names
        self.dfunction = function   # propagate(dl_doutputs) -> dl_dinputs
gradient_tape = []  # global op record, replayed in reverse by Variable.grad
_name = 6  # counter for auto-generated variable names ('v6', 'v7', ...)


def fresh_name():
    """Return a new unique variable name of the form 'vN'."""
    global _name
    current = _name
    _name = current + 1
    return f'v{current}'
class Variable:
    """A numpy-array wrapper whose operations are recorded on the global
    gradient_tape, so Variable.grad can replay them in reverse (reverse-mode
    autodiff). Gradients are keyed by the variable's string name."""

    def __init__(self, array, name=None):  # record the variable's name
        self.array = array
        self.name = name or fresh_name()  # keep a given name, otherwise generate a fresh one

    def __repr__(self):
        return repr(self.array)
    # We need to start with some tensors whose values were not computed
    # inside the autograd. This function constructs leaf nodes.
    # Multiplication of a Variable, tracking gradients

    def __mul__(self, other):
        return ops_mul(self, other)

    def __add__(self, other):
        return ops_add(self, other)

    def __sub__(self, other):
        return ops_sub(self, other)

    def __pow__(self, power, modulo=None):
        return ops_pow(self, power,modulo)

    # NOTE(review): the @staticmethods below name their first argument `self`,
    # but they are meant to be called as Variable.sigmoid(x) etc. — the first
    # argument is an ordinary Variable, not an implicit instance.
    @staticmethod
    def sigmoid(self):
        return ops_sigmoid(self)

    @staticmethod
    def dropout(self,p):
        return ops_dropout(self,p)

    @staticmethod
    def reLU(self):
        return ops_reLU(self)

    @staticmethod
    def softmax(self):
        return ops_softmax(self)

    @staticmethod
    def sin(self):
        return ops_sin(self)

    @staticmethod
    def log(self):
        return ops_log(self)

    @staticmethod
    def sum(self,axis=None):
        return ops_sum(self,axis)

    @staticmethod
    def dot(self,other):
        return ops_dot(self,other)

    @staticmethod
    def exp(self):
        return ops_exp(self)

    @staticmethod
    def grad(l, results):
        """Reverse-mode backward pass.

        Walks the global gradient_tape in reverse, starting from loss
        Variable `l`, and returns [dL/dX for X in results] (None for any
        result that never appeared on the tape).
        """
        dl_d = {}  # map dL/dX for all values X
        dl_d[l.name] = Variable(1.)  # seed: dL/dL = 1

        def gather_grad(entries):
            # look up accumulated gradients by variable name; None if absent
            return [dl_d[entry] if entry in dl_d else None for entry in entries]

        for entry in reversed(gradient_tape):
            dl_doutputs = gather_grad(entry.outputs)
            dl_dinputs = entry.dfunction(dl_doutputs)
            for input, dl_dinput in zip(entry.inputs, dl_dinputs):
                if input not in dl_d:
                    dl_d[input] = dl_dinput
                else:
                    dl_d[input] += dl_dinput  # accumulate when a variable feeds several ops
        return gather_grad(result.name for result in results)

def ops_pow(self, power, modulo):
    """Element-wise power self ** power, recorded on the tape (`modulo` is
    accepted for the __pow__ protocol but unused)."""
    out = Variable(self.array ** power)

    def propagate(dl_doutputs):
        # d(x**p)/dx = p * x**(p-1)
        grad_out, = dl_doutputs
        local_grad = Variable(power * self.array ** (power - 1))
        return [grad_out * local_grad]

    # record the input and output of the op
    gradient_tape.append(Tape(inputs=[self.name], outputs=[out.name], function=propagate))
    return out

def ops_mul(self, other):
    """Element-wise multiply of two Variables, recorded on the tape.

    A plain int/float `other` is promoted to a Variable: the original code
    computed the forward value for scalars but then crashed with
    AttributeError on `other.name` when recording the tape entry, and again
    in propagate where `dl_dx * other` needs Variable operands.
    """
    if isinstance(other, (int, float)):
        other = Variable(other)  # promote so .name and .array exist
    # forward
    x = Variable(self.array * other.array)

    # backward
    def propagate(dl_doutputs):
        dl_dx, = dl_doutputs
        dx_dself = other  # partial derivative of x = self * other w.r.t. self
        dx_dother = self  # partial derivative of x = self * other w.r.t. other
        dl_dself = dl_dx * dx_dself
        dl_dother = dl_dx * dx_dother
        dl_dinputs = [dl_dself, dl_dother]
        return dl_dinputs

    # record the input and output of the op
    tape = Tape(inputs=[self.name, other.name], outputs=[x.name], function=propagate)
    gradient_tape.append(tape)
    return x

def ops_add(self, other):
    """Element-wise add of two Variables, recorded on the tape.

    A plain int/float `other` is promoted to a Variable: the original code
    computed the forward value for scalars but then crashed with
    AttributeError on `other.name` when recording the tape entry.
    """
    if isinstance(other, (int, float)):
        other = Variable(other)  # promote so .name exists for the tape
    x = Variable(self.array + other.array)

    def propagate(dl_doutputs):
        dl_dx, = dl_doutputs
        global n
        # Ad-hoc reduction of 3-D upstream gradients (produced by the tiled
        # backward of ops_dot / ops_softmax) back to 2-D.  `n` is a
        # module-level flag reset to 1 each training iteration, so the first
        # branch runs only once per backward pass.
        if dl_dx.array.ndim==3 and dl_dx.array.shape[1]==dl_dx.array.shape[0] and n==1:  # only the first time
            dl_dx = Variable(dl_dx.array.sum(axis=1))
            n=n+1
        elif dl_dx.array.ndim == 3 and dl_dx.array.shape[1]==x.array.shape[0]:  # key case: collapse the leading axis
            dl_dx = Variable(dl_dx.array.sum(axis=0))
        elif dl_dx.array.ndim == 3 and dl_dx.array.shape[1]!=x.array.shape[0]:
            dl_dx = Variable(dl_dx.array.sum(axis=1))
        dx_dself = Variable(numpy.ones(x.array.shape))
        dx_dother = Variable(numpy.ones(x.array.shape))
        dl_dself = dl_dx * dx_dself
        dl_dother = dl_dx * dx_dother
        return [dl_dself, dl_dother]

    # record the input and output of the op
    tape = Tape(inputs=[self.name, other.name], outputs=[x.name], function=propagate)
    gradient_tape.append(tape)
    return x
def ops_exp(self):
    """Element-wise exponential, recorded on the tape.

    The original was an unimplemented stub (`pass`) that returned None,
    breaking Variable.exp; implemented here analogously to ops_sin/ops_log.
    """
    x = Variable(numpy.exp(self.array))

    def propagate(dl_doutputs):
        dl_dx, = dl_doutputs
        dx_dself = Variable(x.array)  # d(e**x)/dx = e**x, already computed
        dl_dself = dl_dx * dx_dself
        return [dl_dself]

    # record the input and output of the op
    tape = Tape(inputs=[self.name], outputs=[x.name], function=propagate)
    gradient_tape.append(tape)
    return x

def ops_sub(self, other):
    """Element-wise subtraction self - other, recorded on the tape.

    int/float/ndarray operands are promoted to Variables: the original
    computed the forward value for them but then crashed with
    AttributeError on `other.name` when recording the tape entry.
    """
    if isinstance(other, (int, float, numpy.ndarray)):
        other = Variable(other)  # promote so .name exists for the tape
    x = Variable(self.array - other.array)

    def propagate(dl_doutputs):
        dl_dx, = dl_doutputs
        dx_dself = Variable(numpy.ones(x.array.shape))
        dx_dother = Variable(-numpy.ones(x.array.shape))
        dl_dself = dl_dx * dx_dself
        dl_dother = dl_dx * dx_dother
        return [dl_dself, dl_dother]

    # record the input and output of the op
    tape = Tape(inputs=[self.name, other.name], outputs=[x.name], function=propagate)
    gradient_tape.append(tape)
    return x

def ops_sigmoid(self):
    """Element-wise logistic sigmoid 1 / (1 + e**-x), recorded on the tape."""
    s = 1 / (1 + numpy.exp(-self.array))
    out = Variable(s)

    def propagate(dl_doutputs):
        grad_out, = dl_doutputs
        # d sigmoid(x)/dx = sigmoid(x) * (1 - sigmoid(x))
        local_grad = Variable(s * (1 - s))
        return [grad_out * local_grad]

    # record the input and output of the op
    gradient_tape.append(Tape(inputs=[self.name], outputs=[out.name], function=propagate))
    return out

def ops_reLU(self):
    """Element-wise ReLU max(x, 0), recorded on the tape.

    Uses numpy.maximum directly instead of the original
    numpy.max(numpy.stack((a, zeros)), axis=0), which allocated a zeros
    array and a stacked temporary for the same result.
    """
    x = Variable(numpy.maximum(self.array, 0.0))

    def propagate(dl_doutputs):
        dl_dx, = dl_doutputs
        # subgradient: 1 where the input was positive, 0 elsewhere
        dx_dself = Variable(numpy.where(self.array > 0, 1, 0))
        dl_dself = dl_dx * dx_dself
        return [dl_dself]

    tape = Tape(inputs=[self.name], outputs=[x.name], function=propagate)
    gradient_tape.append(tape)
    return x

def ops_softmax(self):
    # Column-wise softmax (normalises over axis 0), recorded on the tape.
    # assumes self.array is 2-D (rows = classes, cols = samples) — the
    # Jacobian construction below indexes shape[0] and shape[1].
    x=Variable(numpy.exp(self.array)/numpy.sum(numpy.exp(self.array),axis=0))
    def propagate(dl_doutputs):
        dl_dx, = dl_doutputs
        # Build the per-column softmax Jacobian as a (rows, rows, cols) tensor:
        # J[i, j, c] = -s_i*s_j for i != j, and s_i*(1 - s_i) on the diagonal.
        dx_dself_array =numpy.zeros((x.array.shape[0],x.array.shape[0],x.array.shape[1]))
        for i in range(x.array.shape[0]):
            tmp=-x.array[i,:]*x.array
            dx_dself_array[i]=tmp
            dx_dself_array[i,i,:]=dx_dself_array[i,i,:]+x.array[i,:]
        dx_dself=Variable(dx_dself_array)
        # NOTE(review): this relies on ops_mul broadcasting the 2-D upstream
        # gradient against the 3-D Jacobian; the extra axis is collapsed later
        # by ops_add's ndim==3 special cases — confirm against the call graph.
        dl_dself = dl_dx * dx_dself
        return [dl_dself]
    tape = Tape(inputs=[self.name], outputs=[x.name], function=propagate)
    gradient_tape.append(tape)
    return x


def ops_sin(self):
    """Element-wise sine, recorded on the tape."""
    out = Variable(numpy.sin(self.array))

    def propagate(dl_doutputs):
        grad_out, = dl_doutputs
        # d sin(x)/dx = cos(x)
        return [grad_out * Variable(numpy.cos(self.array))]

    # record the input and output of the op
    gradient_tape.append(Tape(inputs=[self.name], outputs=[out.name], function=propagate))
    return out
def ops_log(self):
    """Element-wise natural logarithm, recorded on the tape."""
    out = Variable(numpy.log(self.array))

    def propagate(dl_doutputs):
        # d ln(x)/dx = 1/x; propagate is a closure over the enclosing scope,
        # so it still sees self when replayed during the backward pass.
        grad_out, = dl_doutputs
        return [grad_out * Variable(1 / self.array)]

    # record the input and output of the op
    gradient_tape.append(Tape(inputs=[self.name], outputs=[out.name], function=propagate))
    return out
def ops_sum(self, axis=None):
    """Sum of elements, optionally along `axis`, recorded on the tape."""
    out = Variable(numpy.sum(self.array, axis))

    def propagate(dl_doutputs):
        # every input element contributes with weight 1
        grad_out, = dl_doutputs
        ones_like_input = Variable(numpy.ones(self.array.shape))
        return [grad_out * ones_like_input]

    # record the input and output of the op
    gradient_tape.append(Tape(inputs=[self.name], outputs=[out.name], function=propagate))
    return out
def ops_dot(self,other):
    # Matrix/vector product numpy.dot(self, other), recorded on the tape.
    x = Variable(numpy.dot(self.array,other.array))
    def propagate(dl_doutputs):
        dl_dx, = dl_doutputs
        if self.array.ndim==1 and other.array.ndim==1:
            # vector · vector: scalar output, each local gradient is the other operand
            dx_dself = Variable(other.array)
            dx_dother = Variable(self.array)
            dl_dself = dl_dx * dx_dself
            dl_dother=dl_dx * dx_dother
        elif self.array.ndim==2 and other.array.ndim==1:
            # matrix · vector: replicate the vector across the matrix rows
            dx_dself = Variable(numpy.tile(other.array,(self.array.shape[0],1)))
            dx_dother = Variable(self.array)
            dl_dself = dl_dx * dx_dself
            dl_dother=dl_dx*dx_dother
        else:
            # matrix · matrix: tile both operands and the upstream gradient
            # into 3-D tensors so an element-wise product realises the chain
            # rule.  key step
            # NOTE(review): the transpose patterns assume both operands are
            # 2-D and leave an extra axis that the training loop collapses
            # with Variable.sum(..., axis=...) — confirm before reuse.
            dx_dself = Variable(numpy.tile(other.array, (self.array.shape[0], 1,1)).transpose((2,0,1)))
            dx_dother = Variable(numpy.tile(self.array,(other.array.shape[1],1,1)).transpose((1,2,0)))
            dl_dx_self=Variable(numpy.tile(dl_dx.array,(dx_dself.array.shape[2],1,1)).transpose((2,1,0)))
            dl_dx_other=Variable(numpy.tile(dl_dx.array,(dx_dother.array.shape[1],1,1)).transpose((1,0,2)))
            dl_dself =  dl_dx_self * dx_dself
            dl_dother = dl_dx_other * dx_dother
        return [dl_dself,dl_dother]  # closure: propagate captures self/other/x from the enclosing scope

    # record the input and output of the op
    tape = Tape(inputs=[self.name,other.name], outputs=[x.name], function=propagate)
    gradient_tape.append(tape)
    return x
def ops_dropout(self, p):
    """Inverted dropout, recorded on the tape.

    Zeroes each element with probability `p` and scales survivors by
    1/(1-p) so the expectation is unchanged; the backward pass reuses the
    same mask.  Assumes 0 <= p < 1.  The original was an unimplemented
    stub (`pass`) that returned None, breaking Variable.dropout.
    """
    keep = 1.0 - p
    # mask is 0 for dropped elements, 1/keep for kept ones
    mask = (numpy.random.rand(*numpy.shape(self.array)) < keep) / keep
    x = Variable(self.array * mask)

    def propagate(dl_doutputs):
        dl_dx, = dl_doutputs
        # gradient flows only through kept elements, with the same scaling
        dx_dself = Variable(mask)
        dl_dself = dl_dx * dx_dself
        return [dl_dself]

    tape = Tape(inputs=[self.name], outputs=[x.name], function=propagate)
    gradient_tape.append(tape)
    return x
import pandas
def softmax(x):
    """Column-wise softmax: exp(x) normalised over axis 0."""
    exps = numpy.exp(x)
    return exps / numpy.sum(exps, axis=0)
def reLU(x):
    """Rectified linear unit: element-wise max(x, 0)."""
    zeros = numpy.zeros(x.shape)
    return numpy.max(numpy.stack((x, zeros)), axis=0)
# def accuracy(x, y, w, b):
#     pre = predict(x, w, b)
#     return numpy.sum(numpy.where(pre == y, 1, 0)) / len(y)
def activation(x, w1, b1,w2,b2,w3,b3):
    """Forward pass of the 3-layer MLP: softmax(w3·relu(w2·relu(w1·x+b1)+b2)+b3)."""
    hidden1 = reLU(numpy.dot(w1, x) + b1)
    hidden2 = reLU(numpy.dot(w2, hidden1) + b2)
    return softmax(numpy.dot(w3, hidden2) + b3)
def predict(x, w1, b1, w2, b2, w3, b3, targets=None):
    """Classify `x` with the MLP, compare one-hot predictions against
    `targets`, then print and return the accuracy.

    targets defaults to the module-level y_test (the original behaviour).
    Generalised from the original, which hard-coded a batch size of 30 and
    the shape (3, 30), and returned None; sizes are now derived from the
    targets and the accuracy is returned for callers that want it.
    """
    result = activation(x, w1, b1, w2, b2, w3, b3)
    if targets is None:
        targets = y_test  # module-level one-hot test labels, shape (classes, samples)
    n_classes, n_samples = targets.shape
    index = numpy.argmax(result, axis=0)
    onehot = numpy.zeros((n_classes, n_samples))
    onehot[index, range(n_samples)] = 1  # select entries by coordinates
    accuracy = numpy.sum(numpy.all(onehot == targets, axis=0)) / n_samples
    print(accuracy)
    return accuracy
# Load the iris data set: 4 feature columns + 1 string label column, 150 rows.
# NOTE(review): the filename "iris.cv" looks like a typo for "iris.csv" — confirm.
data = pandas.read_csv("iris.cv", header=None)
x = data.iloc[0:150, [0, 1, 2, 3]]  # features (DataFrame, 150 x 4)
y = data.iloc[0:150, 4].values      # class labels as strings
# One-hot encode the three iris classes.
y_convert = numpy.empty((150, 3))
for i in range(len(y)):
    if y[i] == "Iris-setosa":
        y_convert[i] = [1, 0, 0]
    elif y[i] == "Iris-versicolor":
        y_convert[i] = numpy.array([0, 1, 0])
    else:
        y_convert[i] = numpy.array([0, 0, 1])
# Per-class split: 40 training / 10 test rows from each block of 50.
x_train = numpy.empty((120, 4))
x_test = numpy.empty((30, 4))
y_train = numpy.empty((120, 3))
y_test = numpy.empty((30, 3))
x_train[:40], x_train[40:80], x_train[80:] = x[0:40], x[50:90], x[100:140]
x_test[:10], x_test[10:20], x_test[20:] = x[40:50], x[90:100], x[140:150]
y_train[:40], y_train[40:80], y_train[80:120] = y_convert[0:40], y_convert[50:90], y_convert[100:140]
y_test[:10], y_test[10:20], y_test[20:] = y_convert[40:50], y_convert[90:100], y_convert[140:150]
# Transpose to column-per-sample layout: features become (4, N), labels (3, N).
x_train=x_train.T
x_test=x_test.T
y_test=y_test.T
y_train=y_train.T
# Network parameters for a 4 -> 6 -> 4 -> 3 MLP.  Each parameter gets a fixed
# name ('v-1' .. 'v5') so the training loop can rebuild the Variables every
# iteration while the gradient tape keeps referring to the same keys.
w1=Variable(numpy.random.randn(6,4),name='v-1')    #4,120
x=Variable(x_train,name='v0')
b1=Variable(numpy.random.randn(6,1),name='v1')
w2=Variable(numpy.random.randn(4,6),name='v2') #8,120  #6,120
b2=Variable(numpy.random.randn(4,1),name='v3')
w3=Variable(numpy.random.randn(3,4),name='v4') #6,120  #3,120
b3=Variable(numpy.random.randn(3,1),name='v5')
y=Variable(y_train)
eta=0.1  # learning rate

# Full-batch gradient descent on the training set.
for i in range(3000):
    n=1  # reset the flag consumed by ops_add's 3-D gradient reduction
    gradient_tape.clear()  # fresh tape each iteration
    # Cross-entropy loss of the 3-layer MLP on the whole batch.
    # NOTE(review): the -1/240 factor looks like a batch-average constant,
    # but there are 120 training samples — confirm it is intentional.
    f=Variable.sum(y*Variable.log(Variable.softmax(Variable.dot(w3,Variable.reLU(Variable.dot(w2,Variable.reLU(Variable.dot(w1,x)+b1))+b2))+b3)))*Variable(-1/240)
    dw1,db1,dw2,db2,dw3,db3=Variable.grad(f,[w1,b1,w2,b2,w3,b3])
    # Collapse the extra axis left by the tiled backward of ops_dot.
    dw1=Variable.sum(dw1,axis=0)
    db1=Variable.sum(db1,axis=1)
    dw2 = Variable.sum(dw2, axis=0)
    db2 = Variable.sum(db2, axis=1)
    dw3 = Variable.sum(dw3, axis=0)
    db3 = Variable.sum(db3, axis=1)
    # SGD step: rebuild each parameter with its fixed name so the next
    # iteration's tape entries refer to the same gradient keys.
    w1=Variable(w1.array-dw1.array*eta,name='v-1')
    b1=Variable(b1.array-db1.array.reshape(-1,1)*eta,name='v1')
    w2 = Variable(w2.array - dw2.array * eta, name='v2')
    b2 = Variable(b2.array - db2.array.reshape(-1, 1) * eta, name='v3')
    w3 = Variable(w3.array - dw3.array * eta, name='v4')
    b3 = Variable(b3.array - db3.array.reshape(-1, 1) * eta, name='v5')
    predict(x_test,w1.array,b1.array,w2.array,b2.array,w3.array,b3.array)