import numpy as np 
from copy import deepcopy 

#__name__ = "test" # test scope: uncomment to run the test block at the bottom of the file


#Tensor variable
class tensor():
    """Forward/backward-mode AD node wrapping a scalar or a numpy array.

    Attributes:
        scalar: True when the payload is a plain Python number.
        val: the payload (number or np.ndarray).
        fgrad: forward-mode gradient, seeded via add_grad().
        bgrad: backward-mode gradient, accumulated by op backward() calls.
        former_val: parent nodes in the computation graph.
    """
    def __init__(self, x, scalar = False):
        self.scalar = scalar
        # Plain numbers are always scalars regardless of the flag.
        if isinstance(x, (int, float)):
            self.scalar = True
        self.former_val = []
        if self.scalar:
            self.val = x
            self.fgrad = 0
            self.bgrad = 0
        else:
            self.val = np.array(x)
            self.bgrad = np.zeros(self.val.shape)
            self.fgrad = np.zeros(self.val.shape)

    def reset_bgrad(self):
        """Zero the accumulated backward-mode gradient."""
        if self.scalar:
            self.bgrad = 0
        else:
            self.bgrad = np.zeros(self.val.shape)

    def add_grad(self, loc = 0): # loc must be a tuple index for array tensors
        """Seed the forward-mode gradient at position loc (ignored for scalars)."""
        if self.scalar:
            self.fgrad = 1
        else:
            self.fgrad[loc] = 1

    def __add__(self, other):
        if isinstance(other, (int, float, np.ndarray, list, tuple)):
            other = tensor(other)
        return ad_sum([self, other])

    def __radd__(self, other):
        if isinstance(other, (int, float, np.ndarray, list, tuple)):
            other = tensor(other)
        return ad_sum([self, other])

    def __sub__(self, other):
        if isinstance(other, (int, float, np.ndarray, list, tuple)):
            other = tensor(other)
        return ad_sum([self, ad_negative(other)])

    def __rsub__(self, other):
        # Bug fix: __rsub__ is invoked for `other - self`, so *self* must be
        # negated. The old code returned `self - other` instead.
        if isinstance(other, (int, float, np.ndarray, list, tuple)):
            other = tensor(other)
        return ad_sum([other, ad_negative(self)])

    def __neg__(self):
        return ad_negative(self)

    def __mul__(self, other):
        if isinstance(other, (int, float, np.ndarray, list, tuple)):
            other = tensor(other)
        return ad_scal_prod(self, other)

    def __rmul__(self, other):
        if isinstance(other, (int, float, np.ndarray, list, tuple)):
            other = tensor(other)
        return ad_scal_prod(self, other)

    def __pow__(self, a):
        return ad_power(self, a)

#Op-node classes (the differentiable operations)
class ad_mat_prod(tensor): # Matrix product of tensors (2-D only; higher ranks would need a separate einsum op)
    """Node computing np.matmul(x1, x2); records itself on the global sequence."""
    def __init__(self, x1, x2): 
        # Forward value and forward-mode gradient (product rule).
        val = np.matmul(x1.val, x2.val) 
        fgrad = np.matmul(x1.val, x2.fgrad) + np.matmul(x1.fgrad, x2.val) 
        super().__init__(val) 
        self.fgrad = fgrad 
        self.former_val.append(x1)
        self.former_val.append(x2) 
        add_sequence(self) 

    def backward(self): # upstream gradient is read from self.bgrad (preferably seeded via a tensor)
        # Promote a 1-D upstream gradient to a column so matmul shapes line up.
        if len(self.bgrad.shape) == 1: 
            self.bgrad = np.expand_dims(self.bgrad, 1) 
        
        # dL/dx1 = bgrad @ x2^T; promote a 1-D x2 to a column first.
        if len(self.former_val[1].val.shape) == 1: 
            fv1 = np.expand_dims(self.former_val[1].val, 1)
        else: 
            fv1 = self.former_val[1].val 
        self.former_val[0].bgrad += np.matmul(self.bgrad, np.transpose(fv1)) 
        # dL/dx2 = x1^T @ bgrad; flatten back to 1-D when x2 was a vector.
        if len(self.former_val[1].bgrad.shape) == 1: 
            self.former_val[1].bgrad += np.matmul(np.transpose(self.former_val[0].val), self.bgrad).reshape(-1)
        else: 
            self.former_val[1].bgrad += np.matmul(np.transpose(self.former_val[0].val), self.bgrad) 

class ad_transpose(tensor): # Transpose of a 2-D tensor
    """Node computing np.transpose(x)."""
    def __init__(self, x):
        super().__init__(np.transpose(x.val))
        self.fgrad = np.transpose(x.fgrad)
        self.former_val.append(x)
        add_sequence(self)

    def backward(self):
        # Bug fix: accumulate (+=) like every other op instead of overwriting,
        # so a parent used by several nodes keeps all gradient contributions.
        self.former_val[0].bgrad += np.transpose(self.bgrad)

class ad_concat(tensor):
    """Node concatenating a list of tensors along `axis`."""
    def __init__(self, x, axis = 0):
        self.axis = axis
        vals = [part.val for part in x]
        fgrads = [part.fgrad for part in x]
        super().__init__(np.concatenate(vals, axis = axis))
        self.fgrad = np.concatenate(fgrads, axis = axis)
        self.former_val.extend(x)
        add_sequence(self)

    def backward(self): # undo the concatenation with np.split()
        # Cumulative sizes along the concat axis give the split points.
        offsets = []
        running = 0
        for parent in self.former_val:
            running += parent.val.shape[self.axis]
            offsets.append(running)

        pieces = np.split(self.bgrad, offsets, axis = self.axis)
        for parent, piece in zip(self.former_val, pieces):
            parent.bgrad += piece

class ad_expand_dim(tensor):
    """Node inserting a size-1 axis at `axis` (np.expand_dims)."""
    def __init__(self, x, axis):
        self.axis = axis
        super().__init__(np.expand_dims(x.val, axis))
        self.fgrad = np.expand_dims(x.fgrad, axis)
        self.former_val.append(x)
        add_sequence(self)

    def backward(self):
        # Drop the inserted size-1 axis before accumulating into the parent.
        self.former_val[0].bgrad += np.squeeze(self.bgrad, axis = self.axis)

class ad_tile(tensor):
    """Node tiling x by `tileshape` (np.tile semantics)."""
    def __init__(self, x, tileshape):
        self.tileshape = np.array(tileshape)
        # Axes tiled more than once must be summed in backward, because each
        # source value was copied tileshape[i] times. (Was `> 0`, which also
        # triggered a useless split/sum for untiled axes.)
        self.sum_axis = np.array(self.tileshape > 1)
        super().__init__(np.tile(x.val, tileshape))
        self.fgrad = np.tile(x.fgrad, tileshape)
        self.former_val.append(x)
        add_sequence(self)

    def backward(self):
        # Undo the tiling: split each tiled axis into its copies and sum them.
        bgrad = deepcopy(self.bgrad)
        for i in range(self.sum_axis.shape[0]):
            if self.sum_axis[i]:
                bgrad = np.sum(np.array(np.split(bgrad, self.tileshape[i], i)), axis = 0)
        # Bug fix: removed a leftover debug print of bgrad.shape.
        self.former_val[0].bgrad += bgrad

class ad_reshape(tensor):
    """Node reshaping x to `reshape` (np.reshape semantics)."""
    def __init__(self, x, reshape):
        super().__init__(np.reshape(x.val, reshape))
        # Bug fix: the forward-mode gradient is the parent's fgrad reshaped —
        # the old code reshaped an array of ones, discarding x.fgrad entirely.
        self.fgrad = np.reshape(x.fgrad, reshape)
        self.former_val.append(x)
        add_sequence(self)

    def backward(self):
        # Reshape the upstream gradient back to the parent's original shape.
        self.former_val[0].bgrad += np.reshape(self.bgrad, self.former_val[0].val.shape)
    
class ad_scal_prod(tensor): # Elementwise (Hadamard/scalar) product of two tensors
    """Node computing np.multiply(a, b); supports scalar-by-array broadcasting."""
    def __init__(self, a, b):
        self.former_scalar = [a.scalar, b.scalar]
        val = np.multiply(a.val, b.val)
        fgrad = np.multiply(a.val, b.fgrad) + np.multiply(a.fgrad, b.val) # product rule
        scalar = a.scalar and b.scalar
        super().__init__(val, scalar)
        self.fgrad = fgrad
        self.former_val.append(a)
        self.former_val.append(b)
        add_sequence(self)

    def backward(self):
        bgrad0 = np.multiply(self.bgrad, self.former_val[1].val)
        bgrad1 = np.multiply(self.former_val[0].val, self.bgrad)
        # When one operand is a scalar broadcast over an array, the chain rule
        # requires *summing* its gradient over every element it multiplied.
        # Bug fix: the old code used np.max, which is only correct if all
        # entries happen to be equal.
        if self.former_scalar[0] and not self.former_scalar[1]:
            bgrad0 = np.sum(bgrad0)
        if self.former_scalar[1] and not self.former_scalar[0]:
            bgrad1 = np.sum(bgrad1)

        self.former_val[0].bgrad += bgrad0
        self.former_val[1].bgrad += bgrad1

class ad_sum(tensor): # Sum of several tensors
    """Node computing the elementwise sum of a list of tensors."""
    def __init__(self, x): # x: list of tensors, all scalar or all same shape
        if x[0].scalar:
            val = 0
            fgrad = 0
        else:
            val = np.zeros(x[0].val.shape)
            fgrad = np.zeros(x[0].fgrad.shape)
        # Accumulate values and forward gradients term by term.
        for term in x:
            val += term.val
            fgrad += term.fgrad

        super().__init__(val, x[0].scalar)
        self.fgrad = fgrad
        self.former_val.extend(x)

        add_sequence(self)

    def backward(self):
        # d(sum)/d(addend) = 1: pass the upstream gradient straight through.
        for parent in self.former_val:
            parent.bgrad += self.bgrad

class ad_negative(tensor): # Negation of a tensor
    """Node computing -x."""
    def __init__(self, x):
        super().__init__(-x.val, x.scalar)
        self.fgrad = -x.fgrad
        self.former_val.append(x)
        add_sequence(self)

    def backward(self):
        # Bug fix: accumulate (+=) like every other op instead of overwriting,
        # so a parent used by several nodes keeps all gradient contributions.
        self.former_val[0].bgrad += -self.bgrad

class ad_power(tensor): # Tensor power
    """Node computing x ** a for a fixed float exponent a (not a tensor)."""
    def __init__(self, x, a): # a is a plain float; it is not a variable/tensor
        self.a = a
        val = np.power(x.val, self.a)
        super().__init__(val, x.scalar)
        if self.a == 0:
            # x**0 is the constant 1, so its derivative is 0. Bug fix: the old
            # code passed x.fgrad through unchanged. Multiplying by 0 keeps
            # the scalar/array form of fgrad.
            self.fgrad = x.fgrad * 0
        else:
            self.fgrad = a*np.power(x.val, a-1)*x.fgrad
        self.former_val.append(x)
        add_sequence(self)

    def backward(self):
        if self.a == 0:
            # Derivative of a constant: contribute nothing.
            pass
        else:
            self.former_val[0].bgrad += self.bgrad*self.a*np.power(self.former_val[0].val, self.a-1)

class ad_innersum(tensor): # Sum over one axis of a tensor
    """Node computing np.sum(x, axis); summing a 1-D tensor yields a scalar."""
    def __init__(self, x, axis = 0):
        self.axis = axis
        val = np.sum(x.val, axis = self.axis)
        fgrad = np.sum(x.fgrad, axis = self.axis)
        scalar = len(x.val.shape) == 1
        super().__init__(val, scalar)
        self.fgrad = fgrad
        self.former_val.append(x)
        add_sequence(self)

    def backward(self):
        # Broadcast the upstream gradient back along the summed axis.
        # Bug fix: the old expand_dims(..., axis-1) + flat np.tile produced a
        # wrongly shaped/ordered gradient for 2-D inputs; inserting the axis
        # at `axis` and repeating along it restores the parent's shape exactly
        # (and is identical to the old code in the 1-D case).
        n = self.former_val[0].val.shape[self.axis]
        bgrad = np.repeat(np.expand_dims(self.bgrad, self.axis), n, axis = self.axis)
        self.former_val[0].bgrad += bgrad

class ad_exp(tensor):
    """Node computing exp(x); d/dx exp(x) = exp(x)."""
    def __init__(self, x):
        expx = np.exp(x.val)
        super().__init__(expx)
        self.fgrad = expx*x.fgrad
        self.former_val.append(x)
        add_sequence(self)

    def backward(self):
        self.former_val[0].bgrad += np.exp(self.former_val[0].val)*self.bgrad

class ad_ln(tensor):
    """Node computing the natural logarithm of x."""
    def __init__(self, x):
        super().__init__(np.log(x.val))
        # Bug fix: d/dx ln(x) = 1/x — the old code multiplied by ln(x) instead.
        self.fgrad = x.fgrad/x.val
        self.former_val.append(x)
        add_sequence(self)

    def backward(self):
        # Same fix as fgrad: chain rule uses 1/x, not ln(x).
        self.former_val[0].bgrad += self.bgrad/self.former_val[0].val

class ad_sin(tensor):
    # TODO: not implemented yet. Following the ad_exp pattern it should take x,
    # set val = sin(x.val), fgrad = cos(x.val)*x.fgrad, and accumulate
    # bgrad*cos(x) in backward().
    def __init__(self):
        pass

class ad_cos(tensor):
    # TODO: not implemented yet. Following the ad_exp pattern it should take x,
    # set val = cos(x.val), fgrad = -sin(x.val)*x.fgrad, and accumulate
    # -bgrad*sin(x) in backward().
    def __init__(self):
        pass

class ad_sigmoid(tensor):
    # TODO: not implemented yet. Following the ad_exp pattern it should take x,
    # set val = s = 1/(1+exp(-x.val)), fgrad = s*(1-s)*x.fgrad, and accumulate
    # bgrad*s*(1-s) in backward().
    def __init__(self):
        pass

class ad_softmax(tensor): # R^N -> R^N
    """Node computing softmax(x) for a 1-D tensor, using the full Jacobian."""
    def __init__(self, x):
        # Plain numpy here: the Jacobian DS below already carries the exact
        # derivative, so no extra ad_exp graph node is needed (the old one
        # only ever back-propagated zeros).
        ex = np.exp(x.val)
        val = ex/np.sum(ex)
        super().__init__(val)
        # Softmax Jacobian: DS[i,j] = -s_i*s_j for i != j, DS[i,i] = s_i*(1-s_i).
        # Bug fix: build it from the softmax *output* s, not from the input x.
        sval = val.reshape((-1, 1))
        self.DS = -np.matmul(sval, np.transpose(sval))
        for i in range(sval.shape[0]):
            self.DS[i, i] = val[i]*(1-val[i])
        # fgrad: matmul(DS (k*k), fgrad (k*1)) = sum_j ds_i/da_j * da_j/dx
        self.fgrad = np.matmul(self.DS, x.fgrad)
        self.former_val.append(x)
        add_sequence(self)

    def backward(self):
        # matmul(bgrad (1*k), DS (k*k)) = sum_i dz/ds_i * ds_i/da_j
        self.former_val[0].bgrad += np.matmul(self.bgrad, self.DS)

class ad_ReLU(tensor): # Generalized ReLU; abs is the same family with lower = -1
    """Piecewise-linear activation: slope `upper` for x >= 0, `lower` for x < 0."""
    def __init__(self, x, lower = 0, upper = 1):
        # Convention: d ReLU(x)/dx = upper at x = 0.
        self.lower = lower
        self.upper = upper
        nonneg = (x.val >= 0)
        slope = nonneg*upper + (1-nonneg)*lower
        super().__init__(x.val*slope)
        self.fgrad = x.fgrad*slope

        self.former_val.append(x)
        add_sequence(self)

    def backward(self):
        nonneg = (self.former_val[0].val >= 0)
        slope = nonneg*self.upper + (1-nonneg)*self.lower
        self.former_val[0].bgrad += self.bgrad*slope

#Computation-graph (flow) construction and management
class Flow():
    """Records one computation graph via the module-global `sequence` list.

    Creating a Flow resets the global recording list; every ad_* op appends
    itself to it through add_sequence(). end() captures the recorded list on
    the instance and resets the global, so only one flow records at a time.
    """
    def __init__(self):
        global sequence
        sequence = []
        self.input = []

    def add_input(self, input = []): # NOTE: the default [] is read-only here, never mutated
        """Register the leaf tensors whose bgrad should be collected."""
        for node in input:
            self.input.append(node)

    def end(self):
        """Freeze the recorded op list on this flow and reset the recorder."""
        global sequence
        self.sequence = sequence # keep the list object itself (not a copy), so later ops see it
        sequence = []

    def backward(self):
        """Run backward() on every recorded op in reverse order, then collect
        the bgrad of each registered input into self.bgrad."""
        # Perf fix: iterate one reversed view instead of rebuilding the
        # reversed list (self.sequence[::-1]) on every loop iteration.
        for node in reversed(self.sequence):
            node.backward()

        self.bgrad = [node.bgrad for node in self.input]

    def append(self, other):
        """Concatenate the op sequences of a list of other flows onto this one."""
        for flow in other:
            self.sequence.extend(flow.sequence)

#Backend helpers for the global `sequence` list, called by the op classes or used for testing
def reset_sequence(): # Dangerous: prefer Flow().end(), which also keeps a reference to the old list
    """Rebind the module-global op-recording list to a fresh empty list."""
    global sequence
    sequence = []

def add_sequence(x): # Internal helper: every ad_* op calls this to record itself on the global sequence
    """Append op node x to the module-global op-recording list."""
    global sequence
    sequence.append(x)

#Layers 
class Linear(): # Fully connected layer; x, w (and optional b) must be tensors
    """out = x @ w (+ b), recorded in its own Flow (self.f).

    For a scalar x the product degenerates to plain multiplication.
    """
    def __init__(self, x, w, b = None):
        self.f = Flow()
        # Bug fix: use `is None` (identity), not `== None` — tensors/arrays
        # may not compare cleanly with ==.
        if b is None:
            self.f.add_input([x, w])
            if x.scalar:
                self.out = x*w
            else:
                self.out = ad_mat_prod(x, w)
        else:
            self.f.add_input([x, w, b])
            if x.scalar:
                self.out = x*w + b
            else:
                self.out = ad_mat_prod(x, w) + b

        self.f.end()

#Loss 
class Cross_Entropy(): 
    # TODO: not implemented yet.
    def __init__(self, p, q): # p: true distribution, q: predicted distribution
        pass 

class MSE():
    """Half mean-squared-error loss: out = 0.5/N * sum((y - yhat)^2).

    Builds its own Flow (self.f) and pre-seeds out.bgrad = 1 so a subsequent
    backward pass starts from the loss.
    """
    def __init__(self, y, yhat):
        self.f = Flow()
        self.f.add_input([y, yhat])
        y_flat = ad_reshape(y, -1)
        yhat_flat = ad_reshape(yhat, -1)
        n = y_flat.val.shape[0]
        scale = 0.5/n
        self.out = ad_innersum(ad_power(y_flat - yhat_flat, 2))*scale
        self.f.end()
        self.out.bgrad = 1

#Regularization 
class Ln_Regularization(): 
    # TODO: not implemented yet (Lp-norm penalty on weights b).
    def __init__(self, b): 
        pass 

#Gradient descent
class Optimizer():
    """Gradient-descent optimizer (stub; the update step currently lives in Model.train)."""
    def __init__(self):
        # Bug fix: __init__ was declared without `self`, so Optimizer()
        # raised TypeError.
        pass

class Model():
    """Sequential model: stacked layers plus a loss, trained by plain gradient descent."""
    def __init__(self):
        self.w_sequence = []      # weight tensors, one per layer
        self.w_grad = []          # last computed weight gradients
        self.func_sequence = []   # layer constructors (e.g. Linear)
        self.flow_sequence = []   # Flows recorded during one forward pass
        self.train_parameters()   # install default hyper-parameters
        self.prev_train_loss = np.nan
        self.tl_diff = 1E-6       # relative convergence tolerance

    def add_layer(self, f, w_shape, activation = None):
        """Register layer constructor f with N(0,1)-initialized weights of
        shape w_shape. (`activation` is accepted but not used yet.)"""
        # Only the weights and the constructor are stored; f is called at train time.
        self.w_sequence.append(tensor(np.random.normal(0, 1, w_shape)))
        self.func_sequence.append(f)

    def add_loss(self, f):
        """Register the loss constructor (e.g. MSE)."""
        self.loss_f = f

    def train_parameters(self, Batch_Size = None, epoch = 100, lr = 0.1):
        """Set training hyper-parameters (Batch_Size=None means full batch)."""
        self.Batch_Size = Batch_Size
        self.epoch = epoch
        self.lr = lr # learning rate

    def train(self, train_x, train_y, test_x = None, test_y = None):
        """Run gradient descent for `epoch` epochs or until relative convergence."""
        # Bug fix: identity check `is None` instead of `== None`.
        if self.Batch_Size is None:
            self.train_x = np.expand_dims(train_x, 0)
            self.train_y = np.expand_dims(train_y, 0)
        else:
            # TODO: split the sample into mini-batches
            pass

        for epoch_idx in range(self.epoch):
            for i in range(1): # TODO: iterate over batches once batching exists
                self.d_sequence = []
                self.d_sequence.append(tensor(self.train_x[i]))
                self.yreal = tensor(self.train_y[i])

                # Forward pass: apply each layer in turn.
                for l in range(len(self.func_sequence)):
                    layer_l = self.func_sequence[l](self.d_sequence[l], self.w_sequence[l])
                    self.flow_sequence.append(layer_l.f)
                    self.d_sequence.append(layer_l.out)
                    self.d_sequence[l].reset_bgrad()

                # Compute the loss on the final layer output.
                loss_func = self.loss_f(self.yreal, self.d_sequence[-1])
                self.train_loss = loss_func.out
                self.flow_sequence.append(loss_func.f)
                self.train_loss.bgrad = 1

                # Merge all layer/loss flows into one graph, then backprop.
                if len(self.flow_sequence) > 1:
                    self.flow_sequence[0].append(self.flow_sequence[1:])

                self.flow_sequence[0].backward()
                self.flow_sequence = [] # reset flow sequence for the next step

                # Gradient-descent update. Bug fix: renamed the loop variable —
                # the old code reused `i`, clobbering the batch index.
                self.w_grad = []
                for w_idx in range(len(self.w_sequence)):
                    self.w_grad.append(deepcopy(self.w_sequence[w_idx].bgrad))
                    self.w_sequence[w_idx].val -= self.lr*self.w_grad[w_idx]
                    self.w_sequence[w_idx].reset_bgrad()

            print("Epoch: {}, Train Loss: {:.4f}".format(epoch_idx, self.train_loss.val))

            # Stop when the relative loss change drops below tl_diff.
            # (First epoch: any comparison with nan is False, so training continues.)
            if np.abs(self.train_loss.val - self.prev_train_loss) < self.tl_diff*self.train_loss.val:
                break
            else:
                self.prev_train_loss = deepcopy(self.train_loss.val)

if __name__ == "test": 

    #test vector quadratic: z = x^2 + 2xy + y^2 (x, y has shape (1, 2)) 
    
    #Test flow: define the inputs and the computation (the graph flow), and mark
    #in the flow which variables need forward or backward gradients.
    #Once the variables are defined, Flow().backward() computes the backward
    #gradients for the whole flow.
    #Flow state is passed through a global variable, so only one flow can record
    #at a time (sequential computation only), but flow control is supported.

    f = Flow() 
    x = tensor([1, 2])
    y = tensor([3, 4]) 
    y.add_grad(1) 
    f.add_input([x, y]) # pass a list 
    z = ad_innersum(x**2 + 2*x*y + y**2) # overloaded operators keep the expression concise 
    f.end()

    z.bgrad = 1 # seed the backward-propagated gradient 
    
    f.backward() # compute bgrad for the registered inputs 

    print(f.bgrad) 

    x = tensor(np.arange(5)) 
    y = tensor(np.ones(5)) 
    z = MSE(x, y) 
 
if __name__ == "__main__": 
    """
    后续：
    1）可以利用变量scalar来做函数 
    1.5）nonlocal实现控制流 
    2）exp，sin，cos，tan，ln，sigmoid，relu，leakyrelu，（conv2d做不了因为numpy没有，除非搞FFT）
    3）全连接层封装：Flow图可以用list extend来拼接 
    4）NN梯度下降反向传播（Linear Regression）（SGD封装） 
    5）Torch环境转换 
    """ 
    # Demo: linear regression, comparing AD gradient descent against the
    # closed-form OLS solution.
    # y = 4 + 3x1 + 2x2 + e 
    N = 1000 
    e = np.random.normal(0, 1, N) 
    x = np.random.normal(0, 1, [N, 3]) 
    #w_real = np.array([4]) 
    w_real = np.array([4, 3, 2]) 

    y = np.matmul(x, w_real) + e
    # Closed-form OLS: w = (X^T X)^-1 X^T y
    xtx_inv = np.linalg.inv(np.matmul(np.transpose(x), x))
    xty = np.matmul(np.transpose(x), y) 
    w_linreg = np.matmul(xtx_inv, xty)

    # NOTE(review): w0/xin/yreal/eta are only used by the disabled manual
    # single-step demo in the string block below.
    w0 = tensor(np.ones(3)) 
    xin = tensor(x) 
    yreal = tensor(y) 

    eta = 0.1 

    #"""
    m = Model() 
    m.add_layer(Linear, 3) 
    m.add_loss(MSE) 
    m.train(x, y) 
    print("AD Gradient Descent {}".format(m.w_sequence[0].val.round(4)))
    #"""
    """
    layer1 = Linear(xin, w0)
    yguess = layer1.out 
    f1 = layer1.f 

    loss = MSE(yreal, yguess) 
    loss_val = loss.out 
    f2 = loss.f 

    f1.append([f2]) 
    #loss_val.bgrad = 1 
    f1.backward() 

    w1 = w0.val - w0.bgrad 
    print(w1) 
    #""" 
    print("Linear Regression: {}".format(w_linreg.round(4))) 
    print("Real Weight: {}".format(w_real.round(4))) 