from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import numpy as np
import pandas as pd
from PIL import Image
import matplotlib.pyplot as plt

# Global mode flag: 0 = train on the sklearn digits dataset with random
# weights; 1 = run the fixed 2-2-2 worked example with hand-set weights
# (the classic step-by-step backpropagation demo values 0.15/0.20/... below).
test=1

class Relu:
    """Rectified Linear Unit activation layer.

    Bug fixed: the forward method was misspelled ``forword`` — the Dense
    layers call ``self._acter.forward(...)``, so selecting ``relu`` as the
    activation raised AttributeError.  ``backward`` now also accepts being
    called with no argument (as ``Dense.backward`` does for Sigmoid), in
    which case it returns the local derivative.
    """

    def __init__(self):
        # Boolean mask of the positions zeroed out on the forward pass.
        self.mask = None

    def forward(self, x):
        # Remember where the input was negative; those positions output 0.
        self.mask = (x < 0)
        out = x.copy()
        out[self.mask] = 0
        return out

    def backward(self, dout=None):
        """Backward pass.

        With an upstream gradient ``dout``: zero it (in place) where the
        forward input was negative and return it.  With no argument:
        return the element-wise derivative (1 where input >= 0, else 0),
        matching the call convention ``self._acter.backward()`` used by
        the Dense layers.
        """
        if dout is None:
            return np.where(self.mask, 0.0, 1.0)
        dout[self.mask] = 0
        return dout

class Sigmoid:
    """Logistic sigmoid activation layer.

    The forward output is cached so the derivative can be computed in
    ``backward()`` without re-evaluating the function.
    """

    def __init__(self):
        # Result of the most recent forward pass; consumed by backward().
        self.out = None

    def forward(self, x):
        activated = 1 / (1 + np.exp(-x))
        self.out = activated
        return activated

    def backward(self):
        # d(sigmoid)/dx = out * (1 - out), from the cached forward output.
        return np.multiply(self.out, 1 - self.out)

class SoftmaxWithLoss:
    """Softmax activation fused with cross-entropy loss.

    Bug fixed: the original called free functions ``softmax`` and
    ``cross_entropy_error`` that are defined nowhere in this file, so any
    use raised NameError.  They are now private static methods; the public
    interface (forward/backward) is unchanged.
    """

    def __init__(self):
        self.loss = None  # most recent loss value
        self.y = None     # softmax output
        self.t = None     # supervision data (one-hot vectors)

    @staticmethod
    def _softmax(x):
        # Shift by the row-wise max before exponentiating for numerical
        # stability (does not change the result).
        shifted = x - np.max(x, axis=-1, keepdims=True)
        e = np.exp(shifted)
        return e / np.sum(e, axis=-1, keepdims=True)

    @staticmethod
    def _cross_entropy_error(y, t):
        # Mean cross-entropy over the batch; the small epsilon guards
        # against log(0).
        batch_size = y.shape[0] if y.ndim > 1 else 1
        return -np.sum(t * np.log(y + 1e-7)) / batch_size

    def forward(self, x, t):
        """Return the cross-entropy loss of softmax(x) against one-hot t."""
        self.t = t
        self.y = self._softmax(x)
        self.loss = self._cross_entropy_error(self.y, self.t)
        return self.loss

    def backward(self, dout=1):
        """Gradient of the fused softmax+cross-entropy: (y - t) / batch."""
        batch_size = self.t.shape[0]
        dx = (self.y - self.t) / batch_size
        return dx

class Dense:
    """Hidden fully-connected layer: activation(x . W + b).

    With the module-level ``test`` flag at 0 the weights are random;
    otherwise they are the fixed values of the classic two-neuron
    backpropagation worked example, so intermediate results can be
    checked by hand.

    NOTE(review): only the weights are updated in renew(); the bias is
    never trained — confirm this is intended.
    """
    def __init__(self,size,activate='sigmoid'):
        # ``size`` is (n_inputs, n_units); attribute name keeps the
        # original (misspelled) spelling because methods below read it.
        self._sise=size
        self._act=activate
        if activate=='sigmoid':
            self._acter=Sigmoid()
        elif activate=='relu':
            self._acter=Relu()
        if test==0:
            self._w=np.random.randn(self._sise[0],self._sise[1])
            self._b = 0
        else:
            # Fixed demo weights/bias (w1..w4 and b1 of the worked example).
            self._w=np.array([[0.15,0.25],[0.20,0.30]])
            self._b = np.array(0.35)

    def forward(self,input):
        # Cache input and activated output; both are consumed by
        # backward()/renew().  The scalar bias is broadcast across units.
        self.input=input
        self.output=np.matrix(input).dot(self._w)+np.full((1,self._w.shape[1]),self._b)
        self.output = self._acter.forward(self.output)
        return self.output

    def backward(self,dense):
        """Accumulate this layer's error from the NEXT layer ``dense``.

        Sums the next layer's error weighted by its weights, then scales
        by this layer's activation derivative (self._acter.backward()).
        """
        # self.err=np.multiply(self.output-Y,np.multiply(self.output,1-self.output))
        self.Eo=np.multiply(np.sum(np.multiply(dense.err,dense._w),axis=1).T,self._acter.backward())

    def renew(self, lr):
        # Gradient step on the weights: the error row is tiled against the
        # cached input column to form the per-weight gradient matrix.
        self._w -= lr * np.multiply(self.Eo,np.repeat(self.input.T.reshape((len(self.input),1)),self._w.shape[1],axis=1))

class Dense2:
    """Output fully-connected layer; its backward pass starts from the
    target ``Y`` (squared-error * sigmoid-derivative) rather than from a
    downstream layer.

    With ``test`` = 0 the weights are random; otherwise they are the fixed
    output-layer values (w5..w8, b2) of the backpropagation worked example.

    NOTE(review): the bias is never updated in renew() — confirm intended.
    """
    def __init__(self,size,activate='sigmoid'):
        # ``size`` is (n_inputs, n_units); original (misspelled) attribute
        # name kept because the methods below read it.
        self._sise=size
        self._act=activate
        if activate=='sigmoid':
            self._acter=Sigmoid()
        elif activate=='relu':
            self._acter=Relu()
        if test == 0:
            self._w=np.random.randn(self._sise[0],self._sise[1])
            self._b = 0
        else:
            self._w=np.array([[0.40, 0.50], [0.45, 0.55]])
            self._b = np.array(0.60)

    def forward(self,input):
        # Cache input and activated output for the backward pass; the
        # scalar bias is broadcast across all units.
        self.input=input
        self.output=np.matrix(input).dot(self._w)+np.full((1,self._w.shape[1]),self._b)
        self.output = self._acter.forward(self.output)
        return self.output

    def backward(self,Y):
        """Compute the output error against target Y.

        err = (output - Y) * output * (1 - output): derivative of squared
        error times the sigmoid derivative.  Eo is the per-weight gradient
        (err scaled by the cached input), read by the previous layer and
        by renew().
        """
        self.err=np.multiply(self.output-Y,np.multiply(self.output,1-self.output))
        self.Eo=np.multiply(self.err,self.input.T)

    def renew(self,lr):
        # Gradient step on the weights only (bias untouched).
        self._w-=lr*self.Eo

class Classifier:
    """A small fully-connected network: hidden Dense layers followed by a
    Dense2 output layer.

    Parameters
    ----------
    size : sequence of layer widths, e.g. [2, 2, 2]
    X, Y : training input and target (a single sample in this script)
    lr   : learning rate used for the weight updates

    Bugs fixed: ``backward()`` previously read the module-level global
    ``Y`` instead of ``self.Y``, and ``renew`` was called with a
    hard-coded 0.5 instead of the constructor's learning rate.
    """
    def __init__(self, size, X, Y, lr):
        self.layers = {}
        self.layers_num = len(size) - 1
        for i in range(self.layers_num):
            # Every layer but the last is a hidden Dense; the last is the
            # Dense2 output layer (its backward starts from the target).
            if i + 1 < self.layers_num:
                self.layers['l' + str(i + 1)] = Dense((size[i], size[i + 1]))
            else:
                self.layers['l' + str(i + 1)] = Dense2((size[i], size[i + 1]))

        self.X = X
        self.Y = Y
        self._lr = lr

    def forward(self):
        """Feed the stored sample through all layers; return the output."""
        tmp_out = self.X
        for i in range(self.layers_num):
            tmp_out = self.layers['l' + str(i + 1)].forward(tmp_out)
        return tmp_out

    def backward(self):
        """Backpropagate errors output-to-input, then apply the updates."""
        for i in range(self.layers_num - 1, -1, -1):
            if i == self.layers_num - 1:
                # Fixed: use the stored target, not the global Y.
                self.layers['l' + str(i + 1)].backward(self.Y)
            else:
                self.layers['l' + str(i + 1)].backward(self.layers['l' + str(i + 2)])
        for i in range(self.layers_num - 1, -1, -1):
            # Fixed: use the configured learning rate, not a literal 0.5.
            self.layers['l' + str(i + 1)].renew(self._lr)
        # self.Eo=np.multiply(np.multiply(self.out2-self.Y,np.multiply(self.out2,1-self.out2)),self.out1)

def preProcess(X, resize_width, resize_height):
    """Min-max normalise each flattened image row of X, in place.

    Each row of ``X`` is viewed as a (resize_height, resize_width) image;
    every pixel *column* is rescaled to [0, 1] (columns with a constant
    value map to 0, matching sklearn's MinMaxScaler behaviour), and the
    result is written back into ``X``.  Returns ``X`` so the call can
    also be used functionally.

    Bug fixed: the original normalised a *copy* created by ``np.resize``
    and never returned it, so the caller's array was left untouched.
    """
    pixel_num = resize_height * resize_width
    for i in range(len(X)):
        img = np.reshape(X[i], (resize_height, resize_width)).astype(float)
        col_min = img.min(axis=0)
        col_range = img.max(axis=0) - col_min
        # Avoid division by zero: constant columns become all zeros.
        col_range[col_range == 0] = 1
        X[i] = ((img - col_min) / col_range).reshape(pixel_num)
    return X

if __name__ == '__main__':
    # Load the 8x8 digit images and split off a held-out test set.
    data = load_digits(n_class=10)
    X = data.data
    Y = data.target
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.15)
    preProcess(X_train, 8, 8)
    if test == 0:
        # Real-data mode: one-hot encode the labels, build a 64-120-10 net.
        tmp_X = X_train[0, :]
        leng = len(X)
        mat_Y = np.full((leng, 10), 0)
        for i in range(leng):
            tmp = Y[i]
            tmp_Y = np.zeros((1, len(set(Y))))
            tmp_Y[0, tmp] = 1
            mat_Y[i] = tmp_Y
        # NOTE(review): only the first sample (X[0]/mat_Y[0]) is ever
        # trained on, and X (not the preprocessed X_train) is used —
        # confirm this is intended.
        net = Classifier([64, 120, 10], X[0], mat_Y[0], lr=0.5)
    else:
        # Worked-example mode: the fixed 2-2-2 network with known weights.
        X = [0.05, 0.10]
        Y = [0.01, 0.99]
        net = Classifier([2, 2, 2], np.array(X), np.array(Y), lr=0.5)

    epoch = 10000
    for i in range(epoch):
        # Bug fixed: the original condition was `epoch % 100 == 0`, which
        # is constant-true for epoch = 10000, so it printed every step.
        # The forward pass itself must still run every iteration, because
        # backward() consumes the activations cached by forward().
        out = net.forward()
        if i % 100 == 0:
            print(out)
        net.backward()