from scipy.optimize import minimize
import struct
import numpy as np
import matplotlib.pyplot as plt
# Load the MNIST training set (idx3-ubyte format).
# The 16-byte header holds magic number, image count, rows and cols as
# big-endian uint32 values; the remaining bytes are raw uint8 pixels,
# flattened here to one 784-long row per image.
with open('D:/PythonProject/GAN/data/MNIST/raw/train-images-idx3-ubyte', 'rb') as imgpath:
    _, images_num, rows, cols = struct.unpack('>IIII', imgpath.read(16))
    images = np.fromfile(imgpath, dtype=np.uint8).reshape(images_num, rows * cols)
def logi_func(x):
    """Numerically stable logistic sigmoid, applied elementwise.

    The original implementation branched on ``np.all(x >= 0)``: any array
    containing even one negative entry fell into the ``exp(x)`` branch, which
    overflows for large positive entries.  Here the stable form is chosen
    per element instead: the exponent argument is always non-positive, so
    ``np.exp`` never overflows.
    """
    nonneg = x >= 0
    # exp argument is -|x| in both branches -> always <= 0, never overflows.
    ex = np.exp(np.where(nonneg, -x, x))
    # For x >= 0: 1/(1+e^-x); for x < 0: e^x/(1+e^x). Both equal sigmoid(x).
    return np.where(nonneg, 1.0 / (1.0 + ex), ex / (1.0 + ex))
def logi_func_grad(x):
    """Derivative of the logistic sigmoid: s(x) * (1 - s(x)), elementwise.

    Bug fix: the previous body returned ``exp(x)/(exp(x)+1)``, which is the
    sigmoid itself, not its gradient (and it overflowed for large x).
    Computed via sigmoid(-|x|): since s(-a) = 1 - s(a), the product
    s*(1-s) is unchanged, and exp never sees a positive argument.
    """
    ex = np.exp(-np.abs(x))        # exp of a non-positive value: safe
    s = ex / (1.0 + ex)            # = min(s(x), 1 - s(x))
    return s * (1.0 - s)

def non_activate_func(x):
    """Identity activation: hand the pre-activation value back untouched."""
    output = x
    return output
def none_function(x):
    """No-op transform used as the default post-processing hook: returns x as-is."""
    result = x
    return result
def mseloss(x, y):
    """Mean squared error between prediction ``x`` and target ``y``.

    Bug fix: the previous body returned the elementwise array
    ``(x - y) ** 2``, but this function is used as the objective for
    ``scipy.optimize.minimize``, which requires a scalar.  Reducing with the
    mean yields the same value for scalar inputs and a proper scalar loss
    for array inputs.
    """
    diff = np.asarray(x) - np.asarray(y)
    return float(np.mean(diff ** 2))



class Frame:
    """A bias-free fully connected network whose weights live in one flat
    vector, so the whole net can be trained with ``scipy.optimize.minimize``.

    Each entry of ``hidden`` is an ``(in_dim, out_dim, activation)`` triple
    describing one dense layer; the layer computes ``activation(x @ W)``.
    """

    def __init__(self, inputdata, *hidden):
        self.hidden = hidden
        # Current input batch (rows = samples); may be None until set_sample.
        self.inputdata = inputdata
        # Total scalar weight count across all layers (no biases are used).
        self.weightnum = sum(in_dim * out_dim for in_dim, out_dim, _ in hidden)
        # Flat weight vector, uniform in [0, 1); unpacked layer-by-layer
        # during the forward pass.
        self.init_weight = np.random.rand(self.weightnum)

    def set_sample(self, addtional_sample):
        """Replace the stored input batch (e.g. to feed real samples in)."""
        self.inputdata = addtional_sample

    def testfunc(self, init_weight, x):
        """Forward pass on an explicit input ``x`` using weights unpacked
        from the flat vector ``init_weight``."""
        start = 0
        out = x
        for in_dim, out_dim, activation in self.hidden:
            end = start + in_dim * out_dim
            weight_matrix = init_weight[start:end].reshape(in_dim, out_dim)
            start = end
            out = activation(np.dot(out, weight_matrix))
        return out

    def forward(self, init_weight):
        """Forward pass on the stored input batch.

        Delegates to ``testfunc`` — the two methods previously duplicated
        the same unpack-and-multiply loop.
        """
        return self.testfunc(init_weight, self.inputdata)

    def loss(self, loss_function, y):
        """Loss of the current weights on the stored input against target y."""
        return loss_function(self.forward(self.init_weight), y)

    def lossvalue(self, init_weight, loss_function, y, additional_function=None, genarg=None):
        """Optimizer objective: loss of the (optionally post-processed)
        network output against target ``y``.

        ``additional_function`` defaults to the identity, which is what the
        previous module-level ``none_function`` default did; when ``genarg``
        is given it is passed as the first argument — this is how the
        generator routes its output through the discriminator.
        """
        if additional_function is None:
            additional_function = lambda v: v
        output = self.forward(init_weight)
        if genarg is None:
            return loss_function(additional_function(output), y)
        return loss_function(additional_function(genarg, output), y)

    def optim(self, loss_function, y, additional_function=None, additional_arg=None):
        """Fit the flat weight vector in place with L-BFGS-B."""
        res = minimize(self.lossvalue, self.init_weight,
                       args=(loss_function, y, additional_function, additional_arg),
                       method='L-BFGS-B')
        self.init_weight = res.x




# frame = Frame(x,(1, 30, logi_func), (30, 1, non_activate_func))
# print(frame.loss(mseloss, y))
# frame.optim(mseloss,y)
# print(frame.loss(mseloss, y))

def Leaky_relu(x):
    """Leaky ReLU activation: x where x > 0, otherwise 0.01 * x."""
    return np.where(x > 0, x, 0.01 * x)

def tanh(x):
    """Hyperbolic tangent activation, elementwise.

    Bug fix: the previous hand-rolled form
    ``(e^x - e^-x) / (e^x + e^-x)`` overflows to nan for |x| greater than
    roughly 710; ``np.tanh`` computes the same function stably.
    """
    return np.tanh(x)

def sigmoid(x):
    """Sigmoid activation; thin alias around the module's logi_func."""
    value = logi_func(x)
    return value

def relu(x):
    """ReLU activation: clamp negative entries to zero, elementwise."""
    return np.maximum(x, 0)


# Number of real images consumed per training iteration.
batch = 6000
# Fixed 1x20 noise vector fed to the generator.
# NOTE(review): this noise is sampled once and never refreshed, so the
# generator only ever learns to map this single point — confirm intended.
rand_noise = np.random.rand(20).reshape(1,20)
# Generator: 20-dim noise -> 784 pixel values (tanh output layer).
gen = Frame(rand_noise,
            (20, 6, relu),
            (6, 10, relu),
            (10, 28 * 28, tanh))
# Discriminator: 784 pixels -> single score (sigmoid output layer).
# Its input batch is supplied later via set_sample in the training loop.
dis = Frame(None,
            (28 * 28, 10, Leaky_relu),
            (10, 20, Leaky_relu),
            (20, 1, sigmoid))
# Target labels: every real image should be classified as 1.
train_image_label = np.ones(batch).reshape(batch,1)

for i in range(10):
    print("第{}次开始".format(i+1))
    # Real images for this iteration's slice of the dataset.
    trainimage = images[i * batch:(i + 1) * batch]
    print("第{}次开始生成图片".format(i+1))
    # Bug fix: keep the fake image as a flat (1, 784) row (it was previously
    # reshaped to 28x28 and then never used at all).
    gen_image = gen.forward(gen.init_weight).reshape(1, 28 * 28)
    print("第{}次加入样本".format(i+1))
    # Bug fix: the discriminator targets below contain batch+1 labels
    # (batch ones for real images plus a single 0 for the fake), so the
    # fake image must be stacked onto the sample; previously only the
    # real images were fed in, leaving the shapes mismatched.
    dis.set_sample(np.vstack((trainimage, gen_image)))
    print("第{}次dis优化".format(i+1))
    # Reshape labels to a column so they broadcast against the (batch+1, 1)
    # discriminator output instead of exploding to (batch+1, batch+1).
    dis.optim(mseloss, np.append(train_image_label, 0).reshape(-1, 1))
    print("第{}次gen优化".format(i+1))
    # Train the generator to make the (frozen-weight) discriminator output 1.
    gen.optim(mseloss, np.ones(1), additional_function=dis.testfunc, additional_arg=dis.init_weight)

# Display the final 28x28 image produced by the trained generator.
plt.imshow(gen.forward(gen.init_weight).reshape(28,28))
plt.show()