########### Convolutional network on MNIST — 1. data preparation
import random,struct,numpy as np
import paddle.nn.functional as F,paddle
from paddle.io import Dataset,DataLoader
from paddle.static import InputSpec

# IDX type-code byte -> struct/numpy dtype character (big-endian data).
code2type = {0x08: 'B', 0x09: 'b', 0x0B: 'h', 0x0c: 'i', 0x0D: 'f', 0x0E: 'd'}

def readMatrix(filename):
	"""Parse an IDX-format file (MNIST image/label format) into an ndarray.

	The header is: 2 zero bytes, 1 dtype-code byte, 1 dimension-count byte,
	followed by that many big-endian uint32 dimension sizes, then the raw
	values. Returns a numpy array shaped per the header dimensions.
	"""
	with open(filename, 'rb') as fh:
		raw = fh.read()

	header_fmt = '>HBB'  # '>' = big-endian; skip-short, dtype code, #dims
	_, type_code, ndim = struct.unpack_from(header_fmt, raw, 0)
	pos = struct.calcsize(header_fmt)

	dims_fmt = '>{}I'.format(ndim)
	dims = struct.unpack_from(dims_fmt, raw, pos)
	pos += struct.calcsize(dims_fmt)

	ch = code2type[type_code]
	count = int(np.prod(dims))
	values = struct.unpack_from('>{}{}'.format(count, ch), raw, pos)
	return np.asarray(values, dtype=ch).reshape(dims)

def dataReader(imgfile, labelfile, batch_size, drop_last=False):
	"""Build a batch-generator factory over an IDX image/label file pair.

	Args:
		imgfile: path to the IDX image file (e.g. (60000, 28, 28) uint8).
		labelfile: path to the matching IDX label file.
		batch_size: samples per yielded batch.
		drop_last: if True, discard the final incomplete batch.

	Returns:
		A zero-argument callable; each call yields lists of (image, label)
		pairs, shuffled once at construction time.
	"""
	images = readMatrix(imgfile)
	labels = readMatrix(labelfile)
	samples = list(zip(images, labels))
	# Shuffle once so every epoch over this reader sees the same order.
	random.shuffle(samples)
	full_batches = len(samples) // batch_size

	def batch_reader():
		for i in range(full_batches):
			yield samples[i * batch_size:(i + 1) * batch_size]
		# BUG FIX: the original yielded the leftover tail only when
		# drop_last was True, inverting the flag's meaning. The tail is
		# kept only when drop_last is False.
		if not drop_last and len(samples) % batch_size != 0:
			yield samples[full_batches * batch_size:]
	return batch_reader

#############  2. Model definition
class LeNet(paddle.nn.Layer):
	"""LeNet-5 style CNN for 1x28x28 input, producing 10 class logits."""

	def __init__(self):
		super(LeNet, self).__init__()
		# conv1 pads so 28x28 is preserved; pooling then halves each side.
		self.conv1 = paddle.nn.Conv2D(in_channels=1, out_channels=6, kernel_size=5, stride=1, padding=2)
		self.max_pool = paddle.nn.MaxPool2D(kernel_size=2, stride=2)
		# conv2 is unpadded: 14x14 -> 10x10, pooled to 5x5 (hence 16*5*5 below).
		self.conv2 = paddle.nn.Conv2D(in_channels=6, out_channels=16, kernel_size=5, stride=1)
		self.linear1 = paddle.nn.Linear(in_features=16 * 5 * 5, out_features=120)
		self.linear2 = paddle.nn.Linear(in_features=120, out_features=84)
		self.linear3 = paddle.nn.Linear(in_features=84, out_features=10)

	def forward(self, x):
		# Two conv -> relu -> pool stages, then flatten into the FC head.
		out = self.max_pool(F.relu(self.conv1(x)))
		out = self.max_pool(F.relu(self.conv2(out)))
		out = paddle.flatten(out, start_axis=1, stop_axis=-1)
		out = F.relu(self.linear1(out))
		out = F.relu(self.linear2(out))
		return self.linear3(out)

##### 3. Training (manual batch reader)
def train(model, loader):
	"""Train `model` for 2 epochs over batches from `loader()`; return it.

	`loader` is a zero-argument callable (see dataReader) yielding lists of
	(image, label) pairs as numpy-compatible values.
	"""
	# Training mode: affects layers such as Dropout and BatchNorm.
	model.train()
	optim = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters())
	num_epochs = 2
	for epoch in range(num_epochs):
		for batch_id, batch in enumerate(loader()):
			imgs, labs = zip(*batch)
			imgs = paddle.to_tensor(np.array(imgs).astype('float32'))
			labs = paddle.to_tensor(np.array(labs).astype('int64'))
			imgs = paddle.unsqueeze(imgs, 1)  # (N,28,28) -> (N,1,28,28) channel axis
			labs = paddle.unsqueeze(labs, 1)  # (N,) -> (N,1)
			predicts = model(imgs)
			loss = F.cross_entropy(predicts, labs)
			acc = paddle.metric.accuracy(predicts, labs)
			loss.backward()
			if batch_id % 300 == 0:
				print("epoch: {}, batch_id: {}, loss is: {}, acc is: {}".format(epoch, batch_id, loss.numpy(), acc.numpy()))
			optim.step()
			optim.clear_grad()
	return model

# ######## Load the test set and evaluate
def test(model, loader):
	"""Evaluate `model` on batches from `loader()`, printing running accuracy."""
	model.eval()
	correct = 0
	total = 0
	for batch_id, batch in enumerate(loader()):
		imgs, labs = zip(*batch)
		imgs = paddle.to_tensor(np.array(imgs).astype('float32'))
		imgs = paddle.unsqueeze(imgs, 1)  # add channel axis: N,1,28,28
		labs = np.array(labs)  # kept as ndarray for the comparison below
		preds = paddle.argmax(model(imgs), 1)  # predicted class index per sample
		total += labs.size
		correct += (preds.numpy() == labs).sum()
		# Running accuracy over everything seen so far (printed every batch).
		print('Accuracy of the network on the {:d} test images: {:f}%'.format(total, 100 * correct / total))

# ####### Data preparation via the high-level Dataset API
class MnistDataset(Dataset):
	"""Map-style dataset over an IDX image/label file pair.

	Each item is (float32 image scaled to [0,1] with a leading channel
	axis, int64 label of shape (1,)).
	"""

	def __init__(self, data, label):
		super(MnistDataset, self).__init__()
		# Decode both IDX files fully into memory up front.
		self.matrix = readMatrix(data).astype('float32')
		self.label = readMatrix(label).astype('int64')
		self.num_samples = len(self.matrix)

	def __getitem__(self, idx):
		# Prepend a channel axis and normalize pixel values to [0,1].
		img = np.expand_dims(self.matrix[idx], axis=0) / 255
		lab = np.expand_dims(self.label[idx], axis=0)
		return img, lab

	def __len__(self):
		return self.num_samples

## Model training with DataLoader (train2)
def train2(model, loader):
	"""Train `model` for 2 epochs from a paddle DataLoader; return it.

	Unlike train(), batches arrive already collated as tensors.
	"""
	# Training mode: affects layers such as Dropout and BatchNorm.
	model.train()
	optim = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters())
	for epoch in range(2):
		for batch_id, (images, labels) in enumerate(loader()):
			predicts = model(images)
			loss = F.cross_entropy(predicts, labels)
			acc = paddle.metric.accuracy(predicts, labels)
			loss.backward()
			optim.step()
			optim.clear_grad()
			if batch_id % 300 == 0:
				print("epoch: {}, batch_id: {}, loss is: {}, acc is: {}".format(epoch, batch_id, loss.numpy(), acc.numpy()))
	return model

## Model evaluation with DataLoader (test2)
def test2(model, loader):
	"""Evaluate `model` on a paddle DataLoader, printing running accuracy."""
	# Inference mode: disables training-only behavior in sublayers.
	model.eval()
	correct = 0
	total = 0
	for batch_id, (images, labels) in enumerate(loader()):
		preds = paddle.argmax(model(images), 1)
		total += labels.size
		# labels arrive shaped (N,1); squeeze to match the argmax output.
		correct += (preds == paddle.squeeze(labels)).numpy().sum()
		# Running accuracy over everything seen so far (printed every batch).
		print('Accuracy of the network on the {:d} test images: {:f}%'.format(total, 100 * correct / total))
	print(correct, total)

if __name__ == '__main__':
	###########  Scheme 1: manual batch reader (dataReader) + train/test
	# train_loader = dataReader('../data/mnist/train-images-idx3-ubyte', '../data/mnist/train-labels-idx1-ubyte', 16,True)
	# test_loader = dataReader('../data/mnist/t10k-images-idx3-ubyte', '../data/mnist/t10k-labels-idx1-ubyte', 64, True)
	# PARM_PATH = "mnist_net.pdparams"
	#
	# model = LeNet()
	# model = train(model,train_loader)
	# paddle.save(model.state_dict(), PARM_PATH)
	#
	# model = LeNet()
	# layer_state_dict = paddle.load(PARM_PATH)
	# model.set_state_dict(layer_state_dict)  # load saved parameters
	# test(model,test_loader)

	########  Scheme 2: high-level Dataset / DataLoader API
	train_dataset = MnistDataset('../data/mnist/train-images-idx3-ubyte', '../data/mnist/train-labels-idx1-ubyte')
	train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True, drop_last=True, num_workers=0)
	test_dataset = MnistDataset('../data/mnist/t10k-images-idx3-ubyte', '../data/mnist/t10k-labels-idx1-ubyte')
	test_loader = DataLoader(test_dataset,batch_size=64,shuffle=True,drop_last=False,num_workers=0)
	PARM_PATH = "mnist_net2.pdparams"
	MODEL_PATH = "model/mnist"
	# model = LeNet()
	# model = train2(model,train_loader)
	# paddle.save(model.state_dict(), PARM_PATH)  # save parameters only

	# After training, paddle.jit.save stores model structure AND parameters:
	# paddle.jit.save(
	# 	layer=model,
	# 	path=MODEL_PATH,
	# 	input_spec=[InputSpec(shape=[None, 1,28,28], dtype='float32')])  # save structure and parameters

	# Rebuild the model structure, then load previously saved parameters.
	# NOTE(review): PARM_PATH/MODEL_PATH must already exist from an earlier run
	# (the training lines above are commented out), otherwise paddle.load fails here.
	model = LeNet()
	layer_state_dict = paddle.load(PARM_PATH)
	model.set_state_dict(layer_state_dict)
	# NOTE(review): evaluates on the TRAINING loader — presumably to check fit
	# on seen data; confirm test_loader was not intended here.
	test2(model,train_loader)

	### Load the jit-saved model (structure + parameters),
	model = paddle.jit.load(MODEL_PATH)  # load model and parameters
	## then evaluate it on the test set.
	test2(model, test_loader)