import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
import torchvision
import matplotlib.pyplot as plt
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import os
from BilinearInterpolation import BilinearInterpolation

# Select GPU when available.
# BUG FIX: the original wrote `torch.cuda.is_available` without parentheses,
# which is a bound function object and therefore always truthy — the device
# string was "cuda" even on CPU-only machines. Call it properly.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_epochs = 3  # number of passes over the training set
batch_size_train = 64  # samples per training batch
batch_size_test = 200  # samples per evaluation batch
learning_rate = 0.01
momentum = 0.5
log_interval = 36  # print training progress every `log_interval` batches
random_seed = 1
torch.manual_seed(random_seed)  # fixed seed for reproducibility
writer = SummaryWriter('./log')  # TensorBoard event directory
save_path = "./model/"  # checkpoint directory
if not os.path.exists(save_path):
	os.makedirs(save_path, exist_ok=True)

# Preprocessing: convert to tensor, then normalize with the MNIST mean/std.
pipeline = torchvision.transforms.Compose([
	 torchvision.transforms.ToTensor(),
	 torchvision.transforms.Normalize((0.1307,), (0.3081,))
])
# Download the training set.
train_data = torchvision.datasets.MNIST('./data/', train=True, download=True, transform=pipeline)
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size_train, shuffle=True)
print(train_data.classes)

# Download the test set.
test_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('./data/', train=False, download=True, transform=pipeline),
    batch_size=batch_size_test, shuffle=True)

# Bilinear upsampler kept around for feature-map visualisation experiments.
BI = BilinearInterpolation(8, 8)

class Net(nn.Module):
	"""Small CNN classifier for 28x28 single-channel MNIST digits.

	Architecture: conv(1->10, 5x5) -> maxpool(2) -> ReLU ->
	conv(10->20, 3x3) -> ReLU -> fc(2000->500) -> ReLU -> fc(500->10).
	forward() returns raw logits; apply log_softmax / cross-entropy
	externally (the training loop does).
	"""

	def __init__(self):
		super(Net, self).__init__()
		# First conv layer: 1 input channel (grayscale), 10 output
		# channels, 5x5 kernel. 28x28 input -> 24x24 output.
		self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
		# Second conv layer: 10 -> 20 channels, 3x3 kernel.
		self.conv2 = nn.Conv2d(10, 20, kernel_size=3)
		# fc1 input is 20*10*10 = flattened conv2 output for 28x28 input
		# (28 -5+1 = 24 -> pool -> 12 -3+1 = 10).
		self.fc1 = nn.Linear(20*10*10, 500)
		self.fc2 = nn.Linear(500, 10)

	def forward(self, x):
		# BUG FIX: the original forward contained leftover debug code —
		# print(x[0]), a dump of conv1's parameters, and a bare name `a`
		# that raised NameError on every call — plus large commented-out
		# visualisation blocks. All removed; the layer pipeline below is
		# unchanged.
		x = self.conv1(x)          # (N,1,28,28) -> (N,10,24,24)
		x = F.max_pool2d(x, 2, 2)  # 2x2 max pool, stride 2 -> (N,10,12,12)
		x = F.relu(x)
		x = self.conv2(x)          # (N,10,12,12) -> (N,20,10,10)
		x = F.relu(x)
		x = torch.flatten(x, 1)    # flatten all but batch dim -> (N,2000)
		x = self.fc1(x)            # (N,2000) -> (N,500)
		x = F.relu(x)
		x = self.fc2(x)            # (N,500) -> (N,10) raw logits
		return x
		
# Build the model and optimizer. The model stays on "cpu" because the
# training/test loops below feed CPU tensors. (Removed a commented-out
# parameter-dumping loop that was dead code.)
network = Net().to("cpu")
optimizer = optim.SGD(network.parameters(), lr=learning_rate, momentum=momentum)

def print_state_dict():
	"""Print the shape of every model parameter and the optimizer state.

	Improvement: the original called ``state_dict()`` twice per loop
	iteration; each call rebuilds the whole dict. Iterate ``.items()``
	once instead.
	"""
	print("Model's state_dict:")
	for param_name, tensor in network.state_dict().items():
		print(param_name, "\t", tensor.size())

	print("Optimizer's state_dict:")
	for var_name, value in optimizer.state_dict().items():
		print(var_name, "\t", value)

def save_state_dict():
	"""Checkpoint the model and optimizer state into `save_path`."""
	for obj, filename in ((network, "model.pth"), (optimizer, "optimizer.pth")):
		torch.save(obj.state_dict(), save_path + filename)

def load_state_dict():
	"""Restore model and optimizer state from `save_path`, if present.

	Silently returns when no checkpoint exists (fresh run).
	BUG FIX: the original only checked for ``model.pth`` but then loaded
	``optimizer.pth`` unconditionally, crashing if the optimizer
	checkpoint was missing — both files are now required before loading.
	"""
	model_file = save_path + "model.pth"
	optim_file = save_path + "optimizer.pth"
	if not (os.path.exists(model_file) and os.path.exists(optim_file)):
		return
	network.load_state_dict(torch.load(model_file))
	optimizer.load_state_dict(torch.load(optim_file))
	
# Resume from a previous checkpoint when one exists. eval() is a safe
# default here; train() switches the network back to training mode at the
# start of each epoch. (Removed a commented-out conv-layer-collection
# snippet that was dead code.)
load_state_dict()
network.eval()

def train(epoch):
	"""Run one training epoch over `train_loader`, logging progress."""
	network.train()
	for step, (images, labels) in enumerate(train_loader):
		optimizer.zero_grad()                 # clear accumulated gradients
		logits = network(images)              # forward pass -> raw logits
		log_probs = F.log_softmax(logits, dim=1)
		loss = F.nll_loss(log_probs, labels)  # negative log-likelihood
		loss.backward()                       # back-propagate
		optimizer.step()                      # update parameters
		if step % log_interval == 0:          # periodic progress report
			seen = step * len(images)
			total = len(train_loader.dataset)
			pct = 100. * step / len(train_loader)
			print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
				epoch, seen, total, pct, loss.item()))
def test(epoch):
	"""Evaluate on the test set; log misclassified images to TensorBoard
	and show predictions for six samples of the last batch.

	BUG FIX: ``network`` returns raw logits, but the original passed them
	straight to ``F.nll_loss`` (which expects log-probabilities), so the
	reported test loss was wrong. ``log_softmax`` is now applied first,
	matching the training loss. Predicted classes (argmax) and therefore
	accuracy are unaffected.
	"""
	network.eval()
	test_loss = 0
	correct = 0
	with torch.no_grad():
		for data, target in test_loader:
			output = F.log_softmax(network(data), dim=1)
			test_loss += F.nll_loss(output, target, reduction='sum').item()
			pred = output.data.max(1, keepdim=True)[1]  # argmax class per sample
			correct += pred.eq(target.data.view_as(pred)).sum()

			# Log every misclassified image to TensorBoard.
			for idx, label in enumerate(target):
				if pred[idx] != label:
					img_name = '轮次:{} 预测分类:{} 目标分类:{}'.format(epoch, pred[idx].item(), label)
					writer.add_images(img_name, torch.tensor([data[idx].numpy()]), epoch)

	test_loss /= len(test_loader.dataset)
	print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
			test_loss, correct, len(test_loader.dataset),
			100. * correct / len(test_loader.dataset)))
	# Plot the first six images of the last batch with their predictions.
	fig = plt.figure()
	for i in range(6):
		plt.subplot(2, 3, i + 1)
		plt.tight_layout()
		plt.imshow(data[i][0], cmap='gray', interpolation='none')
		plt.title("Prediction: {}".format(
			output.data.max(1, keepdim=True)[1][i].item()))
		plt.xticks([])
		plt.yticks([])
	plt.show()
	
# Main loop: train, evaluate, and checkpoint after every epoch.
# CONSISTENCY FIX: n_epochs is declared (= 3) for exactly this purpose,
# but the loop hard-coded range(1, 2) and silently trained one epoch.
for epoch in range(1, n_epochs + 1):
	train(epoch)
	test(epoch)
	save_state_dict()
	#print_state_dict()