#! python
#-*- coding: utf-8 -*- 

import numpy as np
import torch
from torch import nn
from torch.nn import functional as F

# prepare for plot
import matplotlib.pyplot as plt
# Font configuration
# Use SimHei (a CJK font) so Chinese labels render correctly on Windows
plt.rcParams['font.sans-serif'] = ['SimHei']
# Prevent the minus sign '-' from rendering as a box when a CJK font is active
plt.rcParams['axes.unicode_minus'] = False

def get_loader_minist(data_path=None,
                      batch_size_train=64,
                      batch_size_test=1000,
                      download=False):
    """Build MNIST train and test DataLoaders.

    Args:
        data_path: Directory holding the MNIST data. Defaults to a ``data``
            folder next to this source file.
        batch_size_train: Batch size for the training loader.
        batch_size_test: Batch size for the test loader.
        download: Forwarded to torchvision; if True, download the dataset
            when it is not already present locally.

    Returns:
        A ``(train_loader, test_loader)`` tuple.
    """
    if data_path is None:
        from pathlib import Path
        # Resolve relative to this file rather than sys.argv[0]; argv[0] is
        # unreliable when the module is imported instead of run as a script.
        data_path = Path(__file__).resolve().parent / 'data'
        print(f"data_path = {data_path}")
    import torchvision
    # Standard MNIST normalization constants (dataset mean / std).
    transform = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize((0.1307,), (0.3081,)),
    ])
    train_dataset = torchvision.datasets.MNIST(
        data_path, train=True, download=download, transform=transform)
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=batch_size_train, shuffle=True)
    test_dataset = torchvision.datasets.MNIST(
        data_path, train=False, download=download, transform=transform)
    # NOTE(review): shuffle=True on the test set is kept for backward
    # compatibility; evaluation order does not affect metrics.
    test_loader = torch.utils.data.DataLoader(
        test_dataset, batch_size=batch_size_test, shuffle=True)
    return train_loader, test_loader


def visualize_samples(loader, num_samples=6):
    """Show the first ``num_samples`` images of one batch with their labels.

    Args:
        loader: DataLoader yielding ``(images, labels)`` batches; images are
            assumed shaped (batch, 1, H, W) — TODO confirm against loader.
        num_samples: How many images to display; clamped to the batch size.
    """
    images, labels = next(iter(loader))
    # Never index past the end of the batch.
    num_samples = min(num_samples, len(images))
    # squeeze=False keeps `axes` a 2-D array even when num_samples == 1,
    # so axes[0][i] indexing always works.
    _, axes = plt.subplots(1, num_samples, figsize=(12, 2), squeeze=False)
    for i in range(num_samples):
        axes[0][i].imshow(images[i][0], cmap='gray')
        axes[0][i].set_title(f'标签: {labels[i].item()}')
        axes[0][i].axis('off')
    plt.tight_layout()
    plt.show()


class MNISTClassifier(nn.Module):
    """CNN classifier for 28x28 grayscale MNIST digits.

    Architecture: two conv+pool blocks (1 -> 32 -> 64 channels) with
    dropout, then a two-layer fully-connected head producing
    log-probabilities over the 10 digit classes.
    """

    def __init__(self):
        super().__init__()
        # First conv layer: 1 input channel (grayscale), 32 outputs, 3x3 kernel.
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3, padding=1)
        # Second conv layer: 32 -> 64 channels, 3x3 kernel.
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        # 2x2 max pooling, shared by both conv blocks.
        self.pool = nn.MaxPool2d(2, 2)
        # Dropout for regularization: light after the convs, heavier
        # before the classification head.
        self.dropout1 = nn.Dropout(0.25)
        self.dropout2 = nn.Dropout(0.5)
        # Fully-connected head; two 2x2 poolings reduce 28x28 to 7x7.
        self.fc1 = nn.Linear(64 * 7 * 7, 128)
        self.fc2 = nn.Linear(128, 10)  # 10 classes (digits 0-9)

    def forward(self, x):
        """Compute class log-probabilities for a batch of images.

        Args:
            x: Tensor of shape (batch, 1, 28, 28).

        Returns:
            Tensor of shape (batch, 10) with log-probabilities
            (``log_softmax`` output).
        """
        # Conv block 1: conv -> ReLU -> pool -> dropout.
        x = self.dropout1(self.pool(F.relu(self.conv1(x))))
        # Conv block 2: conv -> ReLU -> pool -> dropout.
        x = self.dropout1(self.pool(F.relu(self.conv2(x))))
        # Flatten from dim 1: unlike view(-1, 64*7*7), this raises on an
        # unexpected spatial size instead of silently changing the batch size.
        x = torch.flatten(x, 1)
        x = self.dropout2(F.relu(self.fc1(x)))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)


def train(random_seed=1):
    """Prepare for reproducible training: disable cuDNN and seed torch.

    NOTE(review): this is a stub — the actual training loop
    (epochs, optimizer, logging) is not implemented yet.

    Args:
        random_seed: Seed passed to ``torch.manual_seed``.
    """
    import torch
    # cuDNN is turned off for reproducibility of results.
    torch.backends.cudnn.enabled = False
    torch.manual_seed(random_seed)



def main():
    """Demo: load MNIST, visualize one batch, and inspect model outputs."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    train_loader, test_loader = get_loader_minist()
    visualize_samples(train_loader, num_samples=6)
    # BUG FIX: move the model to the same device as the data; without this,
    # a CUDA machine crashes with a device-mismatch error at model(first).
    model = MNISTClassifier().to(device)
    # Evaluation mode disables dropout so the printed outputs are deterministic.
    model.eval()
    with torch.no_grad():  # no gradients needed for inspection
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            print(f"data.shape = {data.shape}")
            print(f"target.shape = {target.shape}")
            first = data[0:2]
            print(f"first.shape = {first.shape}")
            output = model(first)  # log-probabilities (log_softmax output)
            print(f"output = {output}")
            pred = output.argmax(dim=1, keepdim=True)
            print(f"pred = {pred}")
            # BUG FIX: the model already returns log-probabilities, so the
            # class probabilities are exp(output). The original applied
            # F.softmax again, computing softmax(log_softmax(x)) — not the
            # model's probability distribution.
            prob = output.exp()
            print(f"prob = {prob}")
            total = prob.sum(dim=1)  # sanity check: should be ~1.0 per row
            print(f"total = {total}")
            # torch.exp instead of np.exp: stays in tensor land and avoids
            # relying on numpy's implicit handling of torch tensors.
            prob1 = torch.exp(output)
            print(f"prob1 = {prob1}")
            break


# Run the demo only when this file is executed as a script.
if __name__ == '__main__':
	main()

